content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Generated by Django 3.1.7 on 2021-03-21 15:54
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
3070,
12,
2481,
1315,
25,
4051,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
from .project import load_project, new_project, use_project # noqa: F401
| [
6738,
764,
16302,
1330,
3440,
62,
16302,
11,
649,
62,
16302,
11,
779,
62,
16302,
220,
1303,
645,
20402,
25,
376,
21844,
198
] | 3.217391 | 23 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkCommonCore import (
vtkPoints,
vtkUnsignedCharArray
)
from vtkmodules.vtkCommonDataModel import (
vtkCellArray,
vtkLine,
vtkPolyData
)
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkPolyDataMapper,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkRenderer
)
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
11748,
410,
30488,
18170,
13,
85,
30488,
9492,
2673,
21466... | 2.556017 | 241 |
from django.utils import timezone
from django.views.generic import ListView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import path
from .urls import urlpatterns
from ..models import File
urlpatterns.append(
path('files/', FileListView.as_view(), name='file-list')
)
| [
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
7343,
7680,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
19816,
1040,
1330,
23093,
37374,
35608,
259,
198,
6738,
42625,
14208,... | 3.210526 | 95 |
from chatbot import app
from flask import render_template,flash, request
from chatbot.forms import chatbotform
from chatbot.__init__ import model,words,classes,intents
import nltk
import pickle
import json
import numpy as np
from keras.models import Sequential,load_model
import random
from datetime import datetime
import pytz
import requests
import os
import billboard
import time
from pygame import mixer
import COVID19Py
from nltk.stem import WordNetLemmatizer
lemmatizer=WordNetLemmatizer()
#Predict
@app.route('/',methods=['GET','POST'])
#@app.route('/home',methods=['GET','POST'])
@app.route('/chat',methods=['GET','POST'])
#@app.route('/home',methods=['GET','POST'])
@app.route("/get")
| [
6738,
8537,
13645,
1330,
598,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
34167,
11,
2581,
198,
6738,
8537,
13645,
13,
23914,
1330,
8537,
13645,
687,
198,
6738,
8537,
13645,
13,
834,
15003,
834,
1330,
2746,
11,
10879,
11,
37724,
11,
... | 3.017167 | 233 |
import os
import etcd
ETCD_HOST = os.getenv("ETCD_HOST", "localhost")
etcd_client = EtcdClient(host=ETCD_HOST, port=2379)
| [
11748,
28686,
198,
11748,
3503,
67,
628,
198,
198,
2767,
8610,
62,
39,
10892,
796,
28686,
13,
1136,
24330,
7203,
2767,
8610,
62,
39,
10892,
1600,
366,
36750,
4943,
198,
316,
10210,
62,
16366,
796,
17906,
10210,
11792,
7,
4774,
28,
276... | 2.358491 | 53 |
#!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2022-02-24 14:51
@author: johannes
"""
import flask
from flask import (
Flask,
render_template,
flash,
request,
redirect,
url_for
)
import os
import shutil
import sys
import glob
import functools
import datetime
import pandas as pd
import folium
from folium.plugins import FastMarkerCluster
from folium.plugins import Fullscreen
from werkzeug.utils import secure_filename
from threading import Thread
import requests
from io import StringIO
import cbs
import utils
PYTHON_VERSION = int(f'{sys.version_info.major}{sys.version_info.minor}')
UPLOAD_FOLDER = './tmp'
ALLOWED_EXTENSIONS = {'xlsx'}
PAGES = ('Home', 'Search', 'Upload')
app = Flask(__name__)
app.secret_key = '****************'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
"""Return bool."""
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def reset_temporary_folder():
"""Reset the temporary folder."""
today = datetime.date.today().strftime('%y%m%d')
if not os.path.exists(UPLOAD_FOLDER):
os.mkdir(UPLOAD_FOLDER)
else:
for f in glob.glob('./tmp/*/'):
if today not in f:
shutil.rmtree(f)
folder_today = os.path.join(UPLOAD_FOLDER, today)
if not os.path.exists(folder_today):
os.mkdir(folder_today)
def get_register_frame(raw=False):
"""Return dataframe.
Read master station list (SODC).
"""
# response = requests.request(
# "GET", "http://localhost:8005/getfile"
# )
# Store string data in a pandas Dataframe.
df = pd.read_csv(
# StringIO(response.text),
r'data\station.txt',
sep='\t',
header=0,
encoding='cp1252',
dtype=str,
keep_default_na=False,
)
if raw:
return df
else:
floats = ['LATITUDE_WGS84_SWEREF99_DD', 'LONGITUDE_WGS84_SWEREF99_DD']
df[floats] = df[floats].astype(float)
df['SYNONYM_NAMES'] = df['SYNONYM_NAMES'].str.replace('<or>', '; ')
return df.filter(
['LATITUDE_WGS84_SWEREF99_DD', 'LONGITUDE_WGS84_SWEREF99_DD',
'STATION_NAME', 'REG_ID', 'REG_ID_GROUP', 'SYNONYM_NAMES',
'OUT_OF_BOUNDS_RADIUS', 'LAT_DM', 'LONG_DM',
'LATITUDE_SWEREF99TM', 'LONGITUDE_SWEREF99TM'],
axis=1
)
def get_template_stations(path):
"""Return dataframe.
Read excel template with new stations.
"""
df = pd.read_excel(
path,
sheet_name='Provplatser',
dtype=str,
keep_default_na=False,
engine=None if PYTHON_VERSION >= 37 else 'openpyxl'
)
df = utils.eliminate_empty_rows(df)
utils.validate_coordinates(df)
utils.check_for_radius(df)
return df.filter(['Position WGS84 Dec N (DD.dddd)',
'Position WGS84 Dec E (DD.dddd)',
'Namn', 'Radie (m)'], axis=1)
def get_folium_map(file_name=None):
"""Return folium a map object."""
df = get_register_frame()
the_map = folium.Map(location=(60., 20.), zoom_start=5,
tiles='OpenStreetMap')
fs = Fullscreen()
folium.TileLayer('cartodbdark_matter').add_to(the_map)
fmc = FastMarkerCluster(df.values.tolist(), callback=cbs.callback)
fmc.layer_name = 'Register stations'
fmc_rad = FastMarkerCluster(df.values.tolist(), callback=cbs.callback_rad)
fmc_rad.layer_name = 'Register stations Radius'
the_map.add_child(fmc)
the_map.add_child(fmc_rad)
if file_name:
tmp_path = os.path.join(
app.config['UPLOAD_FOLDER'],
datetime.date.today().strftime('%y%m%d'),
file_name
)
try:
df_temp = get_template_stations(tmp_path)
fmc_tmp = FastMarkerCluster(df_temp.values.tolist(),
callback=cbs.callback_tmps)
fmc_tmp.layer_name = 'New stations'
fmc_tmp_rad = FastMarkerCluster(df_temp.values.tolist(),
callback=cbs.callback_rad_tmps)
fmc_tmp_rad.layer_name = 'New stations Radius'
the_map.add_child(fmc_tmp)
the_map.add_child(fmc_tmp_rad)
except BaseException:
pass
the_map.add_child(fs)
folium.LayerControl().add_to(the_map)
return the_map
def get_layout_active_spec(name):
"""Return active layout spec."""
return [
{'name': n, 'class': "active" if n == name else "", 'href': n.lower()}
for n in PAGES
]
@app.context_processor
def inject_today_date():
"""Return current year."""
return {'year': datetime.date.today().year}
@app.route('/cover.css', methods=['GET'])
@app.route('/station_app/cover.css', methods=['GET'])
@app.route('/dm_map.png', methods=['GET'])
@app.route('/station_app/dm_map.png', methods=['GET'])
@app.route('/searcher', methods=['GET'])
@app.route('/station_app/searcher', methods=['GET'])
def search():
"""Search station from the main NODC list."""
df = get_register_frame(raw=True)
spec = get_layout_active_spec('Search')
return render_template('searcher.html',
records=df.to_dict('records'),
active_spec=spec)
@app.route('/upload', methods=['GET', 'POST'])
@app.route('/station_app/upload', methods=['GET', 'POST'])
def upload():
"""Upload local file.
Needs to follow the station register template.
"""
spec = get_layout_active_spec('Upload')
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(
app.config['UPLOAD_FOLDER'],
datetime.date.today().strftime('%y%m%d'),
filename
))
return render_template('upload_file.html',
success=True,
active_spec=spec,
uploaded_file=filename)
return render_template('upload_file.html', active_spec=spec)
@app.route('/submit', methods=['GET', 'POST'])
@app.route('/station_app/submit', methods=['GET', 'POST'])
def submit():
"""Upload local file.
Needs to follow the station register template.
"""
spec = get_layout_active_spec('Upload')
if request.method == 'POST':
filename = os.path.join(
app.config['UPLOAD_FOLDER'],
datetime.date.today().strftime('%y%m%d'),
request.form.get('uploaded_file')
)
if filename:
return render_template('upload_file.html',
active_spec=spec,
connect_to_reg=True)
return render_template('upload_file.html', active_spec=spec)
@app.route('/map')
@app.route('/station_app/map')
def station_map():
"""Return html page based on a folium map."""
map_obj = get_folium_map(file_name=request.args.get('uploaded_file'))
return map_obj._repr_html_()
@app.route('/')
@app.route('/station_app/')
def home():
"""Return html page from template."""
Thread(target=reset_temporary_folder).start()
spec = get_layout_active_spec('Home')
return render_template('home.html', active_spec=spec)
if __name__ == '__main__':
app.run(port=5000)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
33160,
9447,
25374,
11,
14023,
25582,
2770,
290,
15084,
3225,
30766,
5136,
13,
198,
2,
13789,
25,
17168,
13789,
357,
3826,
38559,
24290,
13,
14116,
393,
2638,
137... | 2.145355 | 3,660 |
from flask import Flask, render_template, request
import requests
import json
from datetime import datetime
import csv
from activate import process_activation
app = Flask(__name__)
global_token = ""
@app.route('/activate', methods=['GET', 'POST'])
@app.route('/', methods=['GET', 'POST'])
if __name__ == '__main__':
app.run()
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
11748,
7007,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
269,
21370,
198,
6738,
15155,
1330,
1429,
62,
48545,
198,
198,
1324,
796,
46947,
7,
834,
... | 3.110092 | 109 |
import os
| [
11748,
28686,
628
] | 3.666667 | 3 |
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import permission_required
from django.views.generic import ArchiveIndexView, YearArchiveView, MonthArchiveView, DayArchiveView, DetailView, ListView, RedirectView
from django.views.generic.edit import DeleteView
from geoevents.core.views import CreateViewWithMessages, UpdateViewWithMessages
from geoevents.operations.forms import DeploymentForm, NewDeploymentForm, EventForm, LessonLearnedForm, ServiceForm, SitRepForm
from geoevents.operations.models import Deployment, Event, LessonLearned, SitRep, Service
from geoevents.operations.views import CreateService, EventsDashboard, EventPage, NewDeploymentFromIncident, \
SitRepCreateView, MustBeOwnerDeleteView, ServiceLists, KMLReponse, DeploymentView, view_service
from geoevents.operations.proxies import proxy_to
urlpatterns = [
url(r'^activeEvents/$', EventsDashboard.as_view(), name='active-incidents'),
url(r'^incidents/(?P<pk>\d+)/$', EventPage.as_view(), name='operations-view-incident'),
url(r'^incidents/(?P<pk>\d+)/(?P<slug>[\w\d\-]+)/$', EventPage.as_view(),
name='operations-view-incident-slug'),
url(r'^activeEvents/full/$',
EventsDashboard.as_view(template_name='event-list-dashboard-fullscreen.html'),
name='operations-view-full-dashboard'),
url(r'^incidents/full/(?P<pk>\d+)/', EventPage.as_view(template_name='incident-fullscreen.html'),
name='operations-view-full-incident'),
url(r'^incidents/manage/(?P<pk>\d+)/$',
permission_required('operations.change_event', reverse_lazy('home'))(
UpdateViewWithMessages.as_view(form_class=EventForm,
pk_url_kwarg='pk',
queryset=Event.objects.all(),
template_name='generic_form_page.html')),
name='operations-manage-incident-pk'),
url(r'^incidents/manage/$',
permission_required('operations.add_event', reverse_lazy('home'))(
CreateViewWithMessages.as_view(form_class=EventForm,
template_name='generic_form_page.html')),
name='operations-manage-incident'),
url(r'^incidents/archives/$', ArchiveIndexView.as_view(queryset=Event.objects.all(),
date_field='created',
template_name='incident-archive.html',
context_object_name='object_list'),
name='operations-view-incident-archive'),
url(r'^incidents/archives/(?P<year>\d{4})/$',
YearArchiveView.as_view(queryset=Event.objects.all(),
date_field='created',
template_name='incident-archive-year.html',
context_object_name='events'),
name='operations-view-incident-archive-year'),
url(r'^incidents/archives/(?P<year>\d{4})/(?P<month>\d{2})/$',
MonthArchiveView.as_view(queryset=Event.objects.all(),
date_field='created',
template_name='incident-archive-month.html',
month_format='%m'), name='operations-view-incident-archive-month'),
url(r'^incidents/archives/(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$',
DayArchiveView.as_view(queryset=Event.objects.all(),
date_field='created',
template_name='incident-archive-day.html',
month_format='%m',
), name='operations-view-incident-archive-day'),
url(r'^incidents/delete/(?P<pk>\d+)/$',
permission_required('operations.delete_event', reverse_lazy('home'))(
DeleteView.as_view(model=Event,
template_name='generic-delete.html',
success_url=reverse_lazy('active-incidents'))),
name='operations-delete-incident-pk'),
url(r'^incidents/kml/(?P<pk>\d+)/$', KMLReponse.as_view(queryset=Event.objects.all(),
template_name='incidents.kml',
context_object_name='incident'),
name='operations-view-incident-kml'),
url(r'^deployments/manage/$',
permission_required('operations.add_deployment', reverse_lazy('home'))(
NewDeploymentFromIncident.as_view(form_class=NewDeploymentForm,
template_name='generic_form_page.html')),
name='operations-manage-deployment'),
url(r'^deployments/manage/(?P<pk>\d+)/$',
permission_required('operations.change_deployment', reverse_lazy('home'))(
UpdateViewWithMessages.as_view(form_class=DeploymentForm,
queryset=Deployment.objects.all(),
template_name='generic_form_page.html',
pk_url_kwarg='pk',
)), name='operations-manage-deployment-pk'),
url(r'^deployments/delete/(?P<pk>\d+)/$',
permission_required('operations.delete_deployment', reverse_lazy('home'))(
DeleteView.as_view(model=Deployment,
template_name='generic-delete.html',
success_url=reverse_lazy('home'))),
name='operations-delete-deployment-pk'),
url(r'^deployments/(?P<pk>\d+)/', DeploymentView.as_view(queryset=Deployment.objects.all(),
template_name='deployment-detail.html',
context_object_name='item',
), name='operations-view-deployment-pk'),
url(r'^lessons-learned/manage/$',
permission_required('operations.add_lessonlearned', reverse_lazy('home'))(
CreateViewWithMessages.as_view(form_class=LessonLearnedForm,
template_name='generic_form_page.html',
)), name='operations-manage-lesson-learned'),
url(r'^lessons-learned/manage/(?P<pk>\d+)/$',
permission_required('operations.change_lessonlearned', reverse_lazy('home'))(
UpdateViewWithMessages.as_view(form_class=LessonLearnedForm,
queryset=LessonLearned.objects.all(),
pk_url_kwarg='pk',
template_name='generic_form_page.html',
)), name='operations-manage-lesson-learned-pk'),
url(r'^lessons-learned/(?P<pk>\d+)/', DetailView.as_view(queryset=LessonLearned.objects.all(),
template_name='lesson-learned-detail.html',
context_object_name='item',
), name='operations-view-lesson-learned-pk'),
url(r'^lessons-learned/delete/(?P<pk>\d+)/$',
permission_required('operations.delete_lessonlearned', reverse_lazy('home'))(
DeleteView.as_view(model=LessonLearned,
template_name='generic-delete.html',
success_url=reverse_lazy('home'))),
name='operations-delete-lesson-learned-pk'),
url(r'sitreps/$', ListView.as_view(queryset=SitRep.objects.filter(status=1),
template_name='sitreps-list.html',
paginate_by=25,
context_object_name='events'),
name='operations-view-sitreps'),
url(r'^sitreps/(?P<pk>\d+)/', DetailView.as_view(queryset=SitRep.objects.all(),
template_name='sitrep-detail.html',
context_object_name='item',
), name='operations-view-sitrep-pk'),
url(r'^sitreps/manage/$', permission_required('operations.add_sitrep', reverse_lazy('home'))(
SitRepCreateView.as_view(form_class=SitRepForm, template_name='generic_form_page.html')),
name='operations-manage-sitrep'),
url(r'^sitreps/manage/(?P<pk>\d+)/$',
permission_required('operations.change_sitrep', reverse_lazy('home'))(
UpdateViewWithMessages.as_view(form_class=SitRepForm, template_name='sitrep-manage.html',
queryset=SitRep.objects.all(), pk_url_kwarg='pk')),
name='operations-manage-sitrep-pk'),
url(r'^sitreps/delete/(?P<pk>\d+)/$',
permission_required('operations.delete_sitrep', reverse_lazy('home'))(
MustBeOwnerDeleteView.as_view(model=SitRep, template_name='generic-delete.html',
owner_field='owner', success_url=reverse_lazy('home'))),
name='operations-delete-sitrep-pk'),
## Services
url(r'^services/(?P<pk>\d+)/$', view_service,
name='operations-view-service'),
url(r'^services/manage/(?P<pk>\d+)/$',
permission_required('operations.change_service',
reverse_lazy('home'))(
UpdateViewWithMessages.as_view(form_class=ServiceForm,
template_name='service-manage.html',
queryset=Service.objects.all())),
name='operations-manage-service-pk'),
url(r'^services/manage/$',
permission_required('operations.add_service',
reverse_lazy('home'))(
CreateViewWithMessages.as_view(form_class=ServiceForm,
template_name='service-manage.html',
)), name='operations-manage-service'),
url(r'^services/manage/(?P<model>\w+)/(?P<model_pk>\d+)/$',
permission_required('operations.add_service',
reverse_lazy('home'))(CreateService.as_view()),
name='operations-manage-service-model'),
url(r'^services/delete/(?P<pk>\d+)/$',
permission_required('operations.delete_service', reverse_lazy('home'))(
DeleteView.as_view(model=Service,
template_name='generic-delete.html',
success_url=reverse_lazy('home'))),
name='operations-delete-service-pk'),
url(r'^services/$', ServiceLists.as_view(template_name='service-list.html', ),
name='operations-view-services'),
url(r'^proxy/(?P<path>.*)$', proxy_to, {'target_url': ''}),
url(r'^incidents/$', RedirectView.as_view(url=reverse_lazy('active-incidents'), permanent=True), name='home'),
]
| [
2,
770,
6276,
1366,
373,
4635,
329,
262,
471,
13,
311,
13,
5070,
739,
17453,
1400,
13,
370,
1314,
47,
22,
51,
12,
1485,
12,
34,
12,
37,
8054,
11,
290,
198,
2,
318,
2426,
284,
262,
6923,
287,
20671,
6060,
12,
15419,
36313,
17230,... | 1.632443 | 8,532 |
# ===STEP 2=== #
from selenium import webdriver
from time import sleep # Sleep will pause the program
import os # for getting the local path
dirpath = os.getcwd() # gets local path
driver = webdriver.Chrome(dirpath + "/chromedriver")
driver.get("http://www.dominos.com.cn/menu/menu.html?menuIndex=0&RNDSEED=c42905609c237eff75c1bd6767345f43") # dominos menu page
english = driver.find_element_by_css_selector('#lanEn > a')
english.click() # click on english to change tha language
sleep(4) # Wait for the website to update
myPizza = "Pepperoni Pizza"
pizzaText = driver.find_element_by_xpath('//div[text() = "{}"]'.format(myPizza)) # uses xpath to select the pizza label element
# Here, we have the label that says pepperoni pizza.
# This isn't what we want though
#
# We want to click the button that says to buy
# To do that, we have to have a look at the actual structure of the page
# The label and the button are all wrapped in a bigger div. We can get the button by finding
# the label's parent and getting it's child, the button
#
# That's essentially what the following code does
containerDiv = pizzaText.parent
button = containerDiv.find_element_by_tag_name('a')
button.click()
# Okay so it looks like that works. The things is, it won't let me buy anything because I haven't signed in
| [
2,
24844,
42135,
362,
18604,
1303,
198,
198,
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
640,
1330,
3993,
220,
1303,
17376,
481,
14985,
262,
1430,
198,
11748,
28686,
220,
1303,
329,
1972,
262,
1957,
3108,
198,
198,
15908,
6978... | 3.308861 | 395 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-07 15:40
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
16,
319,
1584,
12,
2931,
12,
2998,
1315,
25,
1821,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import io
from . import vis
from . import transform
from . import light
from . import render
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
764,
1330,
33245,
198,
6738,
764,
1330,
1490,
198,
6738,
764,
1330,
6121,
198,... | 4.137255 | 51 |
# Driver method
if __name__ == '__main__':
mat = [[1, 2, 3],
[4, 5, 6],
[7, 18, 14]];
n = 3;
if (findSaddlePoint(mat, n) ==
False):
print("No Saddle Po"); | [
201,
198,
201,
198,
2,
12434,
2446,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
201,
198,
220,
220,
220,
2603,
796,
16410,
16,
11,
362,
11,
513,
4357,
201,
198,
220,
220,
220,
220,
220,
220,
220,
... | 1.684211 | 133 |
import sys
import os
import time, traceback
import json
PACKAGE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0,PACKAGE_ROOT)
from commons.logger import Logger
from config.initializers import KAFKA_BROKER, KAFKA_TOPIC,KAFKA_CONSUMER_GROUP_ID
from kafka import KafkaConsumer
logger = Logger().get_logger("test")
if __name__ == "__main__":
consumer = Consumer()
while True:
consumer.kafka_consumer.poll()
for message in consumer.kafka_consumer:
try:
logger.debug ("topic=%s:partition=%d:offset=%d:" % (message.topic, message.partition,message.offset))
json_message = json.loads(message.value.decode())
#logger.debug('Datapoint from kafka: %s', json_message)
except ValueError:
logger.error("Failed to decode message from Kafka, skipping..")
except Exception as e:
logger.error("Generic exception while pulling datapoints from Kafka")
traceback.print_exc()
consumer.close()
sys.exit(1) | [
11748,
25064,
198,
11748,
28686,
198,
11748,
640,
11,
12854,
1891,
198,
11748,
33918,
198,
47,
8120,
11879,
62,
13252,
2394,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
... | 2.362069 | 464 |
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import re
import ament_index_python
def create_output_lines_filter(
filtered_prefixes=None,
filtered_patterns=None,
filtered_rmw_implementation=None
):
"""
Create a line filtering function to help output testing.
:param filtered_prefixes: A list of byte strings representing prefixes that will cause
output lines to be ignored if they start with one of the prefixes. By default lines
starting with the process ID (`b'pid'`) and return code (`b'rc'`) will be ignored.
:param filtered_patterns: A list of byte strings representing regexes that will cause
output lines to be ignored if they match one of the regexes.
:param filtered_rmw_implementation: RMW implementation for which the output will be
ignored in addition to the `filtered_prefixes`/`filtered_patterns`.
"""
filtered_prefixes = filtered_prefixes or get_default_filtered_prefixes()
filtered_patterns = filtered_patterns or get_default_filtered_patterns()
if filtered_rmw_implementation:
filtered_prefixes.extend(get_rmw_output_filter(
filtered_rmw_implementation, 'prefixes'
))
filtered_patterns.extend(get_rmw_output_filter(
filtered_rmw_implementation, 'patterns'
))
filtered_patterns = map(re.compile, filtered_patterns)
encoded_line_sep = os.linesep.encode('ascii')
return _filter
def create_output_lines_test(expected_lines):
"""Create output test given a list of expected lines."""
return io.BytesIO(), _collate, _match, expected_lines
def create_output_regex_test(expected_patterns):
"""Create output test given a list of expected matching regular expressions."""
return io.BytesIO(), _collate, _match, expected_patterns
def create_output_test_from_file(output_file):
"""
Create output test using the given file content.
:param output_file: basename (i.e. w/o extension) of either a .txt file containing the
lines to be matched or a .regex file containing patterns to be searched for.
"""
literal_file = output_file + '.txt'
if os.path.isfile(literal_file):
with open(literal_file, 'rb') as f:
expected_output = f.read().splitlines()
return create_output_lines_test(expected_output)
regex_file = output_file + '.regex'
if os.path.isfile(regex_file):
with open(regex_file, 'rb') as f:
patterns = [re.compile(regex) for regex in f.read().splitlines()]
return create_output_regex_test(patterns)
raise RuntimeError('could not find output check file: {}'.format(output_file))
| [
2,
15069,
2864,
4946,
8090,
47061,
5693,
11,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 3.023452 | 1,066 |
from vbr.tableclasses import Status
__all__ = ["StatusApi"]
| [
6738,
410,
1671,
13,
11487,
37724,
1330,
12678,
198,
198,
834,
439,
834,
796,
14631,
19580,
32,
14415,
8973,
628
] | 3.1 | 20 |
# Copyright 2018 Brad Jascob
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
class ProgressBar(object):
''' Progress bar for display
Args:
end_val (int): The value at 100%
bar_len (int): Number of ascii characters in length
'''
def update(self, val):
''' Redraw the progress
Args:
val (int): value of the item to be displayed
'''
percent = float(val) / self.end_val
if percent > 1.0:
percent = 1.0
hashes = '#' * int(round(percent * self.bar_len))
spaces = ' ' * (self.bar_len - len(hashes))
sys.stdout.write('\rPercent: [{0}] {1}%'.format(hashes + spaces,
int(round(100 * percent))))
sys.stdout.flush()
def clear(self):
''' Clear the indicator from the screen'''
spaces = ' ' * (30 + self.bar_len)
sys.stdout.write('\r{0}'.format(spaces))
sys.stdout.write('\r')
| [
2,
15069,
2864,
8114,
449,
3372,
672,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
... | 2.558464 | 573 |
from .__Fundamental_44 import *
import typing
import System.IO
import System.Collections.Generic
import System
import QuantConnect.Data.Fundamental.MultiPeriodField
import QuantConnect.Data.Fundamental
import QuantConnect.Data
import QuantConnect
import datetime
class NetLongTermDebtIssuanceCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The increase or decrease between periods of long term debt. Long term debt includes notes payable, bonds payable, mortgage
loans, convertible debt, subordinated debt and other types of long term debt.
NetLongTermDebtIssuanceCashFlowStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetMargin(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Refers to the ratio of net income to revenue. Morningstar calculates the ratio by using the underlying data reported in the company
filings or reports: Net Income / Revenue.
NetMargin(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
OneYear: float
SixMonths: float
ThreeMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetNonOperatingInterestIncomeExpenseIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Net-Non Operating interest income or expenses caused by financing activities.
NetNonOperatingInterestIncomeExpenseIncomeStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetOccupancyExpenseIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Occupancy expense may include items, such as depreciation of facilities and equipment, lease expenses, property taxes and
property and casualty insurance expense. This item is usually only available for bank industry.
NetOccupancyExpenseIncomeStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetOtherFinancingChargesCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Miscellaneous charges incurred due to Financing activities.
NetOtherFinancingChargesCashFlowStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetOtherInvestingChangesCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Miscellaneous charges incurred due to Investing activities.
NetOtherInvestingChangesCashFlowStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetOutwardLoansCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Adjustments due to net loans to/from outsiders in the Investing Cash Flow section.
NetOutwardLoansCashFlowStatement(store: IDictionary[str, Decimal])
"""
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetPolicyholderBenefitsAndClaimsIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The net provision in current period for future policy benefits, claims, and claims settlement expenses incurred in the claims
settlement process before the effects of reinsurance arrangements. The value is net of the effects of contracts assumed and
ceded.
NetPolicyholderBenefitsAndClaimsIncomeStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetPPEBalanceSheet(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Tangible assets that are held by an entity for use in the production or supply of goods and services, for rental to others, or for
administrative purposes and that are expected to provide economic benefit for more than one year; net of accumulated
depreciation.
NetPPEBalanceSheet(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetPPEPurchaseAndSaleCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The net change between Purchases/Sales of PPE.
NetPPEPurchaseAndSaleCashFlowStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetPreferredStockIssuanceCashFlowStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
The increase or decrease between periods of preferred stock.
NetPreferredStockIssuanceCashFlowStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
OneMonth: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
TwoMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
class NetPremiumsWrittenIncomeStatement(QuantConnect.Data.Fundamental.MultiPeriodField):
"""
Net premiums written are gross premiums written less ceded premiums. This item is usually only available for insurance industry.
NetPremiumsWrittenIncomeStatement(store: IDictionary[str, Decimal])
"""
NineMonths: float
SixMonths: float
ThreeMonths: float
TwelveMonths: float
Store: typing.List[QuantConnect.Data.Fundamental.MultiPeriodField.PeriodField]
| [
6738,
764,
834,
24553,
6860,
62,
2598,
1330,
1635,
198,
11748,
19720,
198,
11748,
4482,
13,
9399,
198,
11748,
4482,
13,
5216,
26448,
13,
46189,
198,
11748,
4482,
198,
11748,
16972,
13313,
13,
6601,
13,
24553,
6860,
13,
29800,
5990,
2101... | 3.121058 | 2,156 |
import argparse
import logging
import sys
import os
import traceback
import time
import subprocess
import getpass
import datetime
from inspect import getfullargspec
from argparse import RawTextHelpFormatter
from du.denv.Denv import Denv
from du.denv.Denv import ErrCodes
# Host platform string for Windows
PLATFORM_OS_WIN32 = "win32"
# Host platform string for Linux
PLATFORM_OS_LINUX = "linux"
# Host platform string for MAC OS
PLATFORM_OS_MACOS = "darwin"
try:
import cmd2
except ModuleNotFoundError:
if sys.platform == PLATFORM_OS_WIN32:
# Install automatically via PIP if missing on Windows (we can do this from script)
print("Installing cmd2 ..")
# Make sure we have pip installed (this will fail on Linux, since it has to be done via apt)
subprocess.check_call([sys.executable, "-m", "ensurepip"])
# Install cmd2 dependency
subprocess.check_call([sys.executable, "-m", "pip", "install", "cmd2"])
import cmd2
elif sys.platform == PLATFORM_OS_LINUX:
# On linux the user has to do it manually
print("#" * 80 + "\n")
print("Dependencies missing, run the following commands to install them: ")
# Command to install PIP
print("\n\tsudo apt-get install -y python3-pip")
# Command to install dependencies via PIP
print("\n\tsudo python3 -m pip install cmd2")
print("\n\tsudo python3 -m pip install gnureadline")
print("\n" + "#" * 80 + "\n")
sys.exit(-1)
elif sys.platform == PLATFORM_OS_MACOS:
# On MAC OS the user has to do it manually
print("#" * 80 + "\n")
print("Dependencies missing, run the following commands to install them: ")
# Command to install dependencies via PIP
print("\n\tpython3 -m pip install cmd2 --user")
print("\n\tpython3 -m pip install gnureadline --user")
print("\n" + "#" * 80 + "\n")
sys.exit(-1)
else:
print("Unhandled platform {}".format(sys.platform))
sys.exit(-1)
from cmd2 import with_argparser
logger = logging.getLogger(__name__.split(".")[-1])
try:
import cmd2
except ModuleNotFoundError:
# Install automatically via PIP if missing
logger.info("Installing cmd2 ..")
subprocess.check_call([sys.executable, "-m", "pip", "install", "cmd2"])
import cmd2
from cmd2 import with_argparser
if __name__ == "__main__":
sys.exit(main())
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
12854,
1891,
198,
11748,
640,
198,
11748,
850,
14681,
198,
11748,
651,
6603,
198,
11748,
4818,
8079,
198,
198,
6738,
10104,
1330,
651,
12853,
853,
16... | 2.613248 | 936 |
# For tokens
ONE_OR_MORE = "+"
OR = "|"
# Reserved is a generic keyword type.
# Meant to be a builtin for the TCL syntax.
RESERVED = "RESERVED"
# Ignore is a flag for characters that don't have functional impact on the TCL source.
# We are using it quite liberally to ignore all whitespace and newline characters.
IGNORE = "IGNORE"
# Int is a flag applied on numeric tokens.
INT = "INT"
# Id is a flag applied on word tokens.
ID = "ID"
# Environment key to provide extra grammar files to parse.
# Separated with os.pathsep.
# Full paths are expected.
GRAMMAR_PLUGIN_ENV_KEY = "STUNNING_BNF_GRAMMAR_FILES"
| [
2,
1114,
16326,
198,
11651,
62,
1581,
62,
23346,
796,
43825,
1,
198,
1581,
796,
366,
91,
1,
198,
198,
2,
33876,
318,
257,
14276,
21179,
2099,
13,
198,
2,
2185,
415,
284,
307,
257,
3170,
259,
329,
262,
309,
5097,
15582,
13,
198,
... | 2.920188 | 213 |
import os
print(os.path.abspath(__file__))
| [
11748,
28686,
198,
4798,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
834,
7753,
834,
4008,
198
] | 2.388889 | 18 |
# Copyright (c) 2020 Club Raiders Project
# https://github.com/HausReport/ClubRaiders
#
# SPDX-License-Identifier: BSD-3-Clause
#
# SPDX-License-Identifier: BSD-3-Clause
from typing import Dict
from craid.eddb.base.NamedItem import NamedItem
| [
2,
220,
220,
15069,
357,
66,
8,
12131,
6289,
16120,
4935,
198,
2,
220,
220,
3740,
1378,
12567,
13,
785,
14,
39,
8717,
19100,
14,
42350,
49043,
364,
198,
2,
198,
2,
220,
220,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
... | 2.663158 | 95 |
from .version import __version__
from .genre import main, process
| [
6738,
764,
9641,
1330,
11593,
9641,
834,
198,
6738,
764,
35850,
1330,
1388,
11,
1429,
198
] | 4.125 | 16 |
# -*- coding: utf-8 -*-
try:
from django.urls import reverse
except ImportError: # Will be removed in Django 2.0
from django.core.urlresolvers import reverse
from social_core.backends.oauth import BaseOAuth2
from .settings import DRFSO2_PROPRIETARY_BACKEND_NAME, DRFSO2_URL_NAMESPACE
from oauth2_provider.models import AccessToken
from social_django.models import UserSocialAuth
from django.contrib.auth.models import User
class DjangoOAuth2(BaseOAuth2):
"""Default OAuth2 authentication backend used by this package"""
name = DRFSO2_PROPRIETARY_BACKEND_NAME
AUTHORIZATION_URL = reverse(DRFSO2_URL_NAMESPACE + ':authorize'
if DRFSO2_URL_NAMESPACE else 'authorize')
ACCESS_TOKEN_URL = reverse(DRFSO2_URL_NAMESPACE + ':token'
if DRFSO2_URL_NAMESPACE else 'authorize')
def do_auth(self, access_token, *args, **kwargs):
"""Finish the auth process once the access_token was retrieved"""
data = self.user_data(access_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
kwargs.update({'response': response, 'backend': self})
if response.get(self.ID_KEY, None):
user = User.objects.get(pk=response[self.ID_KEY])
return user
else:
return None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
16341,
17267,
12331,
25,
220,
1303,
2561,
307,
4615,
287,
37770,
362,
13,
15,
198... | 2.337861 | 589 |
import json
from copy import copy, deepcopy
from io import StringIO
import pandas
from pandas import DataFrame # Required for eval
FORCE_INCLUDE_DATAFRAME_IN_PYCHARM = DataFrame() # Prevents clean=up removal
import pytest
from deepdiff import DeepDiff
from hypothesis import given
from hypothesis.extra.pandas import data_frames, column
from canvasxpress.data.matrix import CXDataframeData
from tests.util.hypothesis_support import everything_except
csv_sample = """
"C1","C2","C3"
1,2,3
4,5,6
"""
df_sample = pandas.read_csv(
StringIO(csv_sample),
index_col=False
)
@given(everything_except(dict, str))
@given(everything_except(dict, str))
@given(
data_frames([column('A', dtype=int), column('B', dtype=float)]),
data_frames([column('C', dtype=int), column('D', dtype=float)]),
data_frames([column('E', dtype=int), column('F', dtype=float)]),
data_frames([column('G', dtype=int), column('H', dtype=float)])
)
| [
11748,
33918,
198,
6738,
4866,
1330,
4866,
11,
2769,
30073,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
11748,
19798,
292,
198,
6738,
19798,
292,
1330,
6060,
19778,
220,
1303,
20906,
329,
5418,
198,
198,
13775,
5222,
62,
1268,
5097,
... | 2.773913 | 345 |
import os
from gradetools.py.execute2.insert import InsertConfig
PYTHON_EXTENSIONS = ('ipynb', 'py')
EXCEL_EXTENSIONS = ('xlsx', 'xls', 'xlsm')
PYTHON_FOLDER = os.path.sep.join(['Grading', 'Python'])
PYTHON_PLAGIARISM_FOLDER = os.path.sep.join([PYTHON_FOLDER, 'Plagiarism'])
EXCEL_FOLDER = os.path.sep.join(['Grading', 'Excel'])
PYTHON_ALWAYS_INSERT_CONFIGS = [
InsertConfig(
[
'import matplotlib',
'matplotlib.use("Agg")'
],
0
)
] | [
11748,
28686,
198,
198,
6738,
3915,
316,
10141,
13,
9078,
13,
41049,
17,
13,
28463,
1330,
35835,
16934,
198,
198,
47,
56,
4221,
1340,
62,
13918,
16938,
11053,
796,
19203,
541,
2047,
65,
3256,
705,
9078,
11537,
198,
6369,
34,
3698,
62,... | 1.929134 | 254 |
# time O(n^2)
# space O(1)
| [
2,
640,
440,
7,
77,
61,
17,
8,
201,
198,
2,
2272,
440,
7,
16,
8,
201,
198
] | 1.611111 | 18 |
'''pessoas = {'nome': 'Lorran', 'sexo': 'M', 'idade': 22}
#pessoas['peso'] = '100.0' #Esse comando adiciona uma linha
for k, v in pessoas.items():
print(f'{k} = {v}')'''
#print(pessoas.items())
#print(pessoas.values())
#print(pessoas.keys())
#print(f'O {pessoas["nome"]} tem {pessoas["idade"]} anos')
'''Brasil = []
estado1 = {'uf': 'Rio de janeiro', 'sigla': 'RJ'}
estado2 = {'uf': 'São Paulo', 'sigla': 'SP'}
Brasil.append(estado1)
Brasil.append(estado2)
print(Brasil[0]['sigla'])'''
estado = dict()
brasil = list()
for c in range(0, 3):
estado['uf'] = str(input('Unidade federativa: '))
estado['sigla'] = str(input('Silga do Estado: '))
brasil.append(estado.copy())#Jamais podemos fazer um fatiamento em um dicionario fatiamento se faz desse jeito
# ---> [:]
for e in brasil:
for k, v in e.keys():
print(v, end=' ') | [
7061,
6,
79,
408,
78,
292,
796,
1391,
6,
77,
462,
10354,
705,
43,
273,
2596,
3256,
705,
8044,
78,
10354,
705,
44,
3256,
705,
312,
671,
10354,
2534,
92,
201,
198,
2,
79,
408,
78,
292,
17816,
12272,
78,
20520,
796,
705,
3064,
13,
... | 2.011416 | 438 |
import hashlib
str = "String_senha" #Senha em texto aberto
hash_sha3_512 = hashlib.new("sha3_512", str.encode()) #Algoritimo de Hash selecionado e executado
print(hash_sha3_512)
print(hash_sha3_512.hexdigest())#Hash gerada
| [
198,
11748,
12234,
8019,
628,
198,
2536,
796,
366,
10100,
62,
6248,
3099,
1,
1303,
10445,
3099,
795,
2420,
78,
450,
13806,
198,
198,
17831,
62,
26270,
18,
62,
25836,
796,
12234,
8019,
13,
3605,
7203,
26270,
18,
62,
25836,
1600,
965,
... | 2.505495 | 91 |
"""Wrapper for Xinerama
Generated with:
tools/genwrappers.py xinerama
Do not modify this file.
"""
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xinerama')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
import pyglet.libs.x11.xlib
struct_anon_93._fields_ = [
('screen_number', c_int),
('x_org', c_short),
('y_org', c_short),
('width', c_short),
('height', c_short),
]
# /usr/include/X11/extensions/Xinerama.h:40
XineramaScreenInfo = struct_anon_93
Display = pyglet.libs.x11.xlib.Display
# /usr/include/X11/extensions/Xinerama.h:44
XineramaQueryExtension = _lib.XineramaQueryExtension
XineramaQueryExtension.restype = c_int
XineramaQueryExtension.argtypes = [
POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:50
XineramaQueryVersion = _lib.XineramaQueryVersion
XineramaQueryVersion.restype = c_int
XineramaQueryVersion.argtypes = [
POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:56
XineramaIsActive = _lib.XineramaIsActive
XineramaIsActive.restype = c_int
XineramaIsActive.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/Xinerama.h:67
XineramaQueryScreens = _lib.XineramaQueryScreens
XineramaQueryScreens.restype = POINTER(XineramaScreenInfo)
XineramaQueryScreens.argtypes = [POINTER(Display), POINTER(c_int)]
__all__ = ['XineramaScreenInfo', 'XineramaQueryExtension',
'XineramaQueryVersion', 'XineramaIsActive', 'XineramaQueryScreens']
| [
37811,
36918,
2848,
329,
1395,
7274,
1689,
198,
198,
8645,
515,
351,
25,
198,
31391,
14,
5235,
29988,
11799,
13,
9078,
2124,
7274,
1689,
198,
198,
5211,
407,
13096,
428,
2393,
13,
198,
37811,
198,
198,
11748,
269,
19199,
198,
6738,
26... | 2.497245 | 726 |
#%%
from localsearch.local_search import NeighborhoodRelation
from localsearch.simulated_annealing import SimulatedAnnealing
from typing import Callable
import networkx as nx
| [
2,
16626,
198,
6738,
1957,
12947,
13,
12001,
62,
12947,
1330,
37914,
6892,
341,
198,
6738,
1957,
12947,
13,
14323,
4817,
62,
21952,
4272,
1330,
3184,
4817,
43227,
4272,
198,
6738,
19720,
1330,
4889,
540,
198,
11748,
3127,
87,
355,
299,
... | 4.069767 | 43 |
from django.shortcuts import render
from hdcp_app.models import test_table
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
289,
67,
13155,
62,
1324,
13,
27530,
1330,
1332,
62,
11487,
198,
2,
13610,
534,
5009,
994,
13,
198
] | 3.482759 | 29 |
import numpy as np
import sn_plotter_metrics.nsnPlot as nsn_plot
import matplotlib.pylab as plt
import argparse
from optparse import OptionParser
import glob
# from sn_tools.sn_obs import dataInside
import healpy as hp
import numpy.lib.recfunctions as rf
import pandas as pd
import os
import multiprocessing
def processMulti(toproc, Npixels, outFile, nproc=1):
"""
Function to analyze metric output using multiprocesses
The results are stored in outFile (npy file)
Parameters
--------------
toproc: pandas df
data to process
Npixels: numpy array
array of the total number of pixels per OS
outFile: str
output file name
nproc: int, opt
number of cores to use for the processing
"""
nfi = len(toproc)
tabfi = np.linspace(0, nfi, nproc+1, dtype='int')
print(tabfi)
result_queue = multiprocessing.Queue()
# launching the processes
for j in range(len(tabfi)-1):
ida = tabfi[j]
idb = tabfi[j+1]
p = multiprocessing.Process(name='Subprocess-'+str(j), target=processLoop, args=(
toproc[ida:idb], Npixels, j, result_queue))
p.start()
# grabing the results
resultdict = {}
for j in range(len(tabfi)-1):
resultdict.update(result_queue.get())
for p in multiprocessing.active_children():
p.join()
resdf = pd.DataFrame()
for j in range(len(tabfi)-1):
resdf = pd.concat((resdf, resultdict[j]))
print('finally', resdf.columns)
# saving the results in a npy file
np.save(outFile, resdf.to_records(index=False))
def processLoop(toproc, Npixels, j=0, output_q=None):
"""
Function to analyze a set of metric result files
Parameters
--------------
toproc: pandas df
data to process
Npixels: numpy array
array of the total number of pixels per OS
j: int, opt
internal int for the multiprocessing
output_q: multiprocessing.queue
queue for multiprocessing
Returns
-----------
pandas df with the following cols:
zlim, nsn, sig_nsn, nsn_extra, dbName, plotName, color,marker
"""
# this is to get summary values here
resdf = pd.DataFrame()
for index, val in toproc.iterrows():
dbName = val['dbName']
idx = Npixels['dbName'] == dbName
npixels = Npixels[idx]['npixels'].item()
metricdata = nsn_plot.NSNAnalysis(dirFile, val, metricName, fieldType,
nside, npixels=npixels)
# metricdata.plot()
# plt.show()
if metricdata.data_summary is not None:
resdf = pd.concat((resdf, metricdata.data_summary))
if output_q is not None:
output_q.put({j: resdf})
else:
return resdf
def print_best(resdf, ref_var='nsn', num=10, name='a'):
"""
Method to print the "best" OS maximizing ref_var
Parameters
--------------
resdf: pandas df
data to process
ref_var: str, opt
variable chosen to rank the strategies (default: nsn)
num: int, opt
number of OS to display)
"""
ressort = pd.DataFrame(resdf)
ressort = ressort.sort_values(by=[ref_var], ascending=False)
ressort['rank'] = ressort[ref_var].rank(
ascending=False, method='first').astype('int')
print(ressort[['dbName', ref_var, 'rank']][:num])
ressort['dbName'] = ressort['dbName'].str.split('v1.4_10yrs').str[0]
ressort['dbName'] = ressort['dbName'].str.rstrip('_')
ressort[['dbName', ref_var, 'rank']][:].to_csv(
'OS_best_{}.csv'.format(name), index=False)
def rankCadences(resdf, ref_var='nsn'):
"""
Method to print the "best" OS maximizing ref_var
Parameters
--------------
resdf: pandas df
data to process
ref_var: str, opt
variable chosen to rank the strategies (default: nsn)
Returns
-----------
original pandas df plus rank
"""
ressort = pd.DataFrame(resdf)
ressort = ressort.sort_values(by=[ref_var], ascending=False)
ressort['rank'] = ressort[ref_var].rank(
ascending=False, method='first').astype('int')
return ressort
def plotSummary(resdf, ref=False, ref_var='nsn'):
"""
Method to draw the summary plot nSN vs zlim
Parameters
---------------
resdf: pandas df
dat to plot
ref: bool, opt
if true, results are displayed from a reference cadence (default: False)
ref_var: str, opt
column from which the reference OS is chosen (default: nsn_ref
"""
fig, ax = plt.subplots()
zlim_ref = -1
nsn_ref = -1
if ref:
ido = np.argmax(resdf[ref_var])
zlim_ref = resdf.loc[ido, 'zlim']
nsn_ref = resdf.loc[ido, 'nsn']
print(zlim_ref, nsn_ref)
"""
if zlim_ref > 0:
mscatter(zlim_ref-resdf['zlim'], resdf['nsn']/nsn_ref, ax=ax,
m=resdf['marker'].to_list(), c=resdf['color'].to_list())
else:
mscatter(resdf['zlim'], resdf['nsn'], ax=ax,
m=resdf['marker'].to_list(), c=resdf['color'].to_list())
"""
for ii, row in resdf.iterrows():
if zlim_ref > 0:
ax.text(zlim_ref-row['zlim'], row['nsn']/nsn_ref, row['dbName'])
else:
ax.plot(row['zlim'], row['nsn'], marker=row['marker'],
color=row['color'], ms=10)
ax.text(row['zlim']+0.001, row['nsn'], row['dbName'], size=12)
ax.grid()
ax.set_xlabel('$z_{faint}$')
ax.set_ylabel('$N_{SN}(z\leq z_{faint})$')
def plotCorrel(resdf, x=('', ''), y=('', '')):
"""
Method for 2D plots
Parameters
---------------
resdf: pandas df
data to plot
x: tuple
x-axis variable (first value: colname in resdf; second value: x-axis label)
y: tuple
y-axis variable (first value: colname in resdf; second value: y-axis label)
"""
fig, ax = plt.subplots()
resdf = filter(resdf, ['alt_sched'])
for ik, row in resdf.iterrows():
varx = row[x[0]]
vary = row[y[0]]
ax.plot(varx, vary, marker=row['marker'], color=row['color'])
#ax.text(varx+0.1, vary, row['dbName'], size=10)
ax.set_xlabel(x[1])
ax.set_ylabel(y[1])
ax.grid()
def plotBarh(resdf, varname,leg):
"""
Method to plot varname - barh
Parameters
---------------
resdf: pandas df
data to plot
varname: str
column to plot
"""
fig, ax = plt.subplots(figsize=(10,5))
fig.subplots_adjust(left=0.3)
resdf = resdf.sort_values(by=[varname])
resdf['dbName'] = resdf['dbName'].str.split('_10yrs', expand=True)[0]
ax.barh(resdf['dbName'], resdf[varname], color=resdf['color'])
ax.set_xlabel(r'{}'.format(leg))
ax.tick_params(axis='y', labelsize=15.)
plt.grid(axis='x')
#plt.tight_layout
plt.savefig('Plots_pixels/Summary_{}.png'.format(varname))
def filter(resdf, strfilt=['_noddf']):
"""
Function to remove OS according to their names
Parameters
---------------
resdf: pandas df
data to process
strfilt: list(str),opt
list of strings used to remove OS (default: ['_noddf']
"""
for vv in strfilt:
idx = resdf['dbName'].str.contains(vv)
resdf = pd.DataFrame(resdf[~idx])
return resdf
parser = OptionParser(
description='Display NSN metric results for WFD fields')
parser.add_option("--dirFile", type="str", default='/sps/lsst/users/gris/MetricOutput',
help="file directory [%default]")
parser.add_option("--nside", type="int", default=64,
help="nside for healpixels [%default]")
parser.add_option("--fieldType", type="str", default='WFD',
help="field type - DD, WFD, Fake [%default]")
parser.add_option("--nPixelsFile", type="str", default='ObsPixels_fbs14_nside_64.npy',
help="file with the total number of pixels per obs. strat.[%default]")
parser.add_option("--listdb", type="str", default='plot_scripts/input/WFD_test.csv',
help="list of dbnames to process [%default]")
parser.add_option("--tagbest", type="str", default='snpipe_a',
help="tag for the best OS [%default]")
opts, args = parser.parse_args()
# Load parameters
dirFile = opts.dirFile
nside = opts.nside
fieldType = opts.fieldType
metricName = 'NSN'
nPixelsFile = opts.nPixelsFile
listdb = opts.listdb
tagbest = opts.tagbest
metricTot = None
metricTot_med = None
toproc = pd.read_csv(listdb,comment='#')
pixArea = hp.nside2pixarea(nside, degrees=True)
x1 = -2.0
color = 0.2
if os.path.isfile(nPixelsFile):
Npixels = np.load(nPixelsFile)
else:
print('File with the total number of pixels not found')
r = toproc.copy()
r['npixels'] = 0.
Npixels = r.to_records(index=False)
print(Npixels.dtype)
outFile = 'Summary_WFD_{}.npy'.format(tagbest)
if not os.path.isfile(outFile):
processMulti(toproc, Npixels, outFile, nproc=8)
resdf = pd.DataFrame(np.load(outFile, allow_pickle=True))
print(resdf.columns)
resdf['dbName'] = resdf['dbName'].str.split('_10yrs').str[0]
# filter cadences here
resdf = filter(resdf, ['_noddf', 'footprint_stuck_rolling', 'weather'])
# rank cadences
resdf = rankCadences(resdf)
# select only the first 20
idx = resdf['rank'] <= 22
resdf = resdf[idx]
# Summary plot
#plotSummary(resdf, ref=False)
# 2D plots
"""
plotCorrel(resdf, x=('cadence', 'cadence'), y=('nsn', '#number of supernovae'))
plotBarh(resdf, 'cadence')
"""
plotBarh(resdf, 'cadence','cadence')
plotBarh(resdf, 'season_length','season length')
"""
bandstat = ['u','g','r','i','z','y','gr','gi','gz','iz','uu','gg','rr','ii','zz','yy']
for b in bandstat:
plotBarh(resdf, 'cadence_{}'.format(b),'Effective cadence - {} band'.format(b))
print('hello',resdf)
"""
#plotBarh(resdf, 'N_{}_tot'.format(b))
"""
for bb in 'grizy':
plotBarh(resdf, 'N_{}{}'.format(b,bb))
"""
# plotCorrel(resdf, x=('cadence_{}'.format(b), 'cadence_{}'.format(b)), y=(
# 'nsn', '#number of supernovae'))
#plotBarh(resdf, 'N_total')
print_best(resdf, num=20, name=tagbest)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
3013,
62,
29487,
353,
62,
4164,
10466,
13,
5907,
77,
43328,
355,
299,
16184,
62,
29487,
198,
11748,
2603,
29487,
8019,
13,
79,
2645,
397,
355,
458,
83,
198,
11748,
1822,
29572,
198,
6738,
217... | 2.295232 | 4,383 |
import re
from collections import Counter
INPUT_FILE = "../../input/06.txt"
# pre-written code starts here
with open(INPUT_FILE) as f:
input_ = f.read()
blocks = [block.split("\n") for block in input_.split("\n\n")]
lines = [line.strip() for line in input_.split("\n")]
wordlines = [line.split() for line in lines]
nums = intify(lines)
alphanums = [re.findall(r"[\w+-]+", line) for line in lines]
renums = [intify(line) for line in alphanums]
# pre-written code ends here
data = [int(x) for x in lines[0].split(",")]
days = 80
for day in range(days):
new_data = []
extras = []
for thing in data:
new_point = thing - 1
if new_point == -1:
new_data.append(6)
extras.append(8)
else:
new_data.append(new_point)
data = new_data + extras
print(len(data))
data = {key: val for key, val in Counter([int(x) for x in lines[0].split(",")]).items()}
print(data)
days = 256
for day in range(days):
data = {(key - 1): val for key, val in data.items()}
extras = data.get(-1, 0)
data[6] = data.get(6, 0) + extras
data[8] = extras
data[-1] = 0
print(sum(data.values()))
| [
11748,
302,
198,
6738,
17268,
1330,
15034,
198,
198,
1268,
30076,
62,
25664,
796,
366,
40720,
40720,
15414,
14,
3312,
13,
14116,
1,
198,
198,
2,
662,
12,
15266,
2438,
4940,
994,
628,
198,
4480,
1280,
7,
1268,
30076,
62,
25664,
8,
35... | 2.372709 | 491 |
from google.appengine.ext import ndb
from rest_gae.users import User as RESTUser
class User(RESTUser):
"""Our own user class"""
organization = ndb.KeyProperty(kind='Organization')
role = ndb.KeyProperty(kind='Role', repeated=True)
# # This is optional, but if we use a RESTMeta - we must inherit it (and not
# # run over the original properties)
# class RESTMeta(RESTUser.RESTMeta):
# When a new instance is created, this property will be set to the
# logged-in user's organization if defined
# user_organization_property = 'organization'
# When a new instance is created, this property will be set to the
# logged-in user's role if defined
# user_role_property = 'role'
class Organization(ndb.Model):
'''An organization grouping of users'''
name = ndb.StringProperty()
owner = ndb.KeyProperty(kind='User')
class Role(ndb.Model):
'''A security role at an organization'''
name = ndb.StringProperty()
owner = ndb.KeyProperty(kind='User')
organization = ndb.KeyProperty(kind='Organization')
| [
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
299,
9945,
198,
6738,
1334,
62,
25002,
13,
18417,
1330,
11787,
355,
30617,
12982,
628,
198,
4871,
11787,
7,
49,
6465,
12982,
2599,
628,
220,
220,
220,
37227,
5122,
898,
2836,
1398,
37811,
... | 3.097983 | 347 |
"""
Copyright (c) 2020 LG Electronics Inc.
SPDX-License-Identifier: MIT
"""
import bb
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from devtool import setup_tinfoil, DevtoolError
logger = logging.getLogger("devtool")
logger.setLevel(logging.WARNING)
class Task(object):
"""Task representation class
"""
class Recipe(object):
"""Recipe representation class
"""
def parse(args, basepath):
"""Return the relevant information of the recipe
"""
try:
tmpdir = tempfile.mkdtemp(prefix="devtool-cache-")
env = os.environ.copy()
env["BB_ENV_EXTRAWHITE"] = env.get("BB_ENV_EXTRAWHITE", "") + " TMPDIR:forcevariable"
env["TMPDIR:forcevariable"] = tmpdir
cmd = "-c %s" % args.cmd if args.cmd else ""
output = execute("bitbake %s -n %s" % (args.recipe, cmd), env=env)
tids = []
matcher = re.compile("NOTE: Running(?: setscene)? task [0-9]+ of [0-9]+ \(([^)]+)\)")
for line in output.splitlines():
matched = matcher.match(line)
if matched:
tids.append(matched.group(1))
finally:
shutil.rmtree(tmpdir)
try:
tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
tasks = []
recipes = []
for tid in tids:
(mc, fn, taskname) = bb.runqueue.split_tid(tid)
pn = tinfoil.cooker.recipecaches[mc].pkg_fn[fn]
tasks.append(Task(tid, pn, taskname))
if not pn in recipes:
data = tinfoil.parse_recipe_file(fn)
fetcher = Fetch2(data)
if fetcher.size() > 0:
recipes.append(Recipe(pn, fetcher.check_premirrors()))
tasks.sort(key=lambda x: (x.pn, x.task))
recipes.sort(key=lambda x: x.pn)
return tasks, recipes
finally:
tinfoil.shutdown()
def cache(args, config, basepath, workspace):
"""Show the shared state cache and source availability of the recipe
"""
print("INFO: Parsing in progress... This may take a few minutes to complete.")
try:
tasks, recipes = parse(args, basepath)
from operator import methodcaller
found_shared_state = sorted([x for x in tasks if x.isSetsceneTask()], key=methodcaller("__str__"))
missed_shared_state = sorted([x for x in tasks if not x.isSetsceneTask()], key=methodcaller("__str__"))
found_source = sorted([x for x in recipes if x.isAvailable()], key=methodcaller("__str__"))
missed_source = sorted([x for x in recipes if not x.isAvailable()], key=methodcaller("__str__"))
if args.output:
make_report = make_json_report
else:
make_report = make_plain_report
report = make_report(args, found_shared_state, missed_shared_state,
found_source, missed_source)
output = open(args.output, "w") if args.output else sys.stdout
output.write(report)
if args.output:
output.close()
return 0
except Exception as e:
logger.error(str(e))
return 2
| [
37811,
198,
15269,
357,
66,
8,
12131,
17370,
27828,
3457,
13,
198,
4303,
36227,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
37811,
198,
198,
11748,
275,
65,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
302,
198,
... | 2.284173 | 1,390 |
'''
This module provides the ``Entity`` base class, as well as its metaclass
``EntityMeta``.
'''
from py23compat import sorted
import sys
import types
import warnings
from copy import deepcopy
import sqlalchemy
from sqlalchemy import Table, Column, Integer, desc, ForeignKey, and_, \
ForeignKeyConstraint
from sqlalchemy.orm import MapperExtension, mapper, object_session, \
EXT_CONTINUE, polymorphic_union, scoped_session as ScopedSession, \
ColumnProperty
from sqlalchemy.sql import ColumnCollection
import elixir
from elixir.statements import process_mutators, MUTATORS
from elixir import options
from elixir.properties import Property
DEBUG = False
try:
from sqlalchemy.orm import EXT_PASS
SA05orlater = False
except ImportError:
SA05orlater = True
__doc_all__ = ['Entity', 'EntityMeta']
class EntityDescriptor(object):
    '''
    EntityDescriptor describes fields and options needed for table creation.
    '''
    # NOTE(review): this chunk does not show __init__ nor several members
    # referenced below (call_builders, get_column, translate_order_by,
    # _get_children, the 'columns'/'table' getter functions, self._columns,
    # self.constraints, ...); they are defined elsewhere in the original file.
    def setup_options(self):
        '''
        Setup any values that might depend on the "using_options" class
        mutator. For example, the tablename or the metadata.
        '''
        elixir.metadatas.add(self.metadata)
        if self.collection is not None:
            self.collection.append(self.entity)
        entity = self.entity
        # Single-table inheritance: the child shares its parent's table name.
        if self.parent:
            if self.inheritance == 'single':
                self.tablename = self.parent._descriptor.tablename
        if not self.tablename:
            if self.shortnames:
                # "shortnames" option: just the lowercased class name.
                self.tablename = entity.__name__.lower()
            else:
                # NOTE(review): 'modulename' is computed but never used in
                # the name derivation below.
                modulename = entity.__module__.replace('.', '_')
                # Split the CamelCase class name on uppercase boundaries and
                # join the pieces with underscores (e.g. MyEntity -> my_entity).
                name_list = []
                begin = 0
                for i, c in enumerate(entity.__name__):
                    if i == 0:
                        continue
                    if 'A' <= c <= 'Z':
                        name_list.append(entity.__name__[begin:i].lower())
                        begin = i
                name_list.append(entity.__name__[begin:])
                #tablename = "%s" % (entity.__name__)
                tablename = "%s" % ('_'.join(name_list))
                self.tablename = tablename.lower()
        elif hasattr(self.tablename, '__call__'):
            # The tablename option may be a callable taking the entity class.
            self.tablename = self.tablename(entity)
        if not self.identity:
            if 'polymorphic_identity' in self.mapper_options:
                self.identity = self.mapper_options['polymorphic_identity']
            else:
                #TODO: include module name (We could have b.Account inherit
                # from a.Account)
                self.identity = entity.__name__.lower()
        elif 'polymorphic_identity' in self.mapper_options:
            raise Exception('You cannot use the "identity" option and the '
                            'polymorphic_identity mapper option at the same '
                            'time.')
        elif hasattr(self.identity, '__call__'):
            # The identity option may also be a callable taking the entity.
            self.identity = self.identity(entity)
        if self.polymorphic:
            # polymorphic=True means "use the default discriminator column
            # name"; a string value names the column explicitly.
            if not isinstance(self.polymorphic, basestring):
                self.polymorphic = options.DEFAULT_POLYMORPHIC_COL_NAME
    #---------------------
    # setup phase methods
    def create_pk_cols(self):
        """
        Create primary_key columns. That is, call the 'create_pk_cols'
        builders then add a primary key to the table if it hasn't already got
        one and needs one.
        This method is "semi-recursive" in some cases: it calls the
        create_keys method on ManyToOne relationships and those in turn call
        create_pk_cols on their target. It shouldn't be possible to have an
        infinite loop since a loop of primary_keys is not a valid situation.
        """
        # Guard against being called twice (see the "semi-recursive" note).
        if self._pk_col_done:
            return
        self.call_builders('create_pk_cols')
        if not self.autoload:
            if self.parent:
                if self.inheritance == 'multi':
                    # Add columns with foreign keys to the parent's primary
                    # key columns
                    parent_desc = self.parent._descriptor
                    tablename = parent_desc.table_fullname
                    join_clauses = []
                    for pk_col in parent_desc.primary_keys:
                        colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \
                                  {'entity': self.parent.__name__.lower(),
                                   'key': pk_col.key}
                        # It seems like SA ForeignKey is not happy being given
                        # a real column object when said column is not yet
                        # attached to a table
                        pk_col_name = "%s.%s" % (tablename, pk_col.key)
                        fk = ForeignKey(pk_col_name, ondelete='cascade')
                        col = Column(colname, pk_col.type, fk,
                                     primary_key=True)
                        self.add_column(col)
                        join_clauses.append(col == pk_col)
                    # Remember the parent/child join for the mapper's
                    # inherit_condition (see setup_mapper).
                    self.join_condition = and_(*join_clauses)
                elif self.inheritance == 'concrete':
                    # Copy primary key columns from the parent.
                    for col in self.parent._descriptor.columns:
                        if col.primary_key:
                            self.add_column(col.copy())
            elif not self.has_pk and self.auto_primarykey:
                # No PK declared: synthesize one, named either by the
                # auto_primarykey option (string) or the default name.
                if isinstance(self.auto_primarykey, basestring):
                    colname = self.auto_primarykey
                else:
                    colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME
                self.add_column(
                    Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE,
                           primary_key=True))
        self._pk_col_done = True
    def setup_table(self, only_autoloaded=False):
        '''
        Create a SQLAlchemy table-object with all columns that have been
        defined up to this point.
        '''
        if self.entity.table is not None:
            return
        # Each call handles either only the autoloaded entities or only the
        # non-autoloaded ones, depending on the flag.
        if self.autoload != only_autoloaded:
            return
        kwargs = self.table_options
        if self.autoload:
            args = self.table_args
            kwargs['autoload'] = True
        else:
            if self.parent:
                if self.inheritance == 'single':
                    # we know the parent is setup before the child
                    self.entity.table = self.parent.table
                    # re-add the entity columns to the parent entity so that
                    # they are added to the parent's table (whether the
                    # parent's table is already setup or not).
                    for col in self._columns:
                        self.parent._descriptor.add_column(col)
                    for constraint in self.constraints:
                        self.parent._descriptor.add_constraint(constraint)
                    return
                elif self.inheritance == 'concrete':
                    #TODO: we should also copy columns from the parent table
                    # if the parent is a base (abstract?) entity (whatever the
                    # inheritance type -> elif will need to be changed)
                    # Copy all non-primary key columns from parent table
                    # (primary key columns have already been copied earlier).
                    for col in self.parent._descriptor.columns:
                        if not col.primary_key:
                            self.add_column(col.copy())
                    for con in self.parent._descriptor.constraints:
                        self.add_constraint(
                            ForeignKeyConstraint(
                                [e.parent.key for e in con.elements],
                                [e._get_colspec() for e in con.elements],
                                name=con.name, #TODO: modify it
                                onupdate=con.onupdate, ondelete=con.ondelete,
                                use_alter=con.use_alter))
            if self.polymorphic and \
               self.inheritance in ('single', 'multi') and \
               self.children and not self.parent:
                # Root of a polymorphic hierarchy: add the discriminator col.
                self.add_column(Column(self.polymorphic,
                                       options.POLYMORPHIC_COL_TYPE))
            if self.version_id_col:
                # version_id_col=True means "use the default column name".
                if not isinstance(self.version_id_col, basestring):
                    self.version_id_col = options.DEFAULT_VERSION_ID_COL_NAME
                self.add_column(Column(self.version_id_col, Integer))
            args = list(self.columns) + self.constraints + self.table_args
        self.entity.table = Table(self.tablename, self.metadata,
                                  *args, **kwargs)
        if DEBUG:
            # Python 2 print statement; repr2 is presumably an Elixir/SA
            # debugging helper on Table -- confirm.
            print self.entity.table.repr2()
    def setup_mapper(self):
        '''
        Initializes and assign a mapper to the entity.
        At this point the mapper will usually have no property as they are
        added later.
        '''
        if self.entity.mapper:
            return
        # for now we don't support the "abstract" parent class in a concrete
        # inheritance scenario as demonstrated in
        # sqlalchemy/test/orm/inheritance/concrete.py
        # this should be added along other
        kwargs = {}
        if self.order_by:
            kwargs['order_by'] = self.translate_order_by(self.order_by)
        if self.version_id_col:
            kwargs['version_id_col'] = self.get_column(self.version_id_col)
        if self.inheritance in ('single', 'concrete', 'multi'):
            if self.parent and \
               (self.inheritance != 'concrete' or self.polymorphic):
                # non-polymorphic concrete doesn't need this
                kwargs['inherits'] = self.parent.mapper
            if self.inheritance == 'multi' and self.parent:
                # join_condition was built in create_pk_cols.
                kwargs['inherit_condition'] = self.join_condition
            if self.polymorphic:
                if self.children:
                    if self.inheritance == 'concrete':
                        keys = [(self.identity, self.entity.table)]
                        keys.extend([(child._descriptor.identity, child.table)
                                     for child in self._get_children()])
                        # Having the same alias name for an entity and one of
                        # its child (which is a parent itself) shouldn't cause
                        # any problem because the join shouldn't be used at
                        # the same time. But in reality, some versions of SA
                        # do misbehave on this. Since it doesn't hurt to have
                        # different names anyway, here they go.
                        pjoin = polymorphic_union(
                                    dict(keys), self.polymorphic,
                                    'pjoin_%s' % self.identity)
                        kwargs['with_polymorphic'] = ('*', pjoin)
                        kwargs['polymorphic_on'] = \
                            getattr(pjoin.c, self.polymorphic)
                    elif not self.parent:
                        kwargs['polymorphic_on'] = \
                            self.get_column(self.polymorphic)
                if self.children or self.parent:
                    kwargs['polymorphic_identity'] = self.identity
                if self.parent and self.inheritance == 'concrete':
                    kwargs['concrete'] = True
        if self.parent and self.inheritance == 'single':
            # Single-table children map against the parent's table/mapper.
            args = []
        else:
            args = [self.entity.table]
        # let user-defined kwargs override Elixir-generated ones, though that's
        # not very usefull since most of them expect Column instances.
        kwargs.update(self.mapper_options)
        #TODO: document this!
        if 'primary_key' in kwargs:
            # Translate column names given by the user into Column objects.
            cols = self.entity.table.c
            kwargs['primary_key'] = [getattr(cols, colname) for
                colname in kwargs['primary_key']]
        # do the mapping
        if self.session is None:
            self.entity.mapper = mapper(self.entity, *args, **kwargs)
        elif isinstance(self.session, ScopedSession):
            session_mapper = session_mapper_factory(self.session)
            self.entity.mapper = session_mapper(self.entity, *args, **kwargs)
        else:
            raise Exception("Failed to map entity '%s' with its table or "
                            "selectable. You can only bind an Entity to a "
                            "ScopedSession object or None for manual session "
                            "management."
                            % self.entity.__name__)
    #----------------
    # helper methods
    def add_column(self, col, check_duplicate=None):
        '''when check_duplicate is None, the value of the allowcoloverride
        option of the entity is used.
        '''
        if check_duplicate is None:
            check_duplicate = not self.allowcoloverride
        if col.key in self._columns:
            if check_duplicate:
                raise Exception("Column '%s' already exist in '%s' ! " %
                                (col.key, self.entity.__name__))
            else:
                # Override allowed: drop the previous column with this key.
                del self._columns[col.key]
        self._columns.add(col)
        if col.primary_key:
            self.has_pk = True
        # Autosetup triggers shouldn't be active anymore at this point, so we
        # can theoretically access the entity's table safely. But the problem
        # is that if, for some reason, the trigger removal phase didn't
        # happen, we'll get an infinite loop. So we just make sure we don't
        # get one in any case.
        table = type.__getattribute__(self.entity, 'table')
        if table is not None:
            if check_duplicate and col.key in table.columns.keys():
                raise Exception("Column '%s' already exist in table '%s' ! " %
                                (col.key, table.name))
            table.append_column(col)
            if DEBUG:
                # Python 2 print statement.
                print "table.append_column(%s)" % col
    def get_inverse_relation(self, rel, check_reverse=True):
        '''
        Return the inverse relation of rel, if any, None otherwise.
        '''
        matching_rel = None
        for other_rel in self.relationships:
            if rel.is_inverse(other_rel):
                if matching_rel is None:
                    matching_rel = other_rel
                else:
                    # More than one candidate inverse is ambiguous.
                    raise Exception(
                            "Several relations match as inverse of the '%s' "
                            "relation in entity '%s'. You should specify "
                            "inverse relations manually by using the inverse "
                            "keyword."
                            % (rel.name, rel.entity.__name__))
        # When a matching inverse is found, we check that it has only
        # one relation matching as its own inverse. We don't need the result
        # of the method though. But we do need to be careful not to start an
        # infinite recursive loop.
        if matching_rel and check_reverse:
            rel.entity._descriptor.get_inverse_relation(matching_rel, False)
        return matching_rel
    #------------------------
    # some useful properties
    def table_fullname(self):
        '''
        Complete name of the table for the related entity.
        Includes the schema name if there is one specified.
        '''
        schema = self.table_options.get('schema', None)
        if schema is not None:
            return "%s.%s" % (schema, self.tablename)
        else:
            return self.tablename
    table_fullname = property(table_fullname)
    # NOTE(review): the 'columns' getter function is not visible in this
    # chunk; it is defined elsewhere in the original file.
    columns = property(columns)
    def primary_keys(self):
        """
        Returns the list of primary key columns of the entity.
        This property isn't valid before the "create_pk_cols" phase.
        """
        if self.autoload:
            return [col for col in self.entity.table.primary_key.columns]
        else:
            if self.parent and self.inheritance == 'single':
                # Single-table children delegate to the parent's PK columns.
                return self.parent._descriptor.primary_keys
            else:
                return [col for col in self.columns if col.primary_key]
    primary_keys = property(primary_keys)
    # NOTE(review): the 'table' getter function is not visible in this chunk;
    # it is defined elsewhere in the original file.
    table = property(table)
    def primary_key_properties(self):
        """
        Returns the list of (mapper) properties corresponding to the primary
        key columns of the table of the entity.
        This property caches its value, so it shouldn't be called before the
        entity is fully set up.
        """
        if not hasattr(self, '_pk_props'):
            col_to_prop = {}
            mapper = self.entity.mapper
            for prop in mapper.iterate_properties:
                if isinstance(prop, ColumnProperty):
                    # NOTE(review): the inner loop deliberately rebinds 'col'
                    # to each member of the column's proxy_set.
                    for col in prop.columns:
                        for col in col.proxy_set:
                            col_to_prop[col] = prop
            pk_cols = [c for c in mapper.mapped_table.c if c.primary_key]
            self._pk_props = [col_to_prop[c] for c in pk_cols]
        return self._pk_props
    primary_key_properties = property(primary_key_properties)
class TriggerProxy(object):
    """
    A class that serves as a "trigger" ; accessing its attributes runs
    the setup_all function.
    Note that the `setup_all` is called on each access of the attribute.
    """
    # NOTE(review): the methods of this class (presumably __init__ and
    # __getattr__ implementing the trigger behaviour) are not visible in
    # this chunk; they are defined elsewhere in the original file.
def is_entity(cls):
    """
    Scan the bases classes of `cls` to see if any is an instance of
    EntityMeta. If we don't find any, it means it is either an unrelated
    class or an entity base class (like the 'Entity' class).
    """
    # A class is a concrete entity exactly when at least one of its direct
    # bases was created by the EntityMeta metaclass.
    return any(isinstance(base, EntityMeta) for base in cls.__bases__)
# Note that we don't use inspect.getmembers because of
# http://bugs.python.org/issue1785
# See also http://elixir.ematia.de/trac/changeset/262
def instrument_class(cls):
    """
    Instrument a class as an Entity. This is usually done automatically through
    the EntityMeta metaclass.
    """
    # NOTE(review): is_abstract_entity and _install_autosetup_triggers are
    # not visible in this chunk; they are defined elsewhere in the file.
    # Create the entity descriptor
    desc = cls._descriptor = EntityDescriptor(cls)
    # Process mutators
    # We *do* want mutators to be processed for base/abstract classes
    # (so that statements like using_options_defaults work).
    process_mutators(cls)
    # We do not want to do any more processing for base/abstract classes
    # (Entity et al.).
    if not is_entity(cls) or is_abstract_entity(cls):
        return
    cls.table = None
    cls.mapper = None
    # Copy the properties ('Property' instances) of the entity base class(es).
    # We use getmembers (instead of __dict__) so that we also get the
    # properties from the parents of the base class if any.
    base_props = []
    for base in cls.__bases__:
        if isinstance(base, EntityMeta) and \
           (not is_entity(base) or is_abstract_entity(base)):
            # deepcopy so each entity gets its own Property instances.
            base_props += [(name, deepcopy(attr)) for name, attr in
                           getmembers(base, lambda a: isinstance(a, Property))]
    # Process attributes (using the assignment syntax), looking for
    # 'Property' instances and attaching them to this entity.
    # (Python 2: dict.iteritems.)
    properties = [(name, attr) for name, attr in cls.__dict__.iteritems()
                  if isinstance(attr, Property)]
    # Attach in declaration order, using the per-Property creation counter.
    sorted_props = sorted(base_props + properties,
                          key=lambda i: i[1]._counter)
    for name, prop in sorted_props:
        prop.attach(cls, name)
    # setup misc options here (like tablename etc.)
    desc.setup_options()
    # create trigger proxies
    # TODO: support entity_name... It makes sense only for autoloaded
    # tables for now, and would make more sense if we support "external"
    # tables
    if desc.autosetup:
        _install_autosetup_triggers(cls)
class EntityMeta(type):
    """
    Entity meta class.
    You should only use it directly if you want to define your own base class
    for your entities (ie you don't want to use the provided 'Entity' class).
    """
    # NOTE(review): the metaclass methods (presumably __init__ calling
    # instrument_class) are not visible in this chunk; they are defined
    # elsewhere in the original file.
def setup_entities(entities):
    '''Setup all entities in the list passed as argument'''
    for entity in entities:
        # delete all Elixir properties so that it doesn't interfere with
        # SQLAlchemy. At this point they should have be converted to
        # builders.
        for name, attr in entity.__dict__.items():
            if isinstance(attr, Property):
                delattr(entity, name)
        if entity._descriptor.autosetup:
            _cleanup_autosetup_triggers(entity)
    # Run each setup phase over *all* entities before starting the next
    # phase: the phases are order-dependent across entities.
    for method_name in (
            'setup_autoload_table', 'create_pk_cols', 'setup_relkeys',
            'before_table', 'setup_table', 'setup_reltables', 'after_table',
            'setup_events',
            'before_mapper', 'setup_mapper', 'after_mapper',
            'setup_properties',
            'finalize'):
        # if DEBUG:
        #     print "=" * 40
        #     print method_name
        #     print "=" * 40
        for entity in entities:
            # print entity.__name__, "...",
            # Skip entities that were already fully set up.
            if hasattr(entity, '_setup_done'):
                # print "already done"
                continue
            method = getattr(entity._descriptor, method_name)
            method()
            # print "ok"
def cleanup_entities(entities):
    """
    Try to revert back the list of entities passed as argument to the state
    they had just before their setup phase. It will not work entirely for
    autosetup entities as we need to remove the autosetup triggers.
    As of now, this function is *not* functional in that it doesn't revert to
    the exact same state the entities were before setup. For example, the
    properties do not work yet as those would need to be regenerated (since the
    columns they are based on are regenerated too -- and as such the
    corresponding joins are not correct) but this doesn't happen because of
    the way relationship setup is designed to be called only once (especially
    the backref stuff in create_properties).
    """
    for entity in entities:
        desc = entity._descriptor
        if desc.autosetup:
            _cleanup_autosetup_triggers(entity)
        # Drop the "already set up" marker so setup can run again.
        if hasattr(entity, '_setup_done'):
            del entity._setup_done
        # Reset the SQLAlchemy artifacts produced by setup.
        entity.table = None
        entity.mapper = None
        # Reset the descriptor's per-setup bookkeeping.
        desc._pk_col_done = False
        desc.has_pk = False
        desc._columns = ColumnCollection()
        desc.constraints = []
        desc.properties = {}
class EntityBase(object):
    """
    This class holds all methods of the "Entity" base class, but does not act
    as a base class itself (it does not use the EntityMeta metaclass), but
    rather as a parent class for Entity. This is meant so that people who want
    to provide their own base class but don't want to loose or copy-paste all
    the methods of Entity can do so by inheriting from EntityBase:
    .. sourcecode:: python
        class MyBase(EntityBase):
            __metaclass__ = EntityMeta
            def myCustomMethod(self):
                # do something great
    """
    # NOTE(review): the plain 'update_or_create' function wrapped here is not
    # visible in this chunk; it is defined elsewhere in the original file.
    update_or_create = classmethod(update_or_create)
    def from_dict(self, data):
        """
        Update a mapped class with data from a JSON-style nested dict/list
        structure.
        """
        # surrogate can be guessed from autoincrement/sequence but I guess
        # that's not 100% reliable, so we'll need an override
        mapper = sqlalchemy.orm.object_mapper(self)
        for key, value in data.iteritems():
            if isinstance(value, dict):
                # Nested dict -> scalar relationship (many-to-one/one-to-one).
                dbvalue = getattr(self, key)
                rel_class = mapper.get_property(key).mapper.class_
                pk_props = rel_class._descriptor.primary_key_properties
                # If the data doesn't contain any pk, and the relationship
                # already has a value, update that record.
                if not [1 for p in pk_props if p.key in data] and \
                   dbvalue is not None:
                    dbvalue.from_dict(value)
                else:
                    record = rel_class.update_or_create(value)
                    setattr(self, key, record)
            elif isinstance(value, list) and \
                 value and isinstance(value[0], dict):
                # Non-empty list of dicts -> collection relationship.
                rel_class = mapper.get_property(key).mapper.class_
                new_attr_value = []
                for row in value:
                    if not isinstance(row, dict):
                        raise Exception(
                                'Cannot send mixed (dict/non dict) data '
                                'to list relationships in from_dict data.')
                    record = rel_class.update_or_create(row)
                    new_attr_value.append(record)
                setattr(self, key, new_attr_value)
            else:
                # Plain value -> regular column attribute.
                setattr(self, key, value)
    def to_dict(self, deep={}, exclude=[]):
        """Generate a JSON-style nested dict/list structure from an object."""
        # NOTE(review): the mutable defaults 'deep'/'exclude' are only read,
        # never mutated here, so the usual shared-default pitfall is avoided.
        col_prop_names = [p.key for p in self.mapper.iterate_properties \
                          if isinstance(p, ColumnProperty)]
        data = dict([(name, getattr(self, name))
                     for name in col_prop_names if name not in exclude])
        for rname, rdeep in deep.iteritems():
            dbdata = getattr(self, rname)
            #FIXME: use attribute names (ie coltoprop) instead of column names
            fks = self.mapper.get_property(rname).remote_side
            exclude = [c.name for c in fks]
            if dbdata is None:
                data[rname] = None
            elif isinstance(dbdata, list):
                data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata]
            else:
                data[rname] = dbdata.to_dict(rdeep, exclude)
        return data
    # session methods
    # This bunch of session methods, along with all the query methods below
    # only make sense when using a global/scoped/contextual session.
    # NOTE(review): the '_global_session' getter function is not visible in
    # this chunk; it is defined elsewhere in the original file.
    _global_session = property(_global_session)
    # only exist in SA < 0.5
    # IMO, the replacement (session.add) doesn't sound good enough to be added
    # here. For example: "o = Order(); o.add()" is not very telling. It's
    # better to leave it as "session.add(o)"
    # query methods
    def get_by(cls, *args, **kwargs):
        """
        Returns the first instance of this class matching the given criteria.
        This is equivalent to:
        session.query(MyClass).filter_by(...).first()
        """
        return cls.query.filter_by(*args, **kwargs).first()
    get_by = classmethod(get_by)
    def get(cls, *args, **kwargs):
        """
        Return the instance of this class based on the given identifier,
        or None if not found. This is equivalent to:
        session.query(MyClass).get(...)
        """
        return cls.query.get(*args, **kwargs)
    get = classmethod(get)
class Entity(EntityBase):
    '''
    The base class for all entities
    All Elixir model objects should inherit from this class. Statements can
    appear within the body of the definition of an entity to define its
    fields, relationships, and other options.
    Here is an example:
    .. sourcecode:: python
        class Person(Entity):
            name = Field(Unicode(128))
            birthdate = Field(DateTime, default=datetime.now)
    Please note, that if you don't specify any primary keys, Elixir will
    automatically create one called ``id``.
    For further information, please refer to the provided examples or
    tutorial.
    '''
    # Python 2 metaclass declaration: EntityMeta instruments each subclass
    # (see instrument_class above) at class-creation time.
    __metaclass__ = EntityMeta
| [
7061,
6,
198,
1212,
8265,
3769,
262,
7559,
32398,
15506,
2779,
1398,
11,
355,
880,
355,
663,
1138,
330,
31172,
198,
15506,
32398,
48526,
15506,
13,
198,
7061,
6,
198,
198,
6738,
12972,
1954,
5589,
265,
1330,
23243,
198,
198,
11748,
25... | 2.153912 | 12,884 |
from .lstm import LSTMTransformer
from .resnet1d import Resnet1dTransformer
from .resnet2d import Resnet2dTransformer
from .gated_resnet1d import GatedResnet1dTransformer
from .gated_resnet2d import GatedResnet2dTransformer
| [
6738,
764,
75,
301,
76,
1330,
406,
2257,
44,
8291,
16354,
198,
6738,
764,
411,
3262,
16,
67,
1330,
1874,
3262,
16,
67,
8291,
16354,
198,
6738,
764,
411,
3262,
17,
67,
1330,
1874,
3262,
17,
67,
8291,
16354,
198,
6738,
764,
70,
515,... | 2.871795 | 78 |
#!/usr/bin/env python3
"""
Example file showing the tuning of an Izhikevich neuron using pyNeuroML.
File: source/Userdocs/NML2_examples/tune-izhikevich.py
Copyright 2021 NeuroML contributors
"""
from pyneuroml.tune.NeuroMLTuner import run_optimisation
import pynwb # type: ignore
import numpy as np
from pyelectro.utils import simple_network_analysis
from typing import List, Dict, Tuple
from pyneuroml.pynml import write_neuroml2_file
from pyneuroml.pynml import generate_plot
from pyneuroml.pynml import run_lems_with_jneuroml
from neuroml import (
NeuroMLDocument,
Izhikevich2007Cell,
PulseGenerator,
Network,
Population,
ExplicitInput,
)
from hdmf.container import Container
from pyneuroml.lems.LEMSSimulation import LEMSSimulation
import sys
def get_data_metrics(datafile: Container) -> Tuple[Dict, Dict, Dict]:
    """Analyse the data to get metrics to tune against.

    :param datafile: an open pynwb NWB file container with
        "CurrentClampSeries_NN" entries in its acquisition group
    :returns: metrics from pyelectro analysis, currents, and the membrane potential values
    """
    analysis_results = {}
    currents = {}
    memb_vals = {}
    total_acquisitions = len(datafile.acquisition)
    # NOTE(review): range(1, total_acquisitions) visits sweeps 1 ..
    # total_acquisitions-1; if the acquisitions are numbered 1..N this skips
    # the last sweep -- confirm against the file's key numbering.
    for acq in range(1, total_acquisitions):
        print("Going over acquisition # {}".format(acq))
        # stimulus lasts about 1000ms, so we take about the first 1500 ms
        # (the * 1000.0 presumably converts V to mV -- confirm units).
        data_v = (
            datafile.acquisition["CurrentClampSeries_{:02d}".format(acq)].data[:15000] * 1000.0
        )
        # get sampling rate from the data
        sampling_rate = datafile.acquisition[
            "CurrentClampSeries_{:02d}".format(acq)
        ].rate
        # generate time steps from sampling rate
        data_t = np.arange(0, len(data_v) / sampling_rate, 1.0 / sampling_rate) * 1000.0
        # run the analysis
        analysis_results[acq] = simple_network_analysis({acq: data_v}, data_t)
        # extract current from description, but can be extracted from other
        # locations also, such as the CurrentClampStimulus series.
        data_i = (
            datafile.acquisition["CurrentClampSeries_{:02d}".format(acq)]
            .description.split("(")[1]
            .split("~")[1]
            .split(" ")[0]
        )
        currents[acq] = data_i
        memb_vals[acq] = (data_t, data_v)
    return (analysis_results, currents, memb_vals)
def tune_izh_model(acq_list: List, metrics_from_data: Dict, currents: Dict) -> Dict:
    """Tune networks model against the data.
    Here we generate a network with the necessary number of Izhikevich cells,
    one for each current stimulus, and tune them against the experimental data.
    :param acq_list: list of indices of acquisitions/sweeps to tune against
    :type acq_list: list
    :param metrics_from_data: dictionary with the sweep number as index, and
        the dictionary containing metrics generated from the analysis
    :type metrics_from_data: dict
    :param currents: dictionary with sweep number as index and stimulus current
        value
    :returns: report dict from run_optimisation (includes a "fittest vars"
        entry, as consumed by run_fitted_cell_simulation below)
    """
    # length of simulation of the cells---should match the length of the
    # experiment
    sim_time = 1500.0
    # Create a NeuroML template network simulation file that we will use for
    # the tuning
    template_doc = NeuroMLDocument(id="IzhTuneNet")
    # Add an Izhikevich cell with some parameters to the document
    # (the values here are only placeholders; the tuner varies them).
    template_doc.izhikevich2007_cells.append(
        Izhikevich2007Cell(
            id="Izh2007",
            C="100pF",
            v0="-60mV",
            k="0.7nS_per_mV",
            vr="-60mV",
            vt="-40mV",
            vpeak="35mV",
            a="0.03per_ms",
            b="-2nS",
            c="-50.0mV",
            d="100pA",
        )
    )
    template_doc.networks.append(Network(id="Network0"))
    # Add a cell for each acquisition list
    popsize = len(acq_list)
    template_doc.networks[0].populations.append(
        Population(id="Pop0", component="Izh2007", size=popsize)
    )
    # Add a current source for each cell, matching the currents that
    # were used in the experimental study.
    counter = 0
    for acq in acq_list:
        template_doc.pulse_generators.append(
            PulseGenerator(
                id="Stim{}".format(counter),
                delay="80ms",
                duration="1000ms",
                amplitude="{}pA".format(currents[acq]),
            )
        )
        template_doc.networks[0].explicit_inputs.append(
            ExplicitInput(
                target="Pop0[{}]".format(counter), input="Stim{}".format(counter)
            )
        )
        counter = counter + 1
    # Print a summary
    print(template_doc.summary())
    # Write to a neuroml file and validate it.
    reference = "TuneIzhFergusonPyr3"
    template_filename = "{}.net.nml".format(reference)
    write_neuroml2_file(template_doc, template_filename, validate=True)
    # Now for the tuning bits
    # format is type:id/variable:id/units
    # supported types: cell/channel/izhikevich2007cell
    # supported variables:
    #  - channel: vShift
    #  - cell: channelDensity, vShift_channelDensity, channelDensityNernst,
    #  erev_id, erev_ion, specificCapacitance, resistivity
    #  - izhikevich2007Cell: all available attributes
    # we want to tune these parameters within these ranges
    # param: (min, max)
    parameters = {
        "izhikevich2007Cell:Izh2007/C/pF": (100, 300),
        "izhikevich2007Cell:Izh2007/k/nS_per_mV": (0.01, 2),
        "izhikevich2007Cell:Izh2007/vr/mV": (-70, -50),
        "izhikevich2007Cell:Izh2007/vt/mV": (-60, 0),
        "izhikevich2007Cell:Izh2007/vpeak/mV": (35, 70),
        "izhikevich2007Cell:Izh2007/a/per_ms": (0.001, 0.4),
        "izhikevich2007Cell:Izh2007/b/nS": (-10, 10),
        "izhikevich2007Cell:Izh2007/c/mV": (-65, -10),
        "izhikevich2007Cell:Izh2007/d/pA": (50, 500),
    }  # type: Dict[str, Tuple[float, float]]
    # Set up our target data and so on
    ctr = 0
    target_data = {}
    weights = {}
    for acq in acq_list:
        # data to fit to:
        # format: path/to/variable:metric
        # metric from pyelectro, for example:
        # https://pyelectro.readthedocs.io/en/latest/pyelectro.html?highlight=mean_spike_frequency#pyelectro.analysis.mean_spike_frequency
        mean_spike_frequency = "Pop0[{}]/v:mean_spike_frequency".format(ctr)
        average_last_1percent = "Pop0[{}]/v:average_last_1percent".format(ctr)
        first_spike_time = "Pop0[{}]/v:first_spike_time".format(ctr)
        # each metric can have an associated weight
        weights[mean_spike_frequency] = 1
        weights[average_last_1percent] = 1
        weights[first_spike_time] = 1
        # value of the target data from our data set
        target_data[mean_spike_frequency] = metrics_from_data[acq][
            "{}:mean_spike_frequency".format(acq)
        ]
        target_data[average_last_1percent] = metrics_from_data[acq][
            "{}:average_last_1percent".format(acq)
        ]
        target_data[first_spike_time] = metrics_from_data[acq][
            "{}:first_spike_time".format(acq)
        ]
        # only add these if the experimental data includes them
        # these are only generated for traces with spikes
        if "{}:average_maximum".format(acq) in metrics_from_data[acq]:
            average_maximum = "Pop0[{}]/v:average_maximum".format(ctr)
            weights[average_maximum] = 1
            target_data[average_maximum] = metrics_from_data[acq][
                "{}:average_maximum".format(acq)
            ]
        if "{}:average_minimum".format(acq) in metrics_from_data[acq]:
            average_minimum = "Pop0[{}]/v:average_minimum".format(ctr)
            weights[average_minimum] = 1
            target_data[average_minimum] = metrics_from_data[acq][
                "{}:average_minimum".format(acq)
            ]
        ctr = ctr + 1
    # simulator to use
    simulator = "jNeuroML"
    return run_optimisation(
        # Prefix for new files
        prefix="TuneIzh",
        # Name of the NeuroML template file
        neuroml_file=template_filename,
        # Name of the network
        target="Network0",
        # Parameters to be fitted
        parameters=list(parameters.keys()),
        # Our max and min constraints
        min_constraints=[v[0] for v in parameters.values()],
        max_constraints=[v[1] for v in parameters.values()],
        # Weights we set for parameters
        weights=weights,
        # The experimental metrics to fit to
        target_data=target_data,
        # Simulation time
        sim_time=sim_time,
        # EC parameters
        population_size=100,
        max_evaluations=500,
        num_selected=30,
        num_offspring=50,
        mutation_rate=0.9,
        num_elites=3,
        # Seed value
        seed=12345,
        # Simulator
        simulator=simulator,
        dt=0.025,
        show_plot_already='-nogui' not in sys.argv,
        save_to_file="fitted_izhikevich_fitness.png",
        save_to_file_scatter="fitted_izhikevich_scatter.png",
        save_to_file_hist="fitted_izhikevich_hist.png",
        save_to_file_output="fitted_izhikevich_output.png",
        num_parallel_evaluations=4,
    )
def run_fitted_cell_simulation(
    sweeps_to_tune_against: List, tuning_report: Dict, simulation_id: str
) -> None:
    """Run a simulation with the values obtained from the fitting

    :param sweeps_to_tune_against: sweep numbers used during tuning (one
        stimulated cell is created per sweep)
    :type sweeps_to_tune_against: List
    :param tuning_report: tuning report from the optimser
    :type tuning_report: Dict
    :param simulation_id: text id of simulation
    :type simulation_id: str

    NOTE(review): this function reads the module-level global ``currents``
    (set in the ``__main__`` block) rather than taking it as a parameter.
    """
    # get the fittest variables
    fittest_vars = tuning_report["fittest vars"]
    C = str(fittest_vars["izhikevich2007Cell:Izh2007/C/pF"]) + "pF"
    k = str(fittest_vars["izhikevich2007Cell:Izh2007/k/nS_per_mV"]) + "nS_per_mV"
    vr = str(fittest_vars["izhikevich2007Cell:Izh2007/vr/mV"]) + "mV"
    vt = str(fittest_vars["izhikevich2007Cell:Izh2007/vt/mV"]) + "mV"
    vpeak = str(fittest_vars["izhikevich2007Cell:Izh2007/vpeak/mV"]) + "mV"
    a = str(fittest_vars["izhikevich2007Cell:Izh2007/a/per_ms"]) + "per_ms"
    b = str(fittest_vars["izhikevich2007Cell:Izh2007/b/nS"]) + "nS"
    c = str(fittest_vars["izhikevich2007Cell:Izh2007/c/mV"]) + "mV"
    d = str(fittest_vars["izhikevich2007Cell:Izh2007/d/pA"]) + "pA"
    # Create a simulation using our obtained parameters.
    # Note that the tuner generates a graph with the fitted values already, but
    # we want to keep a copy of our fitted cell also, so we'll create a NeuroML
    # Document ourselves also.
    sim_time = 1500.0
    simulation_doc = NeuroMLDocument(id="FittedNet")
    # Add an Izhikevich cell with some parameters to the document
    simulation_doc.izhikevich2007_cells.append(
        Izhikevich2007Cell(
            id="Izh2007",
            C=C,
            v0="-60mV",
            k=k,
            vr=vr,
            vt=vt,
            vpeak=vpeak,
            a=a,
            b=b,
            c=c,
            d=d,
        )
    )
    simulation_doc.networks.append(Network(id="Network0"))
    # Add a cell for each acquisition list
    popsize = len(sweeps_to_tune_against)
    simulation_doc.networks[0].populations.append(
        Population(id="Pop0", component="Izh2007", size=popsize)
    )
    # Add a current source for each cell, matching the currents that
    # were used in the experimental study.
    counter = 0
    for acq in sweeps_to_tune_against:
        simulation_doc.pulse_generators.append(
            PulseGenerator(
                id="Stim{}".format(counter),
                delay="80ms",
                duration="1000ms",
                amplitude="{}pA".format(currents[acq]),
            )
        )
        simulation_doc.networks[0].explicit_inputs.append(
            ExplicitInput(
                target="Pop0[{}]".format(counter), input="Stim{}".format(counter)
            )
        )
        counter = counter + 1
    # Print a summary
    print(simulation_doc.summary())
    # Write to a neuroml file and validate it.
    reference = "FittedIzhFergusonPyr3"
    simulation_filename = "{}.net.nml".format(reference)
    write_neuroml2_file(simulation_doc, simulation_filename, validate=True)
    simulation = LEMSSimulation(
        sim_id=simulation_id,
        duration=sim_time,
        dt=0.1,
        target="Network0",
        simulation_seed=54321,
    )
    simulation.include_neuroml2_file(simulation_filename)
    # Record the membrane potential of each cell to a single data file.
    simulation.create_output_file("output0", "{}.v.dat".format(simulation_id))
    counter = 0
    for acq in sweeps_to_tune_against:
        simulation.add_column_to_output_file(
            "output0", "Pop0[{}]".format(counter), "Pop0[{}]/v".format(counter)
        )
        counter = counter + 1
    simulation_file = simulation.save_to_file()
    # simulate
    run_lems_with_jneuroml(simulation_file, max_memory="2G", nogui=True, plot=False)
def plot_sim_data(
    sweeps_to_tune_against: List, simulation_id: str, memb_pots: Dict
) -> None:
    """Plot data from our fitted simulation

    :param sweeps_to_tune_against: sweep numbers used during tuning
    :type sweeps_to_tune_against: List
    :param simulation_id: string id of simulation
    :type simulation_id: str
    :param memb_pots: per-sweep (time, voltage) tuples from the experiment

    NOTE(review): this function reads the module-level global ``currents``
    (set in the ``__main__`` block) rather than taking it as a parameter.
    """
    # Plot
    data_array = np.loadtxt("%s.v.dat" % simulation_id)
    # construct data for plotting
    counter = 0
    time_vals_list = []
    sim_v_list = []
    data_v_list = []
    data_t_list = []
    stim_vals = []
    for acq in sweeps_to_tune_against:
        stim_vals.append("{}pA".format(currents[acq]))
        # remains the same for all columns
        # (simulation output is in s and V; * 1000.0 converts to ms and mV)
        time_vals_list.append(data_array[:, 0] * 1000.0)
        sim_v_list.append(data_array[:, counter + 1] * 1000.0)
        data_v_list.append(memb_pots[acq][1])
        data_t_list.append(memb_pots[acq][0])
        counter = counter + 1
    # Model membrane potential plot
    generate_plot(
        xvalues=time_vals_list,
        yvalues=sim_v_list,
        labels=stim_vals,
        title="Membrane potential (model)",
        show_plot_already=False,
        save_figure_to="%s-model-v.png" % simulation_id,
        xaxis="time (ms)",
        yaxis="membrane potential (mV)",
    )
    # data membrane potential plot
    generate_plot(
        xvalues=data_t_list,
        yvalues=data_v_list,
        labels=stim_vals,
        title="Membrane potential (exp)",
        show_plot_already=False,
        save_figure_to="%s-exp-v.png" % simulation_id,
        xaxis="time (ms)",
        yaxis="membrane potential (mV)",
    )
if __name__ == "__main__":
    # set the default size for generated plots
    # https://matplotlib.org/stable/tutorials/introductory/customizing.html#a-sample-matplotlibrc-file
    import matplotlib as mpl
    mpl.rcParams["figure.figsize"] = [18, 12]
    io = pynwb.NWBHDF5IO("./FergusonEtAl2015_PYR3.nwb", "r")
    datafile = io.read()
    # NOTE: 'currents' and 'memb_pots' become module-level globals here and
    # are read directly by run_fitted_cell_simulation and plot_sim_data.
    analysis_results, currents, memb_pots = get_data_metrics(datafile)
    # Choose what sweeps to tune against.
    # There are 33 sweeps: 1..33.
    # sweeps_to_tune_against = [1, 2, 15, 30, 31, 32, 33]
    sweeps_to_tune_against = [16,21]
    report = tune_izh_model(sweeps_to_tune_against, analysis_results, currents)
    simulation_id = "fitted_izhikevich_sim"
    run_fitted_cell_simulation(sweeps_to_tune_against, report, simulation_id)
    plot_sim_data(sweeps_to_tune_against, simulation_id, memb_pots)
    # close the data file
    io.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
16281,
2393,
4478,
262,
24549,
286,
281,
314,
23548,
522,
49547,
43164,
1262,
12972,
8199,
1434,
5805,
13,
198,
198,
8979,
25,
2723,
14,
12982,
31628,
14,
45,
5805,
17,... | 2.263486 | 6,729 |
import robot
import robot2
import numpy as np
policy = Policy()
policy.collect_training_data()
| [
11748,
9379,
198,
11748,
9379,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
30586,
796,
7820,
3419,
198,
30586,
13,
33327,
62,
34409,
62,
7890,
3419,
198
] | 3.428571 | 28 |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
if __name__ == '__main__':
rospy.init_node('keyboard_cmd_vel')
keyboard_cmd_vel = KeyTwist()
keyboard_cmd_vel.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
44088,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
686,
2777,
... | 2.47561 | 82 |
from torch.distributions import Distribution
import torch
from typing import Type, Callable, Sequence, Union
HyperParameters = Union[torch.Tensor, float, int]
DistributionOrBuilder = Union[Type[Distribution], Callable[[Sequence[HyperParameters]], Distribution]]
| [
6738,
28034,
13,
17080,
2455,
507,
1330,
27484,
198,
11748,
28034,
198,
6738,
19720,
1330,
5994,
11,
4889,
540,
11,
45835,
11,
4479,
198,
198,
38197,
48944,
796,
4479,
58,
13165,
354,
13,
51,
22854,
11,
12178,
11,
493,
60,
198,
20344,... | 3.984848 | 66 |
"""
This creates an index.html file for the find-links directory on the openmdao.org site.
"""
import sys
import os.path
import hashlib
import fnmatch
def find_files(startdir, pats):
"""Return a list of files (using a generator) that match
the given list of glob patterns. Walks an entire directory structure.
"""
match = fnmatch.fnmatch
join = os.path.join
for path, dirlist, filelist in os.walk(startdir):
for name in filelist:
for pat in pats:
if match(name, pat):
yield join(path, name)
def file_md5(fpath):
"""Return the MD5 digest for the given file"""
try:
f = open(fpath,'rb')
m = hashlib.md5()
while True:
s = f.read(4096)
if not s:
break
m.update(s)
return m.hexdigest()
finally:
f.close()
if __name__ == '__main__':
make_egglist_index()
| [
37811,
198,
1212,
8075,
281,
6376,
13,
6494,
2393,
329,
262,
1064,
12,
28751,
8619,
319,
262,
1280,
9132,
5488,
13,
2398,
2524,
13,
198,
37811,
628,
198,
11748,
25064,
198,
11748,
28686,
13,
6978,
198,
11748,
12234,
8019,
198,
11748,
... | 2.201856 | 431 |
# -*- coding: utf-8 -*-
"""
Script Name: Process.py
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
import os
from PySide2.QtCore import QProcess
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 10/11/2019 - 12:06 AM
# © 2017 - 2018 DAMGteam. All rights reserved | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
7391,
6530,
25,
10854,
13,
9078,
198,
13838,
25,
2141,
33822,
71,
14,
40335,
532,
513,
35,
6802,
13,
198,
198,
11828,
25,
198,
198,
37811,
198,
2,
1... | 4.189655 | 116 |
import PyPDF2
import sys
#inputs from command
inputs = sys.argv[1:]
#merging PDF
#call the function
pdf_merger(inputs)
#ROTATE PDFs
# with open('dummy.pdf', 'rb') as file:
# reader = PyPDF2.PdfFileReader(file)
# page = reader.getPage(0)
# page.rotateClockwise(90)
# writer = PyPDF2.PdfFileWriter()
# writer.addPage(page)
# with open('tilt.pdf', 'wb') as new_file:
# writer.write(new_file)
| [
198,
11748,
9485,
20456,
17,
198,
11748,
25064,
198,
198,
2,
15414,
82,
422,
3141,
198,
15414,
82,
796,
25064,
13,
853,
85,
58,
16,
47715,
198,
198,
2,
647,
2667,
12960,
628,
198,
198,
2,
13345,
262,
2163,
198,
12315,
62,
647,
136... | 2.19 | 200 |
from time import sleep
from LEDStrip import LEDStrip
from Wheels import Wheels
from Servos import Servos
from Joystick import Joystick
if __name__ == "__main__":
print("> Testing Robot <")
print(" > Testing LEDs...")
led = LEDStrip(0.5)
led.led_test()
print(" > LED Testing Complete!")
sleep(0.5)
print(" > Testing Wheels...")
wheels = Wheels()
wheels.wheel_test()
print(" > Wheels Testing Complete!")
sleep(0.5)
print(" > Testing Servos...")
servos = Servos()
servos.servo_test()
print(" > Servos Testing Complete!")
sleep(0.5)
print(" > Testing Joystick...")
joystick = Joystick()
joystick.joystick_test()
print(" > Joystick Testing Complete!")
sleep(0.5)
print("> Testing Complete!") | [
6738,
640,
1330,
3993,
198,
198,
6738,
12365,
1273,
5528,
1330,
12365,
1273,
5528,
198,
6738,
37416,
1330,
37416,
198,
6738,
3116,
418,
1330,
3116,
418,
198,
6738,
14087,
13915,
1330,
14087,
13915,
628,
628,
198,
198,
361,
11593,
3672,
... | 2.473054 | 334 |
# Script to Take a JSON object, convert it into a Python structure, and convert the Python structure into Flora-2 code.
# Jason Morris
import sys, json, types
# Get the data from the command line
filename = sys.argv[1]
file = open(filename, "r")
# Convert from JSON to Python structure
dictionary = json.load(file)
#print(dictionary)
#print(dictionary['test'])
# Convert all lists to dictionaries, maybe?
# Convert the resulting Python dictionary to a list of Flora-2 Entries.
output = []
for k,v in dictionary.items():
output.append(json2flora(k,v,root=True))
# Output the Flora-2 Code
for o in output:
print(o + ".\n")
| [
2,
12327,
284,
7214,
257,
19449,
2134,
11,
10385,
340,
656,
257,
11361,
4645,
11,
290,
10385,
262,
11361,
4645,
656,
4432,
64,
12,
17,
2438,
13,
198,
2,
8982,
14433,
198,
198,
11748,
25064,
11,
33918,
11,
3858,
198,
198,
2,
3497,
... | 3.169154 | 201 |
import importlib.machinery
import importlib.util
import inspect
import sys
from pathlib import Path
from types import ModuleType
from typing import Callable, Iterator, List, Optional, Tuple, TypedDict
MemberDefinitions = Iterator[_MemberDef]
Members = List[Tuple[str, object]]
| [
11748,
1330,
8019,
13,
76,
620,
15451,
198,
11748,
1330,
8019,
13,
22602,
198,
11748,
10104,
198,
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
3858,
1330,
19937,
6030,
198,
6738,
19720,
1330,
4889,
540,
11,
40806,
1352,
... | 3.54321 | 81 |
# To run all test in a Python environment:
import sys; sys.path.insert(0, "C:\Work\python\mikecore"); import tests.test_run_all
| [
2,
1675,
1057,
477,
1332,
287,
257,
11361,
2858,
25,
198,
11748,
25064,
26,
25064,
13,
6978,
13,
28463,
7,
15,
11,
366,
34,
7479,
12468,
59,
29412,
59,
76,
522,
7295,
15341,
1330,
5254,
13,
9288,
62,
5143,
62,
439,
628,
198
] | 3.023256 | 43 |
import numpy as np
import tensorflow as tf
import sys
from emotion_inferring.model.GCALSTM import GCALSTMCell
from emotion_inferring.model.MALSTM import MALSTMCell
from emotion_inferring.model.GCM import GCAttention
from emotion_inferring.model.SelfAttention import *
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
25064,
198,
198,
6738,
9942,
62,
259,
2232,
1806,
13,
19849,
13,
15916,
1847,
2257,
44,
1330,
20145,
1847,
2257,
9655,
695,
198,
6738,
9942,
62,
259,
... | 3.032609 | 92 |
# -*- coding: utf-8 -*-
from collections import namedtuple
from typing import Dict, List, Union
LDCreatorResult = namedtuple("LDCreatorResult", ["success", "json_ld_object", "keys", "messages"])
DATALAD_SCHEMA_BASE = "https://schema.datalad.org"
# Automatically translated study-element properties
STUDY_TRANSLATION_TABLE = {
SMMProperties.NAME: SMMSchemaOrgProperties.NAME,
SMMProperties.PRINCIPAL_INVESTIGATOR: SMMSchemaOrgProperties.ACCOUNTABLE_PERSON,
SMMProperties.KEYWORD: SMMSchemaOrgProperties.KEYWORDS,
SMMProperties.PURPOSE: SMMSchemaOrgProperties.ABSTRACT,
SMMProperties.START_DATE: SMMSchemaOrgProperties.DATE_CREATED
}
# Automatically translated person-element properties
PERSON_TRANSLATION_TABLE = {
SMMProperties.GIVEN_NAME: SMMSchemaOrgProperties.GIVEN_NAME,
SMMProperties.LAST_NAME: SMMSchemaOrgProperties.FAMILY_NAME,
SMMProperties.TITLE: SMMSchemaOrgProperties.HONORIFIC_SUFFIX,
SMMProperties.AFFILIATION: SMMSchemaOrgProperties.AFFILIATION,
SMMProperties.ORCID_ID: SMMSchemaOrgProperties.SAME_AS
}
# Automatically translated publication-element properties (to https://schema.org/ScholarlyArticle)
PUBLICATION_TRANSLATION_TABLE = {
SMMProperties.TITLE: SMMSchemaOrgProperties.HEADLINE,
SMMProperties.YEAR: SMMSchemaOrgProperties.DATE_PUBLISHED,
SMMProperties.DOI: SMMSchemaOrgProperties.SAME_AS,
SMMProperties.PAGES: SMMSchemaOrgProperties.PAGINATION,
SMMProperties.CORRESPONDING_AUTHOR: SMMSchemaOrgProperties.ACCOUNTABLE_PERSON
}
# Automatically translated dataset-element properties (to https://schema.org/Dataset)
DATASET_TRANSLATION_TABLE = {
SMMProperties.NAME: SMMSchemaOrgProperties.NAME,
SMMProperties.LOCATION: SMMSchemaOrgProperties.URL,
SMMProperties.KEYWORD: SMMSchemaOrgProperties.KEYWORDS,
SMMProperties.DESCRIPTION: SMMSchemaOrgProperties.DESCRIPTION
}
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
11,
4479,
628,
198,
43,
9697,
630,
273,
23004,
796,
3706,
83,
29291,
7203,
43,
969... | 2.600829 | 724 |
import os
import pickle
import re
from datetime import datetime as dt
import cv2
import numpy as np
from functools import wraps
from time import time
@timing
| [
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
6... | 3.192308 | 52 |
# -*- coding: utf-8 -*-
import Queue
# 先序遍历
# 递归实现先序遍历
# 递归实现中序遍历
if __name__ == '__main__':
b = BinaryTree()
b.add(1)
b.add(2)
b.add(3)
b.add(4)
b.add(5)
b.add(6)
b.add(7)
# b.p_traversal()
b.recursion_p(b.root)
print '========================='
b.recursion_af(b.root)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4670,
518,
628,
628,
220,
220,
220,
1303,
10263,
227,
230,
41753,
237,
34402,
235,
43889,
228,
628,
220,
220,
220,
1303,
16268,
222,
240,
37605,
240,
22522,
... | 1.522523 | 222 |
import numpy as np
import tensorflow as tf
if __name__ == '__main__':
np.random.seed(0)
img = np.random.randn(1, 10, 10, 1)
tf_img = tf.convert_to_tensor(img)
print(img[0, :, :, 0])
rois = np.array([[0, 0, 6, 6],
[2, 2, 8, 8]], dtype=np.float32)
tf_rois = tf.convert_to_tensor(rois, dtype=tf.float32)
tf_rois_norm = _normalize(rois, 10, 10)
total_crops_tf, gt_rois = _grid_features(tf_img, tf_rois_norm)
sess = tf.Session()
# print(sess.run(tf_rois_norm))
# for gt_roi in gt_rois:
# print(sess.run(gt_roi))
for crop in total_crops_tf:
print(sess.run(tf.squeeze(crop, axis=[3])))
print('==='*10)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
45941,
13,
25120,
13,
28826,
7,
15,
8,
628,
220,
220,
220,
33705,
... | 1.909589 | 365 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-11-09 10:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import lib.fields
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
22,
319,
2864,
12,
1157,
12,
2931,
838,
25,
1415,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.926829 | 82 |
import random
import string
from shutit_module import ShutItModule
| [
11748,
4738,
198,
11748,
4731,
198,
198,
6738,
4423,
270,
62,
21412,
1330,
18736,
1026,
26796,
628,
198
] | 3.888889 | 18 |
"""Time taken when baking lasagna
"""
EXPECTED_BAKE_TIME = 40
PREPARATION_TIME = 2
def bake_time_remaining(elapsed_bake_time):
"""Calculate the bake time remaining.
:param elapsed_bake_time: int baking time already elapsed.
:return: int remaining bake time derived from 'EXPECTED_BAKE_TIME'.
Function that takes the actual minutes the lasagna has been in the oven as
an argument and returns how many minutes the lasagna still needs to bake
based on the `EXPECTED_BAKE_TIME`.
"""
return EXPECTED_BAKE_TIME - elapsed_bake_time
def preparation_time_in_minutes(number_of_layers):
"""Calculating the preparation time for lasagna's layers.
:param number_of_layers: int layers user want to bake
:return: int return total time it will take to prepare layers
"""
return number_of_layers * PREPARATION_TIME
def elapsed_time_in_minutes(number_of_layers, elapsed_bake_time):
"""Calculating baking elapsed time
This function takes two numbers representing the number of layers & the time already spent
baking and calculates the total elapsed minutes spent cooking the lasagna.
"""
return number_of_layers * PREPARATION_TIME + elapsed_bake_time
| [
37811,
7575,
2077,
618,
16871,
39990,
48669,
198,
37811,
198,
49864,
9782,
1961,
62,
4339,
7336,
62,
34694,
796,
2319,
198,
198,
46437,
27082,
6234,
62,
34694,
796,
362,
198,
198,
4299,
28450,
62,
2435,
62,
2787,
1397,
7,
417,
28361,
... | 3.206349 | 378 |
import socket as s
try:
server_soc = s.socket(family=s.AF_INET,type=s.SOCK_STREAM)
server_soc.bind(('localhost',9999))
server_soc.listen(1)
print("<---- Server side ---->")
while True:
(clien_soc,(c_addr)) = server_soc.accept()
print("Connection Established from {0} with {1} port".format(c_addr[0],c_addr[1]))
clien_soc.send(bytes("What's your name?",'utf-8'))
print("Welcome {} to this amazing world of python".format(clien_soc.recv(1024).decode('utf-8')))
''' receives are disallowed so it gives error at line 14 because at line
15 we receive something from client '''
#clien_soc.shutdown(s.SHUT_RD)
''' this disallowed both receive and send '''
#clien_soc.shutdown(s.SHUT_RDWR)
clien_soc.send(bytes("What's your age?", 'utf-8'))
print("You can access the material") if int(clien_soc.recv(1024).decode('utf-8')) > 13 else print("Your age less than 13")
clien_soc.close()
#server_soc.close()
except Exception as e:
print(e)
| [
11748,
17802,
355,
264,
198,
198,
28311,
25,
628,
220,
220,
220,
4382,
62,
35634,
796,
264,
13,
44971,
7,
17989,
28,
82,
13,
8579,
62,
1268,
2767,
11,
4906,
28,
82,
13,
50,
11290,
62,
2257,
32235,
8,
198,
220,
220,
220,
4382,
62... | 2.368539 | 445 |
"""
79. Word Search
------------------
https://leetcode.com/problems/word-search/
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
Example:
board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
Given word = "ABCCED", return true.
Given word = "SEE", return true.
Given word = "ABCB", return false.
"""
s = Solution()
board = [["a"]]
assert s.exist(board, "a") is True
board = [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]]
assert s.exist(board, "ABCCED") is True
assert s.exist(board, "SEE") is True
assert s.exist(board, "ABCB") is False
board = [["C", "A", "A"], ["A", "A", "A"], ["B", "C", "D"]]
assert s.exist(board, "AAB") is True
| [
37811,
198,
3720,
13,
9678,
11140,
198,
1783,
438,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
4775,
12,
12947,
14,
198,
198,
15056,
257,
362,
35,
3096,
290,
257,
1573,
11,
1064,
611,
262,
1573,
7160,
287,
262,
... | 2.741641 | 329 |
"""
The purpose of this mini project for ES 8.01/8.012 is to show the orbit of
two objects in a bynary system using Newton's Gravitational Law, Conservation
of Momentum and Newton's 2nd Law. The visualization will be produced using the
different features of vpython.
Before running the code, in the command prompt for python/anaconda type:
pip install vpython
so that the code works and can use the vpython package.
"""
from vpython import *
import math
def body(position: "vector",rad: "metres",mass: "kg", texture= False, angle= 0.01,axis= vector(0,1,0)) -> "body":
"""
This function returns a body which could be a planet, star or asteroid.
position = A vector which represents the position of the body in our
coordinate system in metres, this normally can be arbitrarily
chosen as long as magintude of radius coincide.
radius = Radius of body in metres. Due to the limitations of showing
the actual sizes between, e.g. the sun and the earth, and fitting
them in a screen, after experimentation, it has been found that it is
better to choose a radius of 1 smaller order of magnitude than the
order of magnitudes in the position rather than the actual radius of
the two bodies.
mass = Mass of boody in kilograms.
texture = This is by default False as there are many bodies for which
we can't obtain a texture but if there is one, then this is assigned
as one of the attributes of the body.
angle = In radians, by default it is 0.01 and is the angle that the
the body is tilted respect to its axis.
axis = A vector which by default is (0,1,0) and represents the axis
of rotation of the body.
Using the class sphere(), this function adds new attributes to this
this class such as mass,angle and axis and sets some default conditions
too such as make_trail, interval, etc.
"""
body = sphere(pos= position, radius= rad, make_trail = True,trail_type='points', interval=10, retain=10)
#The folowing lines will add new atributes to the sphere object which is
#body.
body.mass = mass
body.angle = angle
body.axis = axis
if texture != False:
body.texture = texture
return body
def binary_system(body1,body2):
""""
This function takes as inputs two bodies and shows their motion
in space using Newton's Gravitational Laws and Momentum's Conservation.
body1 and body2 are two bodies which were created using the previous
body() function.
Some other constants are also defined at the beginning such as G,
the gravitational constant.
Moreover a new attribute will be added to the bodies which is
their momentum which is constantly changing in the while loop.
The position and the respective distances between the two bodies are
also constantly changing due to the infiinite while loop.
The function will end up producing an scenario where both bodies are
in a binary system orbiting around the center of mass of the system.
"""
scene.forward = vector(0,-.3,-1)
#This will be the intial viewpoint from which we will see the scene.
G = 6.7e-11
initial_velocity = vector(0,0,1e4)
#We stil need to resolve some of the physics behind this velocity, but
#initally for a system of masses 2e30 and 1e30, this was the intial
#velocity of the bigger mass and we know the code worked with this conditions,
#so later in the code, using some ratios respect to the original
#2e30 and 1e30 masses compared to the input ones and by multiplying them
#to this velocity, we can create a new initial velocity which would
#work for any masses we choose for the bodies.
dt = 1e5
#dt represents the difference in time that occurs for each impulse to be
#added to the momentums of each body.
#The following if/else statement sets big as the body with the bigger
#mass and small as the body with the smaller mass.
if body1.mass > body2.mass:
big = body1
small = body2
else:
big = body2
small = body1
#In here we will see how we use the following ratios to obtain the new
#value of the initial velocity for the big body. This was obtained after
#experimentation from an intial_velocity that we knew that worked for 2
#bodies. We will try to find a better way to do it with physics.
ratio_small_mass = small.mass/1e30
ration_big_masss = big.mass/2e30
ratio_masses = ratio_small_mass/ration_big_masss
initial_velocity = initial_velocity*ratio_masses
big.momentum = big.mass*initial_velocity
#Due that we assume we are in a closed system where momentum is conserved
#and that it was zero originally in the z direction, we establish
#the momentum of the small body as the opposite of the big body's momentum.
small.momentum = -big.momentum
#Use of while loop to run the scenario.
while True:
#Rate determines how fast we want to see the system moving,
#after some trials 50 seems adequate for this project.
rate(50)
r = small.pos - big.pos
#Use of the Gravitational Force formula to calculate F in the r
#direction, hence use of r.hat in the equation.
F = G * big.mass * small.mass * r.hat / mag(r)**2
#Using Newton's 2nd Law, the momentum of the big body will increase due
#to the impulse given by F*dt.
big.momentum += F*dt
#By consevatium of momentum, if one of them increases by F*dt,
#the other one has to decrease by the same amount.
small.momentum -= F*dt
#the respective position of each also changes as they get new momentum
#therefore we can change their position using dx = v*dt, in this case
#where the new v is given by dividing the new momentum by the mass
#of the body and multiplying it by how long this occurs which is dt.
big.pos += (big.momentum/big.mass)*dt
small.pos += (small.momentum/small.mass)*dt
#Finally using the rotate() function with original attributes of angle
#and axis from each body, we can make the bodies rotate.
big.rotate(angle = big.angle, axis = big.axis)
small.rotate(angle = small.angle, axis = small.axis)
if __name__ == "__main__":
pass
#Example of earth-sun system right below
#body1 = body(vector(0,0,0),2e10,2e30,"https://i.imgur.com/ejXbe1E.jpg")
#body2 = body(vector(1.5e11,0,0),1e10,6e24,"https://i.imgur.com/dl1sA.jpg",angle = 0.4, axis= vector(1,1,math.cos(23.5)))
#binary_system(body1,body2)
| [
37811,
201,
198,
464,
4007,
286,
428,
9927,
1628,
329,
13380,
807,
13,
486,
14,
23,
13,
30206,
318,
284,
905,
262,
13066,
286,
220,
201,
198,
11545,
5563,
287,
257,
416,
77,
560,
1080,
1262,
17321,
338,
32599,
22181,
3854,
11,
23702... | 2.715224 | 2,588 |
# Generated by Django 3.2 on 2021-04-12 10:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
import uuid
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
3023,
12,
1065,
838,
25,
940,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9... | 3.032787 | 61 |
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as interp
import scipy.constants as constants
import scipy.signal as signal
import sys, time, os
import bead_util as bu
fac = 425 * constants.elementary_charge
base_path = '/processed_data/comsol_data/patch_potentials/'
fnames = bu.find_all_fnames(base_path, ext='')
names = []
for filname in fnames:
parts = filname.split('/')
name = parts[-1].split('.')[0]
if name not in names:
names.append(name)
names = [#'patch_pot_2um_1Vrms_50um-deep-patches', \
#'patch_pot_2um_1Vrms_150um-deep-patches', \
#'patch_pot_2um_1Vrms_250um-deep-patches', \
#'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC', \
#
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed0', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed10', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed20', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed30', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed40', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed50', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed60', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed70', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed80', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed90', \
'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed100', \
]
base_name = 'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC_seed'
for seed in np.linspace(0,2000,21):
names.append(base_name + str(int(seed)))
#names = []
#base_name = 'patch_pot_2um_1Vrms_150um-deep-patches_4mmBC'
#names.append(base_name + '_FINGER')
#names.append(base_name + '_PATCH')
#names.append(base_name + '_PATCH-FINGER')
#print names
#style_dict = {0: '--', 1: ':', 2: '-', 3: '-.'}
#label_dict = {0: '$\pm$500mV on Fingers', 1: '1Vrms Patches', 2: 'Sum'}
#color_dict = {0: 'C0', 1: 'C1', 2: 'C2',}
style_dict = {}
color_dict = {}
for nameind in range(len(names)):
style_dict[nameind] = '-'
color_dict[nameind] = 'C0'
all_rms = [[], [], []]
mean_rms = []
finger_rms = []
N = 0
for nameind, name in enumerate(names):
print('Processing: ', name)
xx = np.load(open(base_path + name + '.xx', 'rb'))
yy = np.load(open(base_path + name + '.yy', 'rb'))
zz = np.load(open(base_path + name + '.zz', 'rb'))
field = np.load(open(base_path + name + '.field', 'rb'))
potential = np.load(open(base_path + name + '.potential', 'rb'))
pot_func = interp.RegularGridInterpolator((xx, yy, zz), potential)
field_func = []
for resp in 0,1,2:
field_func.append( interp.RegularGridInterpolator((xx, yy, zz), field[resp]) )
posvec = np.linspace(-50e-6, 50e-6, 101)
ones = np.ones_like(posvec)
xval = 12.0e-6
zval = 0.0e-6
eval_pts = np.stack((xval*ones, posvec, zval*ones), axis=-1)
ann_str = 'Sep: %0.2f um, Height: %0.2f um' % (xval*1e6, zval*1e6)
plt.figure(0)
plt.plot(posvec*1e6, pot_func(eval_pts), color='C0', ls=style_dict[nameind])
plt.figure(1, figsize=(7,5))
plt.title(name)
if nameind == 0:
plt.plot(posvec*1e6, field_func[0](eval_pts)*fac, label='fx', color='C0', \
ls=style_dict[nameind])
plt.plot(posvec*1e6, field_func[1](eval_pts)*fac, label='fy', color='C1', \
ls=style_dict[nameind])
plt.plot(posvec*1e6, field_func[2](eval_pts)*fac, label='fz', color='C2', \
ls=style_dict[nameind])
else:
plt.plot(posvec*1e6, field_func[0](eval_pts)*fac, color='C0', \
ls=style_dict[nameind])
plt.plot(posvec*1e6, field_func[1](eval_pts)*fac, color='C1', \
ls=style_dict[nameind])
plt.plot(posvec*1e6, field_func[2](eval_pts)*fac, color='C2', \
ls=style_dict[nameind])
plt.legend()
plt.xlabel('Displacement Along Cantilever Face [um]')
plt.ylabel('Force on 425e$^-$ [N]')
plt.annotate(ann_str, xy=(0.2, 0.9), xycoords='axes fraction')
plt.tight_layout()
if name == names[-1]:
plt.grid()
xx_plot = xx[xx > 0.9e-6]
rms_force = [[], [], []]
#rms_force = []
for sepind, sep in enumerate(xx_plot):
rms_val = 0.0
eval_pts = np.stack((sep*ones, posvec, zval*ones), axis=-1)
for resp in [0,1,2]:
forcevec = field_func[resp](eval_pts) * fac
rms_val += np.std(forcevec)
rms_force[resp].append(rms_val)
#rms_val *= 1.0 / np.sqrt(3)
#rms_val *= 1.0 / 3
#rms_force.append(np.sqrt(rms_val))
for resp in [0,1,2]:
all_rms[resp].append(rms_force[resp])
if 'FINGER' in name:
if 'PATCH' not in name:
finger_rms = np.copy(rms_force)
if not len(mean_rms):
mean_rms = rms_force
else:
mean_rms += rms_force
N += 1
#for resp in [0,1,2]:
# if resp == 0:
# plt.loglog(xx_plot*1e6, rms_force[resp]*fudge_dict[nameind], label=name, \
# ls=style_dict[resp], color=color_dict[nameind])
# else:
# plt.loglog(xx_plot*1e6, rms_force[resp]*fudge_dict[nameind], \
# ls=style_dict[resp], color=color_dict[nameind])
plt.figure(2)
#plt.title('RMS Force vs. X: Different Patch Realizations')
for resp in [0]:#,1,2]:
plt.loglog(xx_plot*1e6, rms_force[resp], color=color_dict[nameind], \
ls=style_dict[nameind])#, \
#label=label_dict[nameind])
plt.xlabel('X [um]')
plt.ylabel('RMS force on 425e$^-$ [N]')
plt.legend(loc=0)
#if '250um' in name:
'''
xz_plane = np.meshgrid(xx, zz, indexing='ij')
levels = np.linspace(np.min(potential), np.max(potential), 100) * 0.1
levels = 100
for i in range(10):
fig = plt.figure()
ax = fig.add_subplot(111)
cont = ax.contourf(xz_plane[0]*1e6, xz_plane[1]*1e6, potential[:,-(i+1),:], levels)
cbar = plt.colorbar(cont)
ax.set_xlabel('Displaccment Along Cantilever [um]')
ax.set_ylabel('Height [um]')
plt.tight_layout()
plt.show()
'''
mean_rms = [[], [], []]
std_rms = [[], [], []]
for resp in [0,1,2]:
mean_rms[resp] = np.array(all_rms[resp]).mean(axis=0)
std_rms[resp] = np.array(all_rms[resp]).std(axis=0)
max_xrms = np.array(all_rms[0]).max(axis=0)
min_xrms = np.array(all_rms[0]).min(axis=0)
std_xrms = np.array(all_rms[0]).std(axis=0)
mean_xrms = np.array(all_rms[0]).mean(axis=0)
plt.figure(4)
plt.loglog(xx_plot*1e6, mean_xrms, color='C0', ls='-', label='X')
plt.fill_between(xx_plot*1e6, mean_xrms+std_xrms, mean_xrms-std_xrms, \
color='C0', alpha=0.2, edgecolor='C0')
plt.loglog(xx_plot*1e6, mean_xrms+std_xrms, color='C0', ls=':', label='X+', \
alpha=0.4)
plt.loglog(xx_plot*1e6, mean_xrms-std_xrms, color='C0', ls=':', label='X-', \
alpha=0.4)
#plt.loglog(xx_plot*1e6, min_xrms, color='C1', ls='--', label='Xmin')
#plt.loglog(xx_plot*1e6, max_xrms, color='C2', ls='--', label='Xmax')
#plt.loglog(xx_plot*1e6, mean_rms[1] / N, color='C1', ls='-', label='Y')
#plt.loglog(xx_plot*1e6, mean_rms[2] / N, color='C2', ls='-', label='Z')
plt.grid()
plt.legend()
outarr = [xx_plot, mean_rms[0], mean_rms[1], mean_rms[2], \
std_rms[0], std_rms[1], std_rms[2]]
outarr = np.array(outarr)
np.save( open(base_path + '2um-1Vrms-patches_rms-force_vs_separation.npy', 'wb'), outarr)
#outarr2 = [xx_plot, finger_rms[0], finger_rms[1], finger_rms[2]]
#outarr2 = np.array(outarr2)
#np.save( open(base_path + 'bipolar-500mV-fingers_rms-force_vs_separation.npy', 'wb'), outarr2)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
629,
541,
88,
13,
3849,
16104,
378,
355,
987,
79,
198,
11748,
629,
541,
88,
13,
9979,
1187,
355,
38491,
198,
11748,
629,
5... | 1.914406 | 4,054 |
import numpy as np
import copy
from datetime import datetime, timedelta
import pandas as pd
from sklearn.neural_network import MLPClassifier
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import preprocessing
from sklearn.preprocessing import Imputer
import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
import classifiers
import db
import helper_functions
import optimization_trilateration
import itertools
pd.options.mode.chained_assignment = None # default='warn'
class ListOfRecords(list):
"""
list_of_records - list of records for one beacon for one gateway
"""
if __name__ == "__main__":
pass
# pd.options.mode.chained_assignment = None # default='warn'
# beacon_id = "0117C59B07A4"
# gateway_list = ['C9827BC63EE9', 'EF4DCFA41F7E', 'EDC36C497B43', 'EE5A181D4A27', 'D78A75B468C2', 'FF9AE92EE4C9','D13DF2E3B7E4','D9DD5DA69F7B','CD2DA08685AD']
# start_date = datetime(2016, 8, 13, 4, 27, 0, 0)
# end_date = datetime(2017, 8, 13, 4, 49, 0, 0)
# classifier_name = "test"
# classifier, scalar = classifiers.create_classifier(beacon_id, gateway_list, start_date, end_date, classifier_name)
# db.save_classifier(classifier, classifier_name, gateway_list, scalar)
# start_date = datetime(2017, 8, 13, 4, 49, 0, 0)
# end_date = datetime(2017, 8, 15, 04, 53, 0, 0)
# predictions = classifiers.use_classifier(beacon_id, start_date, end_date, classifier_name)
#above is what i'm using
# matched_timestamps = MatchedTimestamps()
# # specify what beacon, gateway and timerange you're interested in
# # filter length=None means no filter
# # if you put filter=10 for example you will use moving average over 10 seconds
# matched_timestamps.init_from_database('0117C59B07A4',
# ['C9827BC63EE9', 'EF4DCFA41F7E', 'EDC36C497B43', 'EE5A181D4A27', 'D78A75B468C2', 'FF9AE92EE4C9','D13DF2E3B7E4','D9DD5DA69F7B','CD2DA08685AD'],
# datetime(2016, 6, 29, 22, 00, 18, 0), datetime(2017, 8, 13, 4, 49, 0, 0),
# filter_length=3, slope_filter = True)
# matched_timestamps.two_d_plot('Training Data')
# matched_timestamps.replace_nan()
# matched_timestamps = matched_timestamps.remove_nan()
# matched_timestamps.standardize_training()
# scaler = matched_timestamps.standardize_scalar
# matched_timestamps.two_d_plot('Standardized Training Data')
# # split the entire datasat into training and testing
# training, testing = matched_timestamps.train_test_split(training_size=0.5, seed=None)
# labels = matched_timestamps.get_labels()
# # create a classfier using the trainging dataset
# svm = training.train_SVM()
# # check accuracy of the training dataset with training classifier
# accuracy = training.accuracy_of_classifier()
# print "accuracy of the training data is: " + str(accuracy)
# # assigin the classifier to the testing dataset
# testing.classifier = svm
# # check accuracy of the testing dataset with training classifier
# accuracy = testing.accuracy_of_classifier()
# print "accuracy of the testing data is: " + str(accuracy)
# #specify what beacon, gateway and timerange you're interested in
# #filter length=None means no filter
# #if you put filter=10 for example you will use moving average over 10 seconds
# # al_walk = MatchedTimestamps()
# # al_walk.init_from_database('0117C59B07A4',
# # ['EDC36C497B43', 'D78A75B468C2', 'EE5A181D4A27', 'C9827BC63EE9', 'D13DF2E3B7E4', 'EF4DCFA41F7E', 'FF9AE92EE4C9', 'D9DD5DA69F7B', 'CD2DA08685AD'],
# # datetime(2017, 8, 13, 04, 50, 0, 0), datetime(2017, 8, 13, 04, 53, 0, 0),
# # filter_length=3)
# al_walk = MatchedTimestamps()
# al_walk.init_from_database('0117C59B07A4',
# ['C9827BC63EE9', 'EF4DCFA41F7E', 'EDC36C497B43', 'EE5A181D4A27', 'D78A75B468C2', 'FF9AE92EE4C9','D13DF2E3B7E4','D9DD5DA69F7B','CD2DA08685AD'],
# datetime(2017, 8, 13, 4, 49, 0, 0), datetime(2017, 8, 15, 04, 53, 0, 0),
# filter_length=3)
# al_walk.two_d_plot('testing')
# al_walk.replace_nan()
# al_walk = al_walk.remove_nan()
# al_walk.standardize_testing(scaler)
# al_walk.two_d_plot('scaled_testing')
# al_walk.classifier = svm
# prediction = al_walk.predict()
# probabilites = al_walk.predict_proba()
# print probabilites
# #datetime(2017, 7, 11, 21, 12, 0, 0), datetime(2017, 7, 11, 21, 15, 28, 0),
# print prediction
#prediction = helper_functions.path_rules(prediction, probabilites,labels)
# probs = pd.DataFrame(probabilites)
# print(probs.to_csv(index=False, header=False))
# preds = pd.DataFrame(prediction) | [
11748,
299,
32152,
355,
45941,
198,
11748,
4866,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
1341,
35720,
13,
710,
1523,
62,
27349,
1330,
10373,
47,
9487,
7483,
198,
... | 2.580968 | 1,797 |
# coding=utf-8
try:
from src.testcase.GN_Y201H.case.GN_Y201H_SMART_LINK.GN_Y201H_SMART_LINK_001 import *
except ImportError as e:
print(e)
| [
2,
19617,
28,
40477,
12,
23,
198,
28311,
25,
198,
220,
220,
220,
422,
12351,
13,
9288,
7442,
13,
16630,
62,
56,
1264,
39,
13,
7442,
13,
16630,
62,
56,
1264,
39,
62,
12310,
7227,
62,
43,
17248,
13,
16630,
62,
56,
1264,
39,
62,
... | 2.1 | 70 |
#!/usr/bin/env python
import argparse
import complib
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
198,
11748,
2299,
571,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.526316 | 38 |
from concurrent import futures
import logging
import csv
import grpc
import meter_usage_pb2
import meter_usage_pb2_grpc
if __name__ == '__main__':
logging.basicConfig()
serve()
| [
6738,
24580,
1330,
25650,
198,
11748,
18931,
198,
11748,
269,
21370,
198,
198,
11748,
1036,
14751,
198,
11748,
16430,
62,
26060,
62,
40842,
17,
198,
11748,
16430,
62,
26060,
62,
40842,
17,
62,
2164,
14751,
628,
628,
198,
361,
11593,
367... | 2.96875 | 64 |
from aoc2015.day17 import packings, minimal_packings
| [
6738,
257,
420,
4626,
13,
820,
1558,
1330,
2353,
654,
11,
10926,
62,
8002,
654,
628,
198
] | 3.235294 | 17 |
"""Indicator model."""
class Indicator(object):
"""Indicator for FDC report.
Indicator for FDC reports have values for Q1, Q2, Q3, Q4 or/and a value.
it allows to follow the evolution of an indicator during the quarters and have its final value
or to follow an indicator with only a single value.
"""
def __init__(self, name, q1=None, q2=None, q3=None, q4=None, value=None):
"""Constructor returns an indicator.
it can be initilized with values for q1, q2, q3, q4 and another value
(cumulative)
Args:
name (str): Unique name (can be used to refer to the indicator)
q1 (float): Value at first quarter (optional)
q2 (float): Value at second quarter (optional)
q3 (float): Value at third quarter (optional)
q4 (float): Value at fourth quarter (optional)
value (float): A value of the indicator that has no relation to
a quarter: cumulative value, fixed value, etc. (optional)
"""
self.name = name
self.values = dict()
self.values["q1"] = q1
self.values["q2"] = q2
self.values["q3"] = q3
self.values["q4"] = q4
self.values["value"] = value
| [
37811,
5497,
26407,
2746,
526,
15931,
628,
198,
4871,
1423,
26407,
7,
15252,
2599,
628,
220,
220,
220,
37227,
5497,
26407,
329,
376,
9697,
989,
13,
628,
220,
220,
220,
1423,
26407,
329,
376,
9697,
3136,
423,
3815,
329,
1195,
16,
11,
... | 2.386194 | 536 |
"""
Functions shared by easypypi.py and licenses.py
Check: Separate file required to avoid circular import?
"""
import os
def create_file(filepath, content, **kwargs):
"""
Create a new file using writelines, and backup if filepath==setup.py.
Returns "file exists" if filepath already exists and overwrite = False
"""
if isinstance(content, str):
content = content.splitlines(True) # keep line breaks
if filepath.is_file():
if kwargs.get("overwrite"):
if filepath.name == "setup.py":
backup = filepath.with_name(f"{filepath.stem} - old.py")
filepath.replace(backup)
print(f"\n✓ Renamed {filepath.name} to:\n {backup.name}")
filepath.touch() # Create empty file to append lines to
else:
print(f"\nⓘ Existing file preserved:\n {filepath}")
return "file exists"
with filepath.open("a") as file:
file.writelines(content)
print(f"\n✓ Created new file:\n {filepath}")
def update_line(script_lines, old_line_starts, new_value):
""" Updates and returns script_lines, ready for writing to setup.py """
for index, line in enumerate(script_lines.copy()):
if line.lstrip().startswith(old_line_starts):
try:
if isinstance(new_value, list):
# Add quotation marks unless list
new_value = ", ".join(new_value)
else:
new_value = f'"{new_value}"'
old_line = script_lines[index]
script_lines[index] = old_line_starts + new_value.rstrip() + "\n"
if old_line != script_lines[index]:
print(
f"\n✓ Updated script line {index + 1}:\n{script_lines[index].rstrip()[:400]}"
)
break # only update first occurrence
except (IndexError, TypeError):
print(new_value, type(new_value))
return script_lines
| [
37811,
198,
24629,
2733,
4888,
416,
2562,
79,
4464,
72,
13,
9078,
290,
16625,
13,
9078,
198,
9787,
25,
8621,
30748,
2393,
2672,
284,
3368,
18620,
1330,
30,
198,
37811,
198,
198,
11748,
28686,
628,
198,
4299,
2251,
62,
7753,
7,
7753,
... | 2.155319 | 940 |
'''ToDo List GUI'''
import tkinter
from tkinter import END,ANCHOR
count=0
'''Defining root window'''
root=tkinter.Tk()
root.title('ToDo-GUI')
root.iconbitmap('todo.ico')
root.geometry('400x400')
root.resizable(0,0)
'''Define fonts and colors'''
my_font=('Times New Roman',12)
root_color='green'
button_color='#e2cff4'
root.config(bg=root_color)
'''Define functions'''
'''Defining frames'''
input_frame= tkinter.Frame(root,bg=root_color)
output_frame= tkinter.Frame(root,bg=root_color)
button_frame= tkinter.Frame(root,bg=root_color)
input_frame.pack()
output_frame.pack()
button_frame.pack()
'''Input frame layout'''
list_entry= tkinter.Entry(input_frame,width=35,borderwidth=3,font=my_font)
list_add_button= tkinter.Button(input_frame,text="Add",borderwidth=2,font=my_font,bg=button_color,command=add_item)
list_entry.grid(row=0,column=0,padx=5,pady=5)
list_add_button.grid(row=0,column=1,padx=5,pady=5,ipadx=5)
'''Output frame layout'''
my_scrollbar= tkinter.Scrollbar(output_frame)
my_listbox=tkinter.Listbox(output_frame,height=15,width=45,borderwidth=3,font=my_font,yscrollcommand=my_scrollbar.set)
'''Link scrollbar to listbox'''
my_scrollbar.config(command=my_listbox.yview)
my_listbox.grid(row=0,column=0)
my_scrollbar.grid(row=0,column=1,sticky="NS")
'''Button Frame layout'''
list_remove_button= tkinter.Button(button_frame,text="Remove Item",borderwidth=2,font=my_font,bg=button_color,command=remove_item)
list_clear_button= tkinter.Button(button_frame,text='Clear All',borderwidth=2,font=my_font,bg=button_color,command=clear_list)
save_button= tkinter.Button(button_frame,text='Save List',borderwidth=2,font=my_font,bg=button_color,command=save_list)
quit_button= tkinter.Button(button_frame,text='Quit',borderwidth=2,font=my_font,bg=button_color,command=root.destroy)
list_remove_button.grid(row=0,column=0,padx=2,pady=10)
list_clear_button.grid(row=0,column=1,padx=2,pady=10,ipadx=10)
save_button.grid(row=0,column=2,padx=2,pady=10,ipadx=10)
quit_button.grid(row=0,column=3,padx=2,pady=10,ipadx=25)
'''Open the previous list if available'''
open_list()
'''Run the root window's main loop'''
root.mainloop()\ | [
7061,
6,
2514,
5211,
7343,
25757,
7061,
6,
198,
11748,
256,
74,
3849,
198,
6738,
256,
74,
3849,
1330,
23578,
11,
1565,
3398,
1581,
198,
9127,
28,
15,
628,
198,
7061,
6,
7469,
3191,
6808,
4324,
7061,
6,
198,
15763,
28,
30488,
3849,
... | 2.483796 | 864 |
from sanic import Sanic
from sanic.response import json, text
from throttle import AsyncThrottle
from throttle.storage import RedisStorage
app = Sanic()
@app.route("/")
@AsyncThrottle("5/m", "request.remote_addr",
callback=lambda *args, **kwargs:
text('tirgger the throttle', status=503),
storage=RedisStorage('localhost',
port=6379, password=''))
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| [
6738,
5336,
291,
1330,
2986,
291,
198,
6738,
5336,
291,
13,
26209,
1330,
33918,
11,
2420,
198,
6738,
29976,
1330,
1081,
13361,
817,
305,
23296,
198,
6738,
29976,
13,
35350,
1330,
2297,
271,
31425,
628,
198,
1324,
796,
2986,
291,
3419,
... | 2.22467 | 227 |
import os, pickle, time, sys, traceback
import DataSetPrepare as DSP
datalist=[]
#dfile='./Datasets/D10_CKplus_10groups_groupedbythe_CKplus-group-details_preprocessdata_with_calibRotation_rescaleimg_geometricfeatures_facepatches_weberface_skip-contempV2.pkl'
#datalist.append(dfile)
#dfile='./Datasets/D33_KDEF_10G_rescaleimg_geometryfeature_patches_web.pkl'
#datalist.append(dfile)
#dfile='./Datasets/D40_jaffe_10groups_groupedbysubjects_rescaleimg_geometricfeatures_facepatches_weber.pkl'
#datalist.append(dfile)
#dfile='./Datasets/D16_CKPLUS_10G_Enlargeby2015CCV_10T.pkl'
#datalist.append(dfile)
#dfile='./Datasets/D34_KDEF_10G_Enlargeby2015CCV_10T.pkl'
#datalist.append(dfile)
#dfile='./Datasets/D43_JAFFE_10G_Enlargeby2015CCV_10T.pkl'
#datalist.append(dfile)
#pklname='I:/Data/detected_records/detected/data_with_geometry_and_facepatches.pkl'
#datalist.append(pklname)
#pklname='I:/Data/detected_records/undetected/undetected_data_with_geometry_and_facepatches.pkl'
#datalist.append(pklname)
pklname='I:/Data/OuluCasIA/D551_OuluCASIA_Weak_10G_V5_newGeo_newPatch.pkl'
datalist.append(pklname)
pklname='I:/Data/OuluCasIA/D553_OuluCASIA_Dark_10G_V5_newGeo_newPatch.pkl'
datalist.append(pklname)
pklname='I:/Data/OuluCasIA/D552_OuluCASIA_Strong_10G_V5_newGeo_newPatch.pkl'
filepath='I:/Data/OuluCasIA'
posfix='OuluCASIA_Weak_Dark_Strong_10G_V5_newGeo_newPatch'
datalist.append(pklname)
t1=time.time()
DSP.loadandMergeData_v2(datalist, 554, Df=False, Path = filepath, posfix=posfix)
t2=time.time()
print('Time consumed: %fs'%(t2-t1)) | [
11748,
28686,
11,
2298,
293,
11,
640,
11,
25064,
11,
12854,
1891,
201,
198,
11748,
6060,
7248,
37534,
533,
355,
360,
4303,
201,
198,
201,
198,
67,
10254,
396,
28,
21737,
201,
198,
2,
7568,
576,
28,
4458,
14,
27354,
292,
1039,
14,
... | 2.097333 | 750 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The tests."""
from unittest import TestCase
from fzbz import FizzBuzz
class FzBzTest(TestCase):
"""Fizz Buzz Test."""
def setUp(self):
"""Setup."""
self.obj = FizzBuzz()
def test_num(self):
"""The return value should be based of FizzBuzz."""
for n in range(100):
with self.subTest(n=n):
ret = self.obj()
exp = "FizzBuzz" if n % 3 == 0 and n % 5 == 0 else \
"Fizz" if n % 3 == 0 and n % 5 != 0 else \
"Buzz" if n % 3 != 0 and n % 5 == 0 else \
str(n)
self.assertEqual(ret, exp)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
464,
5254,
526,
15931,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
277,
14969,
89,
1330,
... | 1.91989 | 362 |
import inspect
from inspect import signature, getfullargspec
from functools import wraps
from typing import Union
from celery.canvas import chain, Signature
from celery.local import PromiseProxy
from celery.result import AsyncResult
from celery.utils.log import get_task_logger
from firexkit.result import wait_on_async_results_and_maybe_raise
from firexkit.bag_of_goodies import BagOfGoodies
from firexkit.task import parse_signature, FireXTask, undecorate, ReturnsCodingException, get_attr_unwrapped
logger = get_task_logger(__name__)
def returns(*args):
""" The decorator is used to allow us to specify the keys of the
dict that the task returns.
This is used only to signal to the user the inputs and outputs
of a task, and deduce what arguments are required for a chain.
"""
if not args or len(args) != len(set(args)):
raise ReturnsCodingException("@returns cannot contain duplicate keys")
return decorator
Signature.injectArgs = _inject_args_into_signature
def verify_chain_arguments(sig: Signature):
"""
Verifies that the chain is not missing any parameters. Asserts if any parameters are missing, or if a
reference parameter (@something) has not provider
"""
try:
tasks = sig.tasks
except AttributeError:
tasks = [sig]
missing = {}
previous = set()
ref_args = {}
undefined_indirect = {}
for task in tasks:
task_obj = sig.app.tasks[task.task]
partial_bound = set(signature(task_obj.run).bind_partial(*task.args).arguments.keys())
kwargs_keys = set(task.kwargs.keys())
if isinstance(task_obj, FireXTask):
required_args = task_obj.required_args
else:
required_args, _ = parse_signature(inspect.signature(task_obj.run))
missing_params = set(required_args) - (partial_bound | kwargs_keys | previous)
if missing_params:
missing[task_obj.name] = missing_params
previous |= (partial_bound | kwargs_keys)
# get @returns for next loop
try:
current_task_returns = set(task_obj.return_keys)
previous |= current_task_returns
except AttributeError:
try:
current_task_returns = set(get_attr_unwrapped(task_obj, '_decorated_return_keys'))
previous |= current_task_returns
except AttributeError:
current_task_returns = set()
# If any of the previous keys has a dynamic return, then we can't do any validation
if any(FireXTask.is_dynamic_return(k) for k in current_task_returns):
break
# check for validity of reference values (@ arguments) that are consumed by this microservice
necessary_args = getfullargspec(undecorate(task_obj)).args
new_ref = {k: v[1:] for k, v in task.kwargs.items() if
hasattr(v, 'startswith') and v.startswith(BagOfGoodies.INDIRECT_ARG_CHAR)}
ref_args.update(new_ref)
for needed in necessary_args:
if needed in ref_args and ref_args[needed] not in previous:
undefined_indirect[needed] = ref_args[needed]
if missing:
txt = ''
for k, v in missing.items():
for arg in v:
if txt:
txt += '\n'
service_path = k.split('.')
txt += ' ' + arg + '\t: required by "%s" (%s)' % \
(service_path[-1], '.'.join(service_path[0:-1]))
raise InvalidChainArgsException('Missing mandatory arguments: \n%s' % txt, missing)
if undefined_indirect:
txt = "\n".join([k + ": " + v for k, v in undefined_indirect.items()])
raise InvalidChainArgsException('Chain indirectly references the following unavailable parameters: \n%s' %
txt, undefined_indirect)
return True
def set_execution_options(sig: Signature, **options):
"""Set arbitrary executions options in every task in the :attr:`sig`"""
try:
[task.set(**options) for task in sig.tasks]
except AttributeError:
sig.set(**options)
def set_priority(sig: Signature, priority: int):
"""Set the :attr:`priority` execution option in every task in :attr:`sig`"""
set_execution_options(sig, priority=priority)
def set_queue(sig: Signature, queue):
"""Set the :attr:`queue` execution option in every task in :attr:`sig`"""
set_execution_options(sig, queue=queue)
def set_soft_time_limit(sig: Signature, soft_time_limit):
"""Set the :attr:`soft_time_limit` execution option in every task in :attr:`sig`"""
set_execution_options(sig, soft_time_limit=soft_time_limit)
Signature.set_execution_options = set_execution_options
Signature.set_priority = set_priority
Signature.set_queue = set_queue
Signature.set_soft_time_limit = set_soft_time_limit
Signature.set_label = set_label
Signature.get_label = get_label
Signature.enqueue = _enqueue
| [
11748,
10104,
198,
6738,
10104,
1330,
9877,
11,
651,
12853,
853,
16684,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
18725,
1924,
13,
5171,
11017,
1330,
6333,
11,
34894,
198,
6738,
18725,
1924,
... | 2.497249 | 1,999 |
import datetime
import time
from google.appengine.api import urlfetch
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
from tweetengine import oauth
from tweetengine.i18n import _
ROLE_ANYONE = 0
ROLE_USER = 1
ROLE_ADMINISTRATOR = 2
ROLES = [
(ROLE_ANYONE, _("Anyone")),
(ROLE_USER, _("User")),
(ROLE_ADMINISTRATOR, _("Administrator"))
]
ROLE_IDS = [id for id,name in ROLES]
| [
11748,
4818,
8079,
198,
11748,
640,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
1330,
2956,
1652,
7569,
198,
6738,
23645,
13,
1324,
18392,
13,
15042,
13,
75,
8937,
1330,
4876,
36560,
198,
6738,
23645,
13,
1324,
18392,
13,
2302,
1330,
... | 2.579787 | 188 |
#!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: make_training_list.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import argparse
import os
import numpy as np
def make_training_list(wav_files, max_files_per_directory, bad_list=None):
"""
Create a training list file given the directory where the wav files are organized into subdirectories,
with one subdirectory per keyword to be recognized. This training list will exclude any files
already referenced by the 'testing_list.txt' or 'validation_list.txt'. It will also shuffle the
training list, and the testing_list and validation_list to ensure training and testing is fair.
"""
if not os.path.isdir(wav_files):
print("wav_file directory not found")
return
bad = []
if bad_list:
if not os.path.isfile(bad_list):
bad_list = os.path.join(wav_files, bad_list)
if not os.path.isfile(bad_list):
print("Bad list {} not found".format(bad_list))
else:
bad = load_list_file(bad_list)
testing_list = os.path.join(wav_files, "testing_list.txt")
if not os.path.isfile(testing_list):
print("### error: testing list '{}' file not found".format(testing_list))
return
validation_list = os.path.join(wav_files, "validation_list.txt")
if not os.path.isfile(validation_list):
print("### error: validation list '{}' file not found".format(validation_list))
return
if len(bad) > 0:
print("Cleaning: {}".format(testing_list))
clean_list_file(testing_list, bad)
print("Cleaning: {}".format(validation_list))
clean_list_file(validation_list, bad)
print("Shuffling: {}".format(testing_list))
shuffle(testing_list, testing_list)
print("Shuffling: {}".format(validation_list))
shuffle(validation_list, validation_list)
ignore_list = load_list_file(testing_list)
ignore_list += load_list_file(os.path.join(wav_files, "validation_list.txt"))
list_file_name = os.path.join(wav_files, "training_list.txt")
keywords = []
for f in os.listdir(wav_files):
if is_background_noise(f): # skip the background noise folder.
continue
elif os.path.isdir(os.path.join(wav_files, f)):
keywords += [f]
keywords.sort()
skipped = 0
count = 0
with open(list_file_name, "w") as f:
for dir_name in keywords:
files = os.listdir(os.path.join(wav_files, dir_name))
file_list = []
for n in files:
if os.path.splitext(n)[1] == ".wav":
entry = dir_name + "/" + n
if entry in bad:
continue
if entry in ignore_list:
skipped += 1
else:
file_list += [entry]
count += 1
max = max_files_per_directory
if len(file_list) < max_files_per_directory:
max = len(file_list)
# shuffle the training list also
file_list = np.random.choice(np.array(file_list), max, replace=False)
print("choosing {} random files from {}".format(len(file_list), dir_name))
for e in file_list:
f.write(e + "\n")
# write the categories file listing the keywords found.
categories_file = os.path.join(wav_files, "categories.txt")
with open(categories_file, "w") as f:
for n in keywords:
f.write(n + "\n")
print("Created {}".format(categories_file))
print("Created {} containing {} wav files".format(list_file_name, count))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Create training_list.txt \
that includes a randomly selected set of wav files from the given directory tree \
up to the given maximum number of files per directory.")
# options
arg_parser.add_argument("--max_files_per_directory", "-max",
help="Maximum number of files to include from each subdirectory (default: 5000)",
type=int, default=5000)
arg_parser.add_argument("--wav_files", "-w", help="Directory containing the wav files to process", required=True)
arg_parser.add_argument("--bad_list", "-b",
help="List of bad files to exclude from testing, training and validation lists")
args = arg_parser.parse_args()
make_training_list(args.wav_files, args.max_files_per_directory, args.bad_list)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
29113,
29113,
29113,
21017,
198,
2,
198,
2,
220,
4935,
25,
220,
13302,
47238,
18252,
10074,
357,
23304,
8,
198,
2,
220,
9220,
25,
220,
220,
220,
220,
787,
62,
34409,
62,
4868,
... | 2.446981 | 1,971 |
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats
from microsim.opencl.ramp.params import Params
from microsim.opencl.ramp.simulator import Simulator
from microsim.opencl.ramp.snapshot import Snapshot
from microsim.opencl.ramp.disease_statuses import DiseaseStatus
nplaces = 8
npeople = 50000
nslots = 8
| [
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
629,
541,
88,
13,
34242,
198,
6738,
4580,
14323,
13,
9654,
565,
13,
81,
696,
13,
37266,
1330,
2547,
4105,
198,
6738,
4580,
1432... | 2.991304 | 115 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for chef_validator.clients.glance_client """
from __future__ import unicode_literals
import mock
from oslo_config import cfg
from chef_validator.clients.glance_client import AmbiguousNameException
from chef_validator.clients.glance_client import GlanceClient
from chef_validator.tests.unit.base import ValidatorTestCase
CONF = cfg.CONF
CONF.import_group('clients_glance', 'chef_validator.clients.glance_client')
class AmbiguousNameExceptionTestCase(ValidatorTestCase):
"""Tests for class AmbiguousNameException """
def setUp(self):
"""Create a AmbiguousNameException instance """
super(AmbiguousNameExceptionTestCase, self).setUp()
self.item = AmbiguousNameException()
def tearDown(self):
"""Cleanup the AmbiguousNameException instance """
super(AmbiguousNameExceptionTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
class GlanceClientTestCase(ValidatorTestCase):
"""Tests for class GlanceClient """
def setUp(self):
"""Create a GlanceClient instance """
super(GlanceClientTestCase, self).setUp()
keystone_client = mock.MagicMock()
CONF.set_override('endpoint', "1234", group='clients_glance')
self.client = GlanceClient(keystone_client)
self.client._client = mock.MagicMock()
def test_list(self):
"""Tests for method list """
self.client._client.images.list = mock.MagicMock()
self.client._client.images.list.return_value = (mock.MagicMock()
for n in range(2))
observed = tuple(self.client.list())
expected = ("1", "2")
self.assertEqual(len(expected), len(observed))
def test_get_by_name(self):
"""Tests for method get_by_name """
input = "MyInput"
expected = None
observed = self.client.get_by_name(input)
self.assertEqual(expected, observed)
def test_getById(self):
"""Tests for method getById """
self.client._client.images.get.return_value = mock.MagicMock()
expected = {"id": "myid", "name": "myname"}
observed = self.client.getById("1234")
self.assertEqual(len(expected.items()), len(observed.items()))
def test_create_glance_client(self):
"""Tests for method create_glance_client """
keystone_client = mock.MagicMock()
keystone_client.auth_token = "1234"
keystone_client.service_catalog = mock.MagicMock()
self.client._client = mock.MagicMock()
observed = self.client.create_glance_client(keystone_client)
expected = None
self.assertEqual(expected, observed)
def tearDown(self):
"""Cleanup the GlanceClient instance """
super(GlanceClientTestCase, self).tearDown()
self.m.UnsetStubs()
self.m.ResetAll()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
... | 2.598355 | 1,337 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import re
import socket
import sys
# Colors
from argparse import Namespace
green = '\033[32m'
red = '\033[31m'
reset = '\033[0m'
yellow = '\033[33m'
banner = """
██████ ▓█████ ▓█████ ██ ▄█▀ ██▓ ▓█████ ▄▄▄ ██ ▄█▀
▒██ ▒ ▓█ ▀ ▓█ ▀ ██▄█▒ ▓██▒ ▓█ ▀▒████▄ ██▄█▒
░ ▓██▄ ▒███ ▒███ ▓███▄░ ▒██░ ▒███ ▒██ ▀█▄ ▓███▄░
▒ ██▒▒▓█ ▄ ▒▓█ ▄ ▓██ █▄ ▒██░ ▒▓█ ▄░██▄▄▄▄██ ▓██ █▄
▒██████▒▒░▒████▒░▒████▒▒██▒ █▄ ░██████▒░▒████▒▓█ ▓██▒▒██▒ █▄
▒ ▒▓▒ ▒ ░░░ ▒░ ░░░ ▒░ ░▒ ▒▒ ▓▒ ░ ▒░▓ ░░░ ▒░ ░▒▒ ▓▒█░▒ ▒▒ ▓▒
░ ░▒ ░ ░ ░ ░ ░ ░ ░ ░░ ░▒ ▒░ ░ ░ ▒ ░ ░ ░ ░ ▒ ▒▒ ░░ ░▒ ▒░
░ ░ ░ ░ ░ ░ ░░ ░ ░ ░ ░ ░ ▒ ░ ░░ ░
░ ░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░░ ░
"""
project = {
'project': 'Seek Leak',
'version': 'v0.0.1',
'author': 'skate_forever',
'labs' : 'iBLISSLabs'
}
try:
sys.path.insert(0, os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), ('./')))))
from modules.arguments import *
from modules.docker import Docker
from modules.haveibeenpwned import HaveIBeenPwned
from modules.local import Local
from modules.pwndb import PwnDB
except ModuleNotFoundError as error:
print("Error: ")
print(" Verify the existence of the directory: ./modules/")
print(" Please install the requirements: $ sudo -H pip3 install -r requirements.txt")
sys.exit(1)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
17802,
198,
11748,
25064,
198,
198,
2,
29792,
198,
6738,
1822,
29572,
1330,
285... | 1.634221 | 976 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookup tables used by surface distance metrics."""
# neighbour_code_to_normals is a lookup table.
# For every binary neighbour code
# (2x2x2 neighbourhood = 8 neighbours = 8 bits = 256 codes)
# it contains the surface normals of the triangles (called "surfel" for
# "surface element" in the following). The length of the normal
# vector encodes the surfel area.
#
# created using the marching_cube algorithm
# see e.g. https://en.wikipedia.org/wiki/Marching_cubes
# pylint: disable=line-too-long
neighbour_code_to_normals = [
[[0, 0, 0]],
[[0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
[[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
[[0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
[[0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
[[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
[[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
[[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
[[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
[[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
[[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
[[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
[[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
[[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
[[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
[[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
[[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
[[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
[[0.125, -0.125, -0.125]],
[[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
[[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0, 0, 0]]]
# pylint: enable=line-too-long
| [
2,
15069,
2864,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 1.595133 | 11,752 |
# -*- coding: utf-8 -*-
!pip uninstall tensorflow
!pip install tensorflow-gpu==2.0.0
!pip install keras-tuner==1.0.0 --no-dependencies
!pip install terminaltables colorama
import tensorflow as tf
import kerastuner as kt
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) =\
tf.keras.datasets.fashion_mnist.load_data()
import numpy as np
train_images = np.asarray(train_images, dtype=np.float32) / 255.0
test_images = np.asarray(test_images, dtype=np.float32) / 255.0
train_images = train_images.reshape(60000,784)
test_images = test_images.reshape(10000,784)
train_labels = tf.keras.utils.to_categorical(train_labels)
dataset = tf.data.Dataset.from_tensor_slices((train_images.astype(np.float32),
train_labels.astype(np.float32)))
k = 0
for i in dataset:
print(i)
if k==2:
break
k+=1
!rm -rf results
def build_model(hp):
"""Builds a dnn model."""
model = mod()
return model
temp_learning_rate_schedule = CustomSchedule(512)
plt.plot(temp_learning_rate_schedule(tf.range(4000, dtype=tf.float32)))
plt.ylabel("Learning Rate")
plt.xlabel("Train Step")
tuner = MyTuner(
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective('fin_loss', 'min'),
max_trials=10),
hypermodel=build_model,
directory='results',
project_name='mnist_custom_training')
tuner.search(train_ds=dataset)
m = tuner.get_best_models()[0]
k = 0
for i in dataset:
#print(i[1])
print('REAL -> {}'.format(np.argmax(i[1])))
pred = m(i[0][np.newaxis,:])
print('PRED -> {}'.format(np.argmax(pred,axis = 1)))
k+=1
if k==50:
break
best_hps = tuner.get_best_hyperparameters()[0]
print(best_hps.values)
for i in tuner.get_best_hyperparameters(num_trials = 10):
print(i.values)
tuner.results_summary()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
0,
79,
541,
43194,
11192,
273,
11125,
198,
198,
0,
79,
541,
2721,
11192,
273,
11125,
12,
46999,
855,
17,
13,
15,
13,
15,
198,
198,
0,
79,
541,
2721,
41927,
29... | 2.304239 | 802 |
LOCAL_DATA_ROOT = '/Users/varunn/Documents/projects/Sentence_Embeddings/'
INP_PATH = LOCAL_DATA_ROOT + 'raw_data/'
INT_PATH = LOCAL_DATA_ROOT + 'inter_data/'
PRE_PATH = LOCAL_DATA_ROOT + 'preprocessed_data/'
MODEL_PATH = LOCAL_DATA_ROOT + 'modelling/'
RESULTS_PATH = LOCAL_DATA_ROOT + 'results/'
INP_TICKETS_FN = INP_PATH + 'data_tickets_{}.csv.gz'
INP_TICKET_BODIES_FN = INP_PATH + 'data_ticket_bodies_{}.csv.gz'
INP_NOTES_FN = INP_PATH + 'data_notes_{}.csv.gz'
INP_NOTE_BODIES_FN = INP_PATH + 'data_note_bodies_{}.csv.gz'
PREPARED_TICKET_FN = INT_PATH + 'prepared_data_tickets_{}.csv'
PREPARED_NOTE_FN = INT_PATH + 'prepared_data_notes_{}.csv'
PREPROCESSED_TICKET_FN = PRE_PATH + 'preprocessed_data_tickets_{}.pkl'
PREPROCESSED_NOTE_FN = PRE_PATH + 'preprocessed_data_notes_{}.pkl'
MODELLING_DATA_TICKET_FN = MODEL_PATH + 'data_for_modelling_tickets_A1_{}.csv'
MODELLING_DATA_COMBINED_FN = MODEL_PATH + 'data_for_modelling_combined_{}_{}.csv'
TEST_START_DATE = '2018-12-30'
ID_COL = 'id'
SOURCE_COL = 'source'
SUBJECT_COL = 'subject'
DESC_COL = 'description_html'
DATE_COL = 'date'
L1_FEAT_COL = 'sub_desc_custom_cleaned'
SAMPLE = 'sample'
SEED = 2018
CONTRACTION = False
FILTER_POS = False
ITER = 'ITER'
KEEP_ENG = True
LEM = False
LOWER_CASE = True
MAX_DF = 'MAX_DF'
MAX_FEATURES = 'MAX_FEATURES'
MIN_DF = 'MIN_DF'
NGRAM_RANGE = 'NGRAM_RANGE'
POS_VAR = ('N', 'J')
RANDOM_STATE = 'RANDOM_STATE'
REGEX_CLEANING = False
REMOVE_IGNORE_WORDS = False
REMOVE_NONALPHA = True
REMOVE_NUMERALS = False
REMOVE_PUNKT = False
REMOVE_STOP = True
SPELL_CHECK = False
STEM = True
SVD_COMP = 'SVD_COMP'
TEMPLATE_REMOVAL = False
TEMPLATE_START_STRING = ''
TOKENIZE = False
MODEL_CONFIG = {
'chat': {
SVD_COMP: 400,
MAX_FEATURES: 50000,
MIN_DF: 20,
MAX_DF: 0.5,
NGRAM_RANGE: (1, 3),
RANDOM_STATE: 2018,
ITER: 10
},
'email': {
SVD_COMP: 1200,
MAX_FEATURES: 80000,
MIN_DF: 20,
MAX_DF: 0.5,
NGRAM_RANGE: (1, 3),
RANDOM_STATE: 2018,
ITER: 10
}
}
| [
29701,
1847,
62,
26947,
62,
13252,
2394,
796,
31051,
14490,
14,
7785,
20935,
14,
38354,
14,
42068,
14,
31837,
594,
62,
31567,
6048,
654,
14,
6,
198,
1268,
47,
62,
34219,
796,
37347,
1847,
62,
26947,
62,
13252,
2394,
1343,
705,
1831,
... | 2.06841 | 994 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility library for combining tf-agents and dm-control."""
import collections
import copy
import functools
from typing import Sequence, Text
from absl import logging
import gin
import numpy as np
from tf_agents.environments import dm_control_wrapper
from tf_agents.environments import py_environment
from tf_agents.environments import wrappers
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.typing import types
from distracting_control import suite
@gin.configurable
def load_pixels(
    domain_name,
    task_name,
    observation_key = 'pixels',
    pixels_only = True,
    task_kwargs=None,
    environment_kwargs=None,
    visualize_reward = False,
    render_kwargs=None,
    env_wrappers = (),
    camera_kwargs=None,
    background_kwargs=None,
    color_kwargs=None,
):
  """Builds a pixel-observation environment from a domain and task name.

  The distracting_control suite is loaded with the requested distraction
  settings, wrapped for tf-agents, and then passed through each wrapper
  class in `env_wrappers` in order.

  Args:
    domain_name: A string containing the name of a domain.
    task_name: A string containing the name of a task.
    observation_key: Key under which the pixel observation is stored in the
      observation `OrderedDict`. Defaults to 'pixels'.
    pixels_only: If True (default), only pixel observations are kept; if
      False, the original state observations are kept alongside the pixels.
    task_kwargs: Optional `dict` of keyword arguments for the task.
    environment_kwargs: Optional `dict` of keyword arguments for the
      environment.
    visualize_reward: Optional `bool`; if True, rendered object colours
      indicate the per-step reward. Default False.
    render_kwargs: Optional `dict` of keyword arguments for rendering.
    env_wrappers: Iterable of wrapper classes applied, in order, to the
      wrapped environment.
    camera_kwargs: Optional dict of camera distraction arguments.
    background_kwargs: Optional dict of background distraction arguments.
    color_kwargs: Optional dict of color distraction arguments.

  Returns:
    The requested environment.

  Raises:
    ImportError: if the dm_control module was not available.
  """
  base_env = suite.load(
      domain_name,
      task_name,
      task_kwargs=task_kwargs,
      environment_kwargs=environment_kwargs,
      visualize_reward=visualize_reward,
      camera_kwargs=camera_kwargs,
      background_kwargs=background_kwargs,
      color_kwargs=color_kwargs,
      pixels_only=pixels_only,
      render_kwargs=render_kwargs,
      pixels_observation_key=observation_key)
  wrapped = dm_control_wrapper.DmControlWrapper(base_env, render_kwargs)
  # Fold each wrapper class over the environment, innermost first.
  return functools.reduce(lambda env, wrap: wrap(env), env_wrappers, wrapped)
@gin.configurable
def load_dm_env_for_training(
    env_name,
    frame_shape=(84, 84, 3),
    episode_length=1000,
    action_repeat=4,
    frame_stack=3,
    task_kwargs=None,
    render_kwargs=None,
    # Camera args
    camera_camera_id=None,
    camera_horizontal_delta=None,
    camera_vertical_delta=None,
    camera_max_vel=None,
    camera_vel_std=None,
    camera_roll_delta=None,
    camera_max_roll_vel=None,
    camera_roll_std=None,
    camera_max_zoom_in_percent=None,
    camera_max_zoom_out_percent=None,
    camera_limit_to_upper_quadrant=None,
    camera_seed=None,
    # Background args
    background_dynamic=None,
    background_ground_plane_alpha=None,
    background_video_alpha=None,
    background_dataset_path=None,
    background_dataset_videos=None,
    background_num_videos=None,
    background_shuffle_buffer_size=None,
    # Color args
    color_max_delta=None,
    color_step_std=None,
    # Other args
    stack_within_repeat=False):
  """Gin-configurable builder of training environment with augmentations.

  Each `camera_*`, `background_*` and `color_*` argument is forwarded to the
  corresponding distraction only when it is not None; if no argument of a
  group is set, that whole group is passed as None (distraction disabled).

  Args:
    env_name: '<domain>-<task>' name of the dm_control environment.
    frame_shape: (width, height, channels) of the rendered pixel frames.
    episode_length: Episode length in underlying environment steps.
    action_repeat: Number of times each action is applied per agent step.
    frame_stack: Number of frames stacked into one observation.
    task_kwargs: Optional `dict` of keyword arguments for the task.
    render_kwargs: Optional `dict` of keyword arguments for rendering.
    camera_camera_id: Camera distraction argument (see distracting_control).
    camera_horizontal_delta: Camera distraction argument.
    camera_vertical_delta: Camera distraction argument.
    camera_max_vel: Camera distraction argument.
    camera_vel_std: Camera distraction argument.
    camera_roll_delta: Camera distraction argument.
    camera_max_roll_vel: Camera distraction argument.
    camera_roll_std: Camera distraction argument.
    camera_max_zoom_in_percent: Camera distraction argument.
    camera_max_zoom_out_percent: Camera distraction argument.
    camera_limit_to_upper_quadrant: Camera distraction argument.
    camera_seed: Camera distraction argument.
    background_dynamic: Background distraction argument.
    background_ground_plane_alpha: Background distraction argument.
    background_video_alpha: Background distraction argument.
    background_dataset_path: Background distraction argument.
    background_dataset_videos: Background distraction argument.
    background_num_videos: Background distraction argument.
    background_shuffle_buffer_size: Background distraction argument.
    color_max_delta: Color distraction argument.
    color_step_std: Color distraction argument.
    stack_within_repeat: If True, stack intermediate frames of each repeat.

  Returns:
    The wrapped training environment.
  """
  # Keep only the explicitly-set (non-None) arguments of each distraction
  # group; an empty group collapses to None, matching the previous behavior.
  camera_kwargs = {
      key: value
      for key, value in {
          'camera_id': camera_camera_id,
          'horizontal_delta': camera_horizontal_delta,
          'vertical_delta': camera_vertical_delta,
          'max_vel': camera_max_vel,
          'vel_std': camera_vel_std,
          'roll_delta': camera_roll_delta,
          'max_roll_vel': camera_max_roll_vel,
          'roll_std': camera_roll_std,
          'max_zoom_in_percent': camera_max_zoom_in_percent,
          'max_zoom_out_percent': camera_max_zoom_out_percent,
          'limit_to_upper_quadrant': camera_limit_to_upper_quadrant,
          'seed': camera_seed,
      }.items()
      if value is not None
  } or None
  background_kwargs = {
      key: value
      for key, value in {
          'dynamic': background_dynamic,
          'ground_plane_alpha': background_ground_plane_alpha,
          'video_alpha': background_video_alpha,
          'dataset_path': background_dataset_path,
          'dataset_videos': background_dataset_videos,
          'num_videos': background_num_videos,
          'shuffle_buffer_size': background_shuffle_buffer_size,
      }.items()
      if value is not None
  } or None
  color_kwargs = {
      key: value
      for key, value in {
          'max_delta': color_max_delta,
          'step_std': color_step_std,
      }.items()
      if value is not None
  } or None
  # Log the resolved groups, consistent with load_dm_env_for_eval.
  logging.info('camera_kwargs are: %s', str(camera_kwargs))
  logging.info('background_kwargs are: %s', str(background_kwargs))
  logging.info('color_kwargs are: %s', str(color_kwargs))
  return load_dm_env(env_name, frame_shape, episode_length, action_repeat,
                     frame_stack, task_kwargs, render_kwargs, camera_kwargs,
                     background_kwargs, color_kwargs, stack_within_repeat)
@gin.configurable
def load_dm_env_for_eval(
    env_name,
    frame_shape=(84, 84, 3),
    episode_length=1000,
    action_repeat=4,
    frame_stack=3,
    task_kwargs=None,
    render_kwargs=None,
    # Camera args
    camera_camera_id=None,
    camera_horizontal_delta=None,
    camera_vertical_delta=None,
    camera_max_vel=None,
    camera_vel_std=None,
    camera_roll_delta=None,
    camera_max_roll_vel=None,
    camera_roll_std=None,
    camera_max_zoom_in_percent=None,
    camera_max_zoom_out_percent=None,
    camera_limit_to_upper_quadrant=None,
    camera_seed=None,
    # Background args
    background_dynamic=None,
    background_ground_plane_alpha=None,
    background_video_alpha=None,
    background_dataset_path=None,
    background_dataset_videos=None,
    background_num_videos=None,
    background_shuffle_buffer_size=None,
    # Color args
    color_max_delta=None,
    color_step_std=None,
    # Other args
    stack_within_repeat=False):
  """Gin-configurable builder of eval environment with augmentations.

  Each `camera_*`, `background_*` and `color_*` argument is forwarded to the
  corresponding distraction only when it is not None; if no argument of a
  group is set, that whole group is passed as None (distraction disabled).

  Args:
    env_name: '<domain>-<task>' name of the dm_control environment.
    frame_shape: (width, height, channels) of the rendered pixel frames.
    episode_length: Episode length in underlying environment steps.
    action_repeat: Number of times each action is applied per agent step.
    frame_stack: Number of frames stacked into one observation.
    task_kwargs: Optional `dict` of keyword arguments for the task.
    render_kwargs: Optional `dict` of keyword arguments for rendering.
    camera_camera_id: Camera distraction argument (see distracting_control).
    camera_horizontal_delta: Camera distraction argument.
    camera_vertical_delta: Camera distraction argument.
    camera_max_vel: Camera distraction argument.
    camera_vel_std: Camera distraction argument.
    camera_roll_delta: Camera distraction argument.
    camera_max_roll_vel: Camera distraction argument.
    camera_roll_std: Camera distraction argument.
    camera_max_zoom_in_percent: Camera distraction argument.
    camera_max_zoom_out_percent: Camera distraction argument.
    camera_limit_to_upper_quadrant: Camera distraction argument.
    camera_seed: Camera distraction argument.
    background_dynamic: Background distraction argument.
    background_ground_plane_alpha: Background distraction argument.
    background_video_alpha: Background distraction argument.
    background_dataset_path: Background distraction argument.
    background_dataset_videos: Background distraction argument.
    background_num_videos: Background distraction argument.
    background_shuffle_buffer_size: Background distraction argument.
    color_max_delta: Color distraction argument.
    color_step_std: Color distraction argument.
    stack_within_repeat: If True, stack intermediate frames of each repeat.

  Returns:
    The wrapped eval environment.
  """
  # Keep only the explicitly-set (non-None) arguments of each distraction
  # group; an empty group collapses to None, matching the previous behavior.
  camera_kwargs = {
      key: value
      for key, value in {
          'camera_id': camera_camera_id,
          'horizontal_delta': camera_horizontal_delta,
          'vertical_delta': camera_vertical_delta,
          'max_vel': camera_max_vel,
          'vel_std': camera_vel_std,
          'roll_delta': camera_roll_delta,
          'max_roll_vel': camera_max_roll_vel,
          'roll_std': camera_roll_std,
          'max_zoom_in_percent': camera_max_zoom_in_percent,
          'max_zoom_out_percent': camera_max_zoom_out_percent,
          'limit_to_upper_quadrant': camera_limit_to_upper_quadrant,
          'seed': camera_seed,
      }.items()
      if value is not None
  } or None
  background_kwargs = {
      key: value
      for key, value in {
          'dynamic': background_dynamic,
          'ground_plane_alpha': background_ground_plane_alpha,
          'video_alpha': background_video_alpha,
          'dataset_path': background_dataset_path,
          'dataset_videos': background_dataset_videos,
          'num_videos': background_num_videos,
          'shuffle_buffer_size': background_shuffle_buffer_size,
      }.items()
      if value is not None
  } or None
  color_kwargs = {
      key: value
      for key, value in {
          'max_delta': color_max_delta,
          'step_std': color_step_std,
      }.items()
      if value is not None
  } or None
  logging.info('camera_kwargs are: %s', str(camera_kwargs))
  logging.info('background_kwargs are: %s', str(background_kwargs))
  logging.info('color_kwargs are: %s', str(color_kwargs))
  return load_dm_env(env_name, frame_shape, episode_length, action_repeat,
                     frame_stack, task_kwargs, render_kwargs, camera_kwargs,
                     background_kwargs, color_kwargs, stack_within_repeat)
def load_dm_env(env_name,
                frame_shape=(84, 84, 3),
                episode_length=1000,
                action_repeat=4,
                frame_stack=3,
                task_kwargs=None,
                render_kwargs=None,
                camera_kwargs=None,
                background_kwargs=None,
                color_kwargs=None,
                stack_within_repeat=False):
  """Returns an environment from a domain name, task name.

  `env_name` has the form '<domain>-<task>'. The pixel environment is
  wrapped with action repeat / frame stacking and a TimeLimit chosen so
  the episode still spans `episode_length` underlying steps.
  """
  domain_name, task_name = env_name.split('-')
  logging.info('Loading environment.')
  render_kwargs = render_kwargs or {}
  render_kwargs['width'] = frame_shape[0]
  render_kwargs['height'] = frame_shape[1]
  # Quadruped uses a different default camera than the other domains.
  default_camera_id = 2 if domain_name == 'quadruped' else 0
  if 'camera_id' not in render_kwargs:
    render_kwargs['camera_id'] = default_camera_id
  if camera_kwargs and 'camera_id' not in camera_kwargs:
    camera_kwargs['camera_id'] = default_camera_id
  pixel_env = load_pixels(
      domain_name,
      task_name,
      task_kwargs=task_kwargs,
      render_kwargs=render_kwargs,
      camera_kwargs=camera_kwargs,
      background_kwargs=background_kwargs,
      color_kwargs=color_kwargs)
  stacked_env = FrameStackActionRepeatWrapper(
      pixel_env,
      action_repeat=action_repeat,
      stack_size=frame_stack,
      stack_within_repeat=stack_within_repeat)
  # Shorten episode length: ceil-divide so partial repeats still count.
  max_episode_steps = (episode_length + action_repeat - 1) // action_repeat
  return wrappers.TimeLimit(stacked_env, max_episode_steps)
@gin.configurable
class FrameStackActionRepeatWrapper(wrappers.PyEnvironmentBaseWrapper):
"""Environment wrapper for stacking and action repeat."""
  def _step(self, action):
    """Steps the environment.

    Applies `action` for up to `self._action_repeat` underlying steps,
    summing the rewards, and maintains the frame-stacking buffer.

    Args:
      action: Action forwarded unchanged to the wrapped environment on
        every repeated step.

    Returns:
      A `ts.TimeStep` whose reward is the sum over the repeated steps and
      whose 'pixels' observation is the channel-wise (axis=2)
      concatenation of the frame buffer when stacking is enabled.
    """
    if self.current_time_step().is_last():
      return self.reset()
    total_reward = 0
    for _ in range(self._action_repeat):
      time_step = self._env.step(action)
      # When stacking *within* the repeat, every intermediate frame is kept.
      if self._frames is not None and self._stack_within_repeat:
        self._frames.append(time_step.observation['pixels'])
      total_reward += time_step.reward
      # Stop repeating early at episode boundaries.
      if time_step.is_first() or time_step.is_last():
        break
    # Only add the last frame of the action repeat if we don't stack within.
    if self._frames is not None and not self._stack_within_repeat:
      self._frames.append(time_step.observation['pixels'])
    # Keep the reward dtype consistent with the wrapped environment's.
    total_reward = np.asarray(
        total_reward, dtype=np.asarray(time_step.reward).dtype)
    # Stack frames.
    # NOTE(review): assumes self._frames is a bounded buffer (e.g. a deque
    # with maxlen) so old frames are evicted on append -- confirm in the
    # constructor, which is not visible in this chunk.
    if self._frames is not None:
      time_step.observation['pixels'] = np.concatenate(self._frames, axis=2)
    return ts.TimeStep(time_step.step_type, total_reward, time_step.discount,
                       time_step.observation)
  def _reset(self):
    """Starts a new sequence and returns the first `TimeStep`."""
    time_step = self._env.reset()
    # Initial frame stacks
    # Seed the buffer with `_stack_size` copies of the first frame so the
    # very first stacked observation already has full channel depth.
    if self._frames is not None:
      for _ in range(self._stack_size):
        self._frames.append(time_step.observation['pixels'])
    if self._frames:
      time_step.observation['pixels'] = np.concatenate(self._frames, axis=2)
    return ts.TimeStep(time_step.step_type, time_step.reward,
                       time_step.discount, time_step.observation)
  def observation_spec(self):
    """Defines the observations provided by the environment.

    Returns the spec with the 'pixels' channel dimension adjusted for
    frame stacking; `self._new_observation_spec` is presumably computed in
    the constructor (not visible in this chunk).
    """
    return self._new_observation_spec
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33160,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.732381 | 5,179 |
#
# Copyright 2019 Google LLC
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import subprocess
import logging
import tempfile
| [
2,
198,
2,
15069,
13130,
3012,
11419,
198,
2,
198,
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
... | 4.013699 | 219 |
import pytest
from elpis.wrappers.objects.interface import KaldiInterface
@pytest.fixture
def pipeline_upto_step_1(tmpdir):
    """
    PyTest fixture: builds a pipeline up to step 1 (a processed dataset).

    NOTE(review): the original docstring claimed "executes once per
    session", but without `scope='session'` this fixture is
    function-scoped and runs once per test -- confirm the intended scope.

    Returns:
        (KaldiInterface, dataset) tuple rooted under the pytest tmpdir.
    """
    # Step 0
    # ======
    # Create a Kaldi interface directory (where all the associated files/objects
    # will be stored).
    kaldi = KaldiInterface(f'{tmpdir}/state')
    # Step 1
    # ======
    # Set up a dataset to train data on.
    ds = kaldi.new_dataset('dataset_x')
    ds.add_directory('/recordings/transcribed')
    ds.select_importer('Elan')
    ds.process()
    return (kaldi, ds)
def test_model_training(pipeline_upto_step_1):
    """
    Test training the model.

    Builds a pronunciation dictionary from the fixture's dataset, links it
    to a new model, and checks the trained flag flips after `train()`.
    """
    kaldi, ds = pipeline_upto_step_1
    pd = kaldi.new_pron_dict('pron_dict_y')
    pd.link(ds)
    pd.set_l2s_path('/recordings/letter_to_sound.txt')
    pd.generate_lexicon()
    m = kaldi.new_model('model_z')
    m.link(ds, pd)
    # Idiomatic boolean asserts (original compared with `== False`/`== True`,
    # which PEP 8 discourages); the superfluous trailing `return` is dropped.
    assert not m.has_been_trained()
    m.build_structure()
    m.train()
    assert m.has_been_trained()
# TODO: Determine how to achieve further testing without wasting time (training takes a while).
| [
11748,
12972,
9288,
198,
198,
6738,
1288,
79,
271,
13,
29988,
11799,
13,
48205,
13,
39994,
1330,
509,
37566,
39317,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198,
4299,
11523,
62,
37623,
78,
62,
9662,
62,
16,
7,
22065,
15908,
2599,
19... | 2.489496 | 476 |
from .datathief import * | [
6738,
764,
19608,
776,
2086,
1330,
1635
] | 3.428571 | 7 |
"""AvailabilityTest class"""
from django.contrib.auth.models import User
from django.test import TestCase
from member_manager.models import Availability
from member_manager.models import Profile
class AvailabilityTest(TestCase):
    """Tests for the Availability model's string representation."""

    def test_str(self):
        """An Availability renders as '<weekday> <start> - <end>'."""
        # BUG FIX: the original decorated this test with a stray
        # `@classmethod`, which is wrong for a unittest/Django test method
        # (it binds `self` to the class); the decorator has been removed.
        # NOTE(review): `self.profile` and `self.user` are referenced but no
        # `setUpTestData`/`setUp` is visible in this chunk -- confirm the
        # fixtures are created elsewhere (TODO).
        period = Availability.objects.create(profile=self.profile,
                                             weekday=Availability.SUNDAY,
                                             start_time=Availability.NINE_AM,
                                             end_time=Availability.ONE_PM,
                                             created_by=self.user,
                                             modified_by=self.user)
        self.assertEqual(str(period), 'Sunday 9:00 am - 1:00 pm')
| [
37811,
29841,
14402,
1398,
37811,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
198,
6738,
2888,
62,
37153,
13,
27530,
1330,
43138,
198,
6738,
... | 2.01432 | 419 |
# /* BitCompression
# This sample is provided as-is, without any warranty.
#
# (c) 2007 ViALUX GmbH. All rights reserved.
# */
#
# // BlackWhiteSample.cpp : Defines the entry point for the console application.
# //
#
# #include <TCHAR.h>
# #include <windows.h>
# #include <conio.h>
# #include <stdio.h>
# #include <stdlib.h>
# #include "alp.h"
# #include <iostream>
# #include <string>
# #include <dirent.h>
# extern "C" int __fseeki64(FILE *, __int64, int);
# //extern "C" __int64 __cdec1__ftelli64(FILE *);
#
# int _tmain(int /*argc*/, _TCHAR** /* *argv[]*/)
# {
# ALP_ID nDevId, nSeqId1, nSeqId2, nSeqId3;
# long nDmdType;
# __int64 nSizeX, nSizeY;
# long nReturn;
# //const long nPictureTime = 1000000; // in us
# UCHAR *pImageData = NULL;
# long nOffset = 1024; // divide the frame LUT in 2
#
#
#
# FILE *file, *ordered;
# const char *filename, *filevec;
# int filesize;
# UCHAR *block1, *block2, *temp;
#
# long nCurrent;
#
# long nBit;
# long sortie;
# tAlpProjProgress QueueInfo;
# tFlutWrite LutArray;
#
#
# // Allocate the ALP high-speed device
# /* sortie = AlpDevAlloc( ALP_DEFAULT, ALP_DEFAULT, &nDevId);
# printf("%ld \n",sortie);
# _tprintf( _T("Press any key to stop ALP projection") );
# do { _getch(); } while (_kbhit()) ; */
# if (ALP_OK != AlpDevAlloc(ALP_DEFAULT, ALP_DEFAULT, &nDevId)) return 1;
#
# //SD : Reverses black into white. Caution : takes two frames to be complete
# //if (ALP_OK != AlpProjControl(nDevId, ALP_PROJ_INVERSION, 1)) printf("errorProjInv\n");
#
# // Inquire DMD type
# if (ALP_OK != AlpDevInquire(nDevId, ALP_DEV_DMDTYPE, &nDmdType)) return 1;
# switch (nDmdType) {
# case ALP_DMDTYPE_XGA_055A:
# case ALP_DMDTYPE_XGA_055X:
# case ALP_DMDTYPE_XGA_07A:
# nSizeX = 1024; nSizeY = 768;
# break;
# case ALP_DMDTYPE_DISCONNECT:
# case ALP_DMDTYPE_1080P_095A:
# nSizeX = 1920; nSizeY = 1080;
# break;
# case ALP_DMDTYPE_WUXGA_096A:
# nSizeX = 1920; nSizeY = 1200;
# break;
# default:
# // unsupported DMD type
#
# return 1;
# }
#
#
# //Prompt .bin and .vec filenames
# using namespace std;
# // const string Binvec_Path = "g:/BINVECS/";
# // const string Binvec_Path = "H:\BINVECS/";
# // const string Binvec_Path = "../../../BINVECS/";
# //const string Binvec_Path = "L:\BINVECS/";
# // const string Binvec_Path = "../../../BINVECS/";// BINVECS / ";
# const string Binvec_Path = "E:\BINVECS/";
# cout << "Hello\n";
# DIR *dir;
# struct dirent *ent;
# if ((dir = opendir(Binvec_Path.c_str())) != NULL) {
# /* print all the files and directories within directory */
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL) {
# printf("%d %s\n", j, ent->d_name);
# j = j + 1;
# }
# closedir(dir);
# }
# else {
# /* could not open directory */
# perror("empty dir");
# return EXIT_FAILURE;
# }
# cout << "Enter your user number : ";
# int user;
# cin >> user;
# if ((dir = opendir(Binvec_Path.c_str())) != NULL) {
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL && j< user) {
# j = j + 1;
# }
# }
# else {
# /* could not open directory */
# perror("");
# return EXIT_FAILURE;
# }
# printf("\n");
# const string user_name = (ent->d_name);
# printf("Welcome %s! \n", ent->d_name);
# closedir(dir);
#
# const string Bin_Path = Binvec_Path + user_name + "/Bin/";
# const string Vec_Path = Binvec_Path + user_name + "/Vec/";
#
# //printf ("%s", Bin_Path);
#
#
# if ((dir = opendir(Bin_Path.c_str())) != NULL) {
# /* print all the files and directories within directory */
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL) {
# printf("%d %s\n", j, ent->d_name);
# j = j + 1;
# }
# closedir(dir);
# }
# else {
# /* could not open directory */
# perror("empty dir");
# return EXIT_FAILURE;
# }
#
#
# cout << "Enter the number of a .bin file : ";
# int bin;
# cin >> bin;
#
# if ((dir = opendir(Bin_Path.c_str())) != NULL) {
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL && j< bin) {
# j = j + 1;
# }
# }
# else {
# /* could not open directory */
# perror("empty dir");
# return EXIT_FAILURE;
# }
# printf("\n");
#
# string bin_name;
# bin_name = (ent->d_name);
# bin_name = Bin_Path + bin_name;
# printf("Chosen .bin file : %s\n", ent->d_name);
# closedir(dir);
#
# printf("\n");
#
# if ((dir = opendir(Vec_Path.c_str())) != NULL) {
# /* print all the files and directories within directory */
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL) {
# printf("%d %s\n", j, ent->d_name);
# j = j + 1;
# }
# closedir(dir);
# }
# else {
# /* could not open directory */
# perror("");
# return EXIT_FAILURE;
# }
#
# //cout << "You choosed the bin file" << bin << ".\n";
# cout << "Enter the number of a .vec file : ";
# int vec;
# cin >> vec;
#
# if ((dir = opendir(Vec_Path.c_str())) != NULL) {
# ent = readdir(dir);
# ent = readdir(dir);
# int j = 0;
# while ((ent = readdir(dir)) != NULL && j< vec) {
# j = j + 1;
# }
# }
# else {
# /* could not open directory */
# perror("");
# return EXIT_FAILURE;
# }
# printf("\n");
# string vec_name;
# vec_name = (ent->d_name);
# vec_name = Vec_Path + vec_name;
# printf("Chosen .vec file : %s\n", ent->d_name);
# closedir(dir);
# printf("\n");
#
#
# //prompt Frequency of refresh
# cout << "Enter the frame rate (in Hz) : ";
# double rate;
# cin >> rate;
# const long nPictureTime = 1.0 / rate * 1000000;
# printf("\n");
#
# //prompt for advanced features
# cout << "Advanced features (y/n) ? ";
# string adv;
# cin >> adv;
# printf("\n");
#
#
# double nF = 200;
# if (adv.compare("y") == 0){
#
# //prompt for nFrames
# cout << "Number of frames in the LUT (usually 500) ? ";
# cin >> nF;
# printf("\n");
#
# //Prompt the bit depth
# /* cout << "Enter the bit depth (8 unless you want to go to high speeds) : ";
# double bitDepth;
# cin >> bitDepth;
# nBit = bitDepth;
# printf("\n");*/
# }
# const long nFrames = nF; // max 43690 binary frames
#
# ////////////////////////////////////////////////////////////////////////////////////////////
# //Read Vec file
#
# long int temp1, temp2, temp3, temp4, temp5;
#
# //filevec="compte2.vec";
# filevec = vec_name.c_str();
#
# __int64 numberOfVecFrames = 0;
#
# file = fopen(filevec, "r");
# if (file == NULL) {
# printf("Invalid file name. \n");
# }
# printf("File Vec opened. \n");
#
# fscanf(file, "%ld %ld %ld %ld %ld", &temp1, &temp2, &temp3, &temp4, &temp5);
#
# while (fscanf(file, "%ld %ld %ld %ld %ld", &temp1, &temp2, &temp3, &temp4, &temp5) != EOF)
# {
# numberOfVecFrames++;
# }
# //numberOfVecFrames = 40 * 3600 * 1;
# printf("NbFramesPlayed = %d. \n", numberOfVecFrames);
# rewind(file);
# fscanf(file, "%ld %ld %ld %ld %ld", &temp1, &temp2, &temp3, &temp4, &temp5);
#
# __int64 * FrameNumbers = new __int64[numberOfVecFrames];
# int i = 0;
# while (fscanf(file, "%ld %ld %ld %ld %ld", &temp1, &temp2, &temp3, &temp4, &temp5) != EOF)
# {
# FrameNumbers[i] = temp2;
# i++;
# }
# fclose(file);
#
#
# long Nloop, Nremain;
# Nloop = numberOfVecFrames / (nOffset * 2);
# Nremain = numberOfVecFrames % (nOffset * 2);
#
# printf("Nloop = %d, Nremain = %d \n", Nloop, Nremain);
#
#
# /////////////////////////////////////////////////////////////////////////////////////
#
#
# // Read .bin header
#
# // filename
# //filename="compte.bin";
# filename = bin_name.c_str();
#
# // open file
# file = fopen(filename, "rb");
# if (file == NULL)
# {
# printf("Failed to open file for read access.");
# }
#
# //check file size
# _fseeki64(file, 0L, SEEK_END);
# filesize = ftell(file);
# rewind(file);
#
# // read header
# short header[4];
# fread(header, sizeof(short), 4, file);
# printf("header SizeX = %d, SizeY = %d, NbFrames = %d . \n", header[0], header[1], header[2]);
# printf("Filesize : %d ", filesize);
#
# int header_size = sizeof(short)* 4;
#
# printf("OK1");
#
# // SD: the display window will be placed at the middle of the DMD screen
# nBit = header[3];
# int width = header[0] / 8 * nBit;
# int height = header[1];
#
# // SD: this trick allows to read compact movies, generated at the bit level. This compression is made at the expense
# // of the gray level precision.
# int nSizeX_bit_trick = nSizeX / 8 * nBit;
#
# // SD: coordinates of the display window
# int beg_w = (nSizeX_bit_trick - width) / 2;
# //int end_w = beg_w + width;
# int beg_h = (nSizeY - height) / 2;
# //int end_h = beg_h + height;
#
# /////////////////////////////////////////////////////////////////////////////////////
#
# // SEQ 1 Allocate a first sequence of nFrames
# if (ALP_OK != AlpSeqAlloc(nDevId, nBit, nFrames, &nSeqId1)) printf("errorSeqAlloc\n"); //8bit
#
# printf("buffer size %d", nFrames*nSizeX_bit_trick*nSizeY);
#
# // buffer
# block1 = NULL;
# block1 = (UCHAR *)calloc(nFrames*nSizeX_bit_trick*nSizeY, 1);
#
# if (block1 == NULL)
# {
# printf("Failed to read data 1.\n");
# }
#
# // read file
# for (int i = 0; i<nFrames; i++)
# {
# //printf("Writing frame %d \n",FrameNumbers[i]);
# _fseeki64(file, FrameNumbers[i] * width*height + header_size, SEEK_SET);
# // SD: the display window is located at the center of the screen of the DMD
# for (int line = 0; line < height; line++){
# fread(block1 + i*(nSizeX_bit_trick*nSizeY) + (beg_h + line)*nSizeX_bit_trick + beg_w, 1, width, file);
# }
# }
#
#
# // Set up image timing
# //if (ALP_OK != AlpSeqControl(nDevId, nSeqId1, ALP_BITNUM, nBit)) printf("errorBinaryMode\n");
# if (nBit == 1){
# if (ALP_OK != AlpSeqControl(nDevId, nSeqId1, ALP_DATA_FORMAT, ALP_DATA_BINARY_TOPDOWN)) {
# printf("errorBinaryMode3\n");
# }
# }
# if (ALP_OK != AlpSeqTiming(nDevId, nSeqId1, ALP_DEFAULT, nPictureTime,
# ALP_DEFAULT, ALP_DEFAULT, ALP_DEFAULT)) return 1;
#
# /////////////////////////////////////////////////////////////////////////////////////
# // SEQ 2 Allocate a second sequence of nFrames
#
# printf("OK1");
# if (ALP_OK != AlpSeqAlloc(nDevId, nBit, nFrames, &nSeqId2)) printf("errorSeqAlloc\n"); //8bit
#
# // Read more frames
#
# // buffer
# block2 = NULL;
# block2 = (UCHAR *)calloc(nFrames*nSizeX_bit_trick*nSizeY, 1);
#
# if (block2 == NULL)
# {
# printf("Failed to read data 2.\n");
# }
#
# // read file
# for (int i = nFrames; i<2 * nFrames; i++)
# {
# //printf("Writing frame %d \n",FrameNumbers[i]);
# _fseeki64(file, (FrameNumbers[i])*width*height + header_size, SEEK_SET);
# for (int line = 0; line < height; line++){
# fread(block2 + (i - nFrames)*(nSizeX_bit_trick*nSizeY) + (beg_h + line)*nSizeX_bit_trick + beg_w, 1, width, file);
# }
# }
#
#
# // Set up image timing
# if (nBit == 1){
# if (ALP_OK != AlpSeqControl(nDevId, nSeqId2, ALP_DATA_FORMAT, ALP_DATA_BINARY_TOPDOWN)){
# printf("errorBinaryMode3\n");
# }
# }
# if (ALP_OK != AlpSeqTiming(nDevId, nSeqId2, ALP_DEFAULT, nPictureTime,
# ALP_DEFAULT, ALP_DEFAULT, ALP_DEFAULT)) return 1;
#
#
# ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
# /// PLAY ON DMD
# printf("OK2");
# //Set up queue mode
# if (ALP_OK != AlpProjControl(nDevId, ALP_PROJ_QUEUE_MODE, ALP_PROJ_SEQUENCE_QUEUE)) return 1;
# printf("OK3");
# // Transmit images into ALP memory
# nReturn = AlpSeqPut(nDevId, nSeqId1, ALP_DEFAULT, ALP_DEFAULT, block1);
# if (ALP_OK != nReturn) return 1;
# printf("OK4");
# // Start Sequence 1 in queue mode
# if (ALP_OK != AlpProjStart(nDevId, nSeqId1)) return 1;
# _tprintf(_T("Seq1 \n"));
# printf("OK5");
# // Transmit images into ALP memory
# nReturn = AlpSeqPut(nDevId, nSeqId2, ALP_DEFAULT, ALP_DEFAULT, block2);
# if (ALP_OK != nReturn) return 1;
# printf("OK6");
# // Start sequence 2
# if (ALP_OK != AlpProjStart(nDevId, nSeqId2)) return 1;
# _tprintf(_T("Seq2 \n"));
#
# _tprintf(_T("infernal loop start \n"));
#
# int ts = 30;
# long j = 2 * nFrames;
#
# while (j<numberOfVecFrames)
# { /////////////////////// SEQ1
#
#
# //fread(block1,1,nFrames*nSizeX*nSizeY,file);
#
# i = 0;
# while (i<nFrames && j<numberOfVecFrames)
# {
# //printf("Writing frame %d \n",FrameNumbers[j]);
# _fseeki64(file, (FrameNumbers[j])*width*height + header_size, SEEK_SET);
# for (int line = 0; line < height; line++){
# fread(block1 + i*(nSizeX_bit_trick*nSizeY) + (beg_h + line)*nSizeX_bit_trick + beg_w, 1, width, file);
# }
# i++;
# j++;
# }
#
# /// test sequence in play
# AlpProjInquireEx(nDevId, ALP_PROJ_PROGRESS, &QueueInfo);
# printf("SeqID = %d , Queue NWaiting = %d, Nframes = %d . \n",
# QueueInfo.SequenceId, QueueInfo.nWaitingSequences, QueueInfo.nFramesPerSubSequence
# );
#
# while (QueueInfo.nWaitingSequences >= 1 || QueueInfo.SequenceId == 0)
# {
# if (QueueInfo.SequenceId == 0)
# printf("Bug with AlpProjInquireEx: I will try again \n");
# AlpProjInquireEx(nDevId, ALP_PROJ_PROGRESS, &QueueInfo);
# Sleep(ts);
# }
#
#
# AlpProjControl(nDevId, ALP_PROJ_RESET_QUEUE, ALP_DEFAULT);
# printf("Seq 1 Finished . \n");
#
# if (_kbhit()) break;
#
# // Free Seq1
# nReturn = AlpSeqFree(nDevId, nSeqId1);
# _tprintf(_T(" ALP STATUS %d \n"), nReturn);
#
# //Reallocate Sequence 1
# if (ALP_OK != AlpSeqAlloc(nDevId, nBit, nFrames, &nSeqId1)) printf("errorSeqAlloc\n"); // 1bit
# _tprintf(_T("okalloc \n"));
#
# // Set up image timing
# if (nBit == 1){
# if (ALP_OK != AlpSeqControl(nDevId, nSeqId1, ALP_DATA_FORMAT, ALP_DATA_BINARY_TOPDOWN)) {
# printf("errorBinaryMode3\n");
# }
# }
# if (ALP_OK != AlpSeqTiming(nDevId, nSeqId1, ALP_DEFAULT, nPictureTime,
# ALP_DEFAULT, ALP_DEFAULT, ALP_DEFAULT)) return 1;
#
# // Transmit images into Seq1
# nReturn = AlpSeqPut(nDevId, nSeqId1, ALP_DEFAULT, ALP_DEFAULT, block1);
# _tprintf(_T(" ALP STATUS %d \n"), nReturn);
#
# // Start Sequence 1 in queue mode
# if (ALP_OK != AlpProjStart(nDevId, nSeqId1)) return 1;
# _tprintf(_T("Seq1 \n"));
#
#
# if (_kbhit()) break;
#
# ///////////////// SEQ2
#
# //fread(block2,1,nFrames*nSizeX*nSizeY,file);
# i = 0;
# while (i<nFrames && j<numberOfVecFrames)
# {
# //printf("Writing frame %d \n",FrameNumbers[j]);
# _fseeki64(file, (FrameNumbers[j])*width*height + header_size, SEEK_SET);
# for (int line = 0; line < height; line++){
# fread(block2 + i*(nSizeX_bit_trick*nSizeY) + (beg_h + line)*nSizeX_bit_trick + beg_w, 1, width, file);
# }
# i++;
# j++;
# }
#
# AlpProjInquireEx(nDevId, ALP_PROJ_PROGRESS, &QueueInfo);
# printf("SeqID = %d , Queue NWaiting = %d, Nframes = %d . \n",
# QueueInfo.SequenceId, QueueInfo.nWaitingSequences, QueueInfo.nFramesPerSubSequence
# );
#
# while (QueueInfo.nWaitingSequences >= 1 || QueueInfo.SequenceId==0)
# {
# if (QueueInfo.SequenceId == 0)
# printf("Bug with AlpProjInquireEx: I will try again \n");
# AlpProjInquireEx(nDevId, ALP_PROJ_PROGRESS, &QueueInfo);
# Sleep(ts);
# }
#
#
# AlpProjControl(nDevId, ALP_PROJ_RESET_QUEUE, ALP_DEFAULT);
# printf("Seq 2 Finished . \n");
#
# if (_kbhit()) break;
#
# // Free Seq2
# nReturn = AlpSeqFree(nDevId, nSeqId2);
# _tprintf(_T(" ALP STATUS %d \n"), nReturn);
#
#
# //Reallocate Sequence 2
# if (ALP_OK != AlpSeqAlloc(nDevId, nBit, nFrames, &nSeqId2)) printf("errorSeqAlloc\n"); // 1bit
# _tprintf(_T("okalloc \n"));
#
# // Set up image timing
# if (nBit == 1){
# if (ALP_OK != AlpSeqControl(nDevId, nSeqId2, ALP_DATA_FORMAT, ALP_DATA_BINARY_TOPDOWN)) {
# printf("errorBinaryMode3\n");
# }
# }
# if (ALP_OK != AlpSeqTiming(nDevId, nSeqId2, ALP_DEFAULT, nPictureTime,
# ALP_DEFAULT, ALP_DEFAULT, ALP_DEFAULT)) return 1;
#
# // Transmit images into Seq2
# nReturn = AlpSeqPut(nDevId, nSeqId2, ALP_DEFAULT, ALP_DEFAULT, block2);
# _tprintf(_T(" ALP STATUS %d \n"), nReturn);
#
# // Start Sequence 2 in queue mode
# if (ALP_OK != AlpProjStart(nDevId, nSeqId2)) return 1;
# _tprintf(_T("Seq2 \n"));
#
# if (_kbhit()) break;
#
# }
#
# // Wait for key stroke
# _tprintf(_T("Press any key to stop ALP projection"));
# do { _getch(); } while (_kbhit());
#
# // Done
# AlpDevHalt(nDevId);
# AlpDevFree(nDevId);
#
# fclose(file);
# return 0;
# }
#
| [
2,
11900,
4722,
7293,
2234,
198,
2,
770,
6291,
318,
2810,
355,
12,
271,
11,
1231,
597,
18215,
13,
198,
2,
198,
2,
357,
66,
8,
4343,
16049,
1847,
31235,
402,
2022,
39,
13,
1439,
2489,
10395,
13,
198,
2,
9466,
198,
2,
198,
2,
33... | 2.159749 | 7,637 |
# KartAI https://github.com/eritzyg/KartAI/
# Copyright (c) 2017 Eritz Yerga Gutierrez and Iker García Ferrero
# MIT License https://github.com/eritzyg/KartAI/blob/master/LICENSE
import Global as glv
import Player as player
import TrackManager as tm
import Timer as timer
import Points as points | [
2,
32872,
20185,
3740,
1378,
12567,
13,
785,
14,
263,
4224,
35641,
14,
42,
433,
20185,
14,
198,
2,
15069,
357,
66,
8,
2177,
412,
29574,
575,
263,
4908,
48283,
290,
314,
6122,
16364,
29690,
12880,
34785,
198,
2,
17168,
13789,
3740,
1... | 3.182796 | 93 |
# -*- coding: utf-8 -*-
import argparse
import tqdm
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
import numpy as np
import yaml
from terminaltables import AsciiTable
from models.Yolo3Body import YOLOV3
from utils.util import xywh2xyxy, non_max_suppression, get_batch_statistics, ap_per_class
from utils.datasets import ListDataSet
from utils.transforms import DEFAULT_TRANSFORMS
# Build the validation dataset.
# Report the evaluation result statistics.
def _evaluate(model, dataloader, class_names, img_size, iou_thres, conf_thres, nms_thres, verbose, device):
    """Evaluate model on validation dataset.

    :param model: Model to evaluate
    :type model: models.Darknet
    :param dataloader: Dataloader provides the batches of images with targets
    :type dataloader: DataLoader
    :param class_names: List of class names
    :type class_names: [str]
    :param img_size: Size of each image dimension for yolo
    :type img_size: int
    :param iou_thres: IOU threshold required to qualify as detected
    :type iou_thres: float
    :param conf_thres: Object confidence threshold
    :type conf_thres: float
    :param nms_thres: IOU threshold for non-maximum suppression
    :type nms_thres: float
    :param verbose: If True, prints stats of model
    :type verbose: bool
    :return: Returns precision, recall, AP, f1, ap_class

    Note: `nms_thres` is the IoU threshold handed to NMS, while
    `iou_thres` is the IoU threshold used for true-positive matching.
    """
    model.eval()  # Set model to evaluation mode
    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for imgs, targets in tqdm.tqdm(dataloader, desc="Validating"):
        # Extract labels (column 1 of the target rows is the class id)
        labels += targets[:, 1].tolist()
        # Rescale target boxes from normalized xywh to pixel xyxy
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size
        imgs = Variable(imgs.to(device, non_blocking=True), requires_grad=False)
        # Inference only: gradients are not needed during evaluation.
        with torch.no_grad():
            outputs = model(imgs, training=False)
            outputs = non_max_suppression(outputs, conf_thres=conf_thres, iou_thres=nms_thres)
        sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)
    if len(sample_metrics) == 0:  # No detections over whole validation set.
        print("---- No detections over whole validation set ----")
        return None
    # Concatenate per-batch statistics into flat arrays
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    metrics_output = ap_per_class(
        true_positives, pred_scores, pred_labels, labels)
    print_eval_stats(metrics_output, class_names, verbose)
    return metrics_output
if __name__ == '__main__':
    # Entry point. NOTE(review): `run` is not defined in this chunk -- it is
    # presumably defined elsewhere in the full module; confirm.
    run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
198,
11748,
256,
80,
36020,
198,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
6738,
28034,
13,
2306,
519,
6... | 2.545898 | 1,024 |
import datetime as dt
import random
import functions.models as models
#import sys
def create_task(from_user, from_user_id,
                owner, content, priority=3):
    '''
    Persist a new task with the given params and return the created row.

    The task starts in status 0 ("todo") and is timestamped at creation.
    '''
    models.database.connect()
    new_row = models.Task.create(
        from_user=from_user,
        from_user_id=from_user_id,
        owner_slack_username=owner,
        content=content,
        create_time=dt.datetime.now(),
        priority=int(priority),
        status=0,  # 0 -> todo
    )
    models.database.close()
    return new_row
def complete_task(task_id):
    '''
    Mark the task with the given id as completed in the db
    (sets status 2 on the backing models.Task row).
    '''
    models.database.connect()
    task_row = models.Task.select().where(
        models.Task.id == task_id).get()
    task_row.status = 2  # deleted
    task_row.save()
    models.database.close()
def list_task(owner):
    '''
    List active (status 0) tasks for the given owner in order of priority.

    owner: slack_username

    Returns the converted task list, or None if the query fails.
    '''
    print("#list owner:" + owner)
    models.database.connect()
    try:
        # here tasks is a SelectQuery
        tasks = models.Task.select().where(
            (models.Task.owner_slack_username == owner) &
            (models.Task.status == 0)
        ).order_by(models.Task.priority)
        return convert_select_query_to_list(tasks)
    except Exception as exc:
        # BUG FIX: the original used a bare `except:` and called
        # `sys.exc_info()` even though `import sys` is commented out at the
        # top of the file, so the handler itself raised NameError and masked
        # the real failure.
        print("Unexpected error:", exc)
    finally:
        models.database.close()
def convert_tasks_to_string(owner, tasks):
    '''
    Render the given tasks (already in priority order) as a display string.

    owner: the user's name
    tasks: list of tasks
    return a ready-to-display string
    '''
    header = "*" + owner + "'s list:" + "*"
    if not tasks:
        # Drop the trailing '*' so the suffix reads on from the header.
        return header[:-1] + " is empty! :thumbsup:"
    entries = [
        "\n*" + str(pos) + "* - _" + task_to_string(item) + "_;\n"
        for pos, item in enumerate(tasks, 1)
    ]
    return header + "".join(entries)
class Task:
    '''
    The task object; this is a representation on top of the
    db task object (models.Task).

    NOTE(review): no attributes are defined in this chunk; consumers such
    as TodoList.add_task expect `from_user`, `from_user_id`, `owner`,
    `content`, `time` and `priority` -- presumably set dynamically or in
    code not visible here. Confirm.
    '''
class TodoList:
    '''
    A Todo list class; this class provides functions to compose the tasks
    into a list.

    Tasks are held in `self.tasks`, kept sorted by ascending priority
    (a smaller number means higher priority).
    '''

    def __init__(self):
        # BUG FIX: `self.tasks` was read by every method but was never
        # initialized anywhere in the class.
        self.tasks = []

    def add_task(self, task):
        '''Add a task to this todo list and persist it.

        The in-memory insertion keeps `self.tasks` sorted by priority.
        '''
        # add to Task table
        models.database.connect()
        models.Task.create(from_user=task.from_user,
                           from_user_id=task.from_user_id,
                           # NOTE(review): `create_task` above writes the
                           # field `owner_slack_username`; confirm which
                           # field name actually exists on models.Task.
                           owner_first_name=task.owner,
                           content=task.content,
                           create_time=task.time,
                           priority=task.priority,
                           status=0)
        models.database.close()
        # insert based on priority -- the smaller the higher priority
        if self.tasks:
            for i in range(len(self.tasks)):
                if self.tasks[i].priority > task.priority:
                    self.tasks.insert(i, task)
                    return
                elif i == len(self.tasks) - 1:
                    self.tasks.append(task)
                    return
        else:
            self.tasks.append(task)
            return

    def remove_tasks(self, indices):
        '''Remove the (1-based) indices and return the removed tasks.'''
        # de-duplicate and sort from highest to lowest so earlier pops do
        # not shift the positions of later ones
        indices = list(set(indices))
        indices.sort(reverse=True)
        removed = []
        for index in map(int, indices):
            if 1 <= index <= len(self.tasks):
                removed.append(self.tasks.pop(index - 1))
        self.remove_tasks_in_db(removed)
        return removed

    def remove_tasks_in_db(self, tasks):
        '''
        Mark the given todo.Task objects as deleted (status 2) in the db.
        '''
        # BUG FIX: the original iterated an undefined `task_ids` name, used
        # the unqualified `Task` model, never materialized the query with
        # `.get()`, and appended to an undefined `deleted` list -- each of
        # those raised NameError/AttributeError at runtime.
        models.database.connect()
        for task in tasks:
            # NOTE(review): assumes each task carries its db row id as
            # `task.id` -- confirm against the Task representation.
            task_row = models.Task.select().where(
                models.Task.id == task.id).get()
            task_row.status = 2  # deleted
            task_row.save()
        models.database.close()
        return
| [
11748,
4818,
8079,
355,
288,
83,
198,
11748,
4738,
198,
11748,
5499,
13,
27530,
355,
4981,
198,
198,
2,
11748,
25064,
198,
198,
4299,
2251,
62,
35943,
7,
6738,
62,
7220,
11,
422,
62,
7220,
62,
312,
11,
220,
198,
220,
220,
220,
220... | 2.053131 | 2,108 |
"""
Code illustration: 9.02
Lock Demo
Tkinter GUI Application Development Blueprints
"""
import threading
if __name__ == "__main__":
    # Spawn 100 LockDemo instances. NOTE(review): `LockDemo` is not defined
    # in this chunk -- presumably a class defined elsewhere in the full
    # file; confirm.
    for i in range(100):
        LockDemo()
| [
37811,
198,
10669,
20936,
25,
860,
13,
2999,
198,
220,
220,
220,
13656,
34588,
198,
51,
74,
3849,
25757,
15678,
7712,
4518,
17190,
198,
37811,
198,
198,
11748,
4704,
278,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
12... | 2.787879 | 66 |
# Per-round left-rotation amounts for a 16-round key schedule. The values
# match the DES key-schedule shift table (rounds 1, 2, 9 and 16 rotate by
# 1 bit, all others by 2). NOTE(review): confirm this module implements
# DES before relying on that reading.
shiftLeftTable = [
    1, 1, 2, 2, 2, 2, 2, 2,
    1, 2, 2, 2, 2, 2, 2, 1
]
30846,
18819,
10962,
796,
685,
198,
220,
220,
220,
352,
11,
352,
11,
362,
11,
362,
11,
362,
11,
362,
11,
362,
11,
362,
11,
198,
220,
220,
220,
352,
11,
362,
11,
362,
11,
362,
11,
362,
11,
362,
11,
362,
11,
352,
198,
60
] | 1.630435 | 46 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import io
from xlwt import *
class BaseExcelOutput:
"""
基础单表数据导出
"""
    def run(self, data: list, format_data: dict) -> bytes:
        """
        Export a flat list of row dicts to a single-sheet xls workbook.

        :param data: example --
            [
                {'number': '123', 'status': 1, 'is_pay': False},
                {'number': '124', 'status': 3, 'is_pay': True}
            ]
        :param format_data: example --
            {
                'title_data': [['单号', 5918], ['状态', 3000], ['是否结算', 3400]],
                'format': {'status': ['', '待付款', '待发货', '已完成', '已撤单'], 'is_pay': ['否', '是']}
            }
            (1) the entries of `title_data` correspond, in order, to the
                keys of each row dict in `data`;
            (2) the number (e.g. 5918) is that column's width;
            (3) about 3400 fits four CJK characters;
            (4) `format` maps an int id to its display text (here the
                options are ((1, pending payment), (2, pending shipment),
                (3, completed), (4, cancelled))) and converts booleans to
                yes/no text;
            (5) `title_data` and `format` are required keys; pass an empty
                dict `{}` for `format` when unused.
        :return: binary content of the excel file; may be saved as .xls
        """
        # Build the workbook
        ws = Workbook(encoding="utf-8")
        style = XFStyle()  # base cell style
        title_style = XFStyle()  # header-row style
        # Alignment: center both horizontally and vertically
        al = Alignment()
        al.horz = 0x02  # horizontal center
        al.vert = 0x01  # vertical center
        style.alignment = al
        title_style.alignment = al
        # Bold font for the header row
        font = Font()
        font.name = '微软雅黑'
        font.height = 20 * 12
        font.bold = True
        title_style.font = font
        # Regular font for data cells
        font = Font()
        font.name = '微软雅黑'
        font.bold = False
        style.font = font
        w = ws.add_sheet('sheet1', cell_overwrite_ok=True)
        # excel_format is presumably the (static) method elided from this
        # chunk -- it writes the rows into sheet `w`; confirm.
        self.excel_format(w, style, title_style, data, format_data)
        bio = io.BytesIO()
        ws.save(bio)
        bio.seek(0)
        return bio.getvalue()
@staticmethod
class DetailExcelOutput(BaseExcelOutput):
"""
订单+详情类数据的导出
data: [
{'number': '1', 'status': 1, 'is_pay': False, 'details': [{'名称': '门', '数量': 2}, {'名称': '窗', '数量': 4}]},
{'number': '2', 'status': 3, 'is_pay': True, 'details': [{'名称': '墙纸', '数量': 2}, {'名称': '龙头', '数量': 3}]}
]
format_data: {
'title_data': [['单号', 5918], ['状态', 3000], ['是否结算', 3400], ['名称', 4000], ['数量', 3000]],
'format': {'status': ['', '待付款', '待发货', '已完成', '已撤单'], 'is_pay': ['否', '是']}
}
title_data中“单号”、“状态”、“是否结算”和data列表中字典的键按顺序一一对应,
后面的“名称”、“数量”和details列表中字典的键按顺序一一对应。
"""
@staticmethod
if __name__ == '__main__':
with open('a.xls', 'wb') as fp:
a = DetailExcelOutput()
data_ = [
{'number': '1', 'status': 1, 'is_pay': False, 'details': [{'名称': '门', '数量': 2}, {'名称': '窗', '数量': 4}]},
{'number': '2', 'status': 3, 'is_pay': True, 'details': [{'名称': '墙纸', '数量': 2}, {'名称': '龙头', '数量': 3}]}
]
format_data_ = {
'title_data': [['单号', 5918], ['状态', 3000], ['是否结算', 3400], ['名称', 4000], ['数量', 3000]],
'format': {'status': ['', '待付款', '待发货', '已完成', '已撤单'], 'is_pay': ['否', '是']}
}
s = a.run(data=data_, format_data=format_data_)
fp.write(s)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
33245,
198,
6738,
2124,
75,
46569,
1330,
1635,
628,
198,
4871,
7308,
3109,
5276,
26410,
25,
198,
220,
220,
220,
372... | 1.348919 | 2,267 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
from termcolor import colored
# from apex import amp
PATIENT = 3
if __name__ == '__main__':
opt = opts().parse()
main(opt) | [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
4808,
15003,
62,
6978,
82,
198,
198,
11748,
28686,
198,
198,
11748,
28034,
19... | 3.368421 | 152 |
# -*- coding:utf-8 -*-#
import gradient_descend_optimization as gd
import eval.eval_by_effort as ebe
import eval.layout as lyt
import time as timestamp
import process_layout as pl
#import genetic_optimization as gen
import simulated_annealing_optimization as sa
#import neural_network_optimization as nn
import sys
# print "We have these search methods:"
# print " 1. gradient descend optimization"
# print " 2. simulated annealing optimization"
# print " 3. ..."
#search_method = raw_input("Please choose a search method:")
search_method = 2
switch = {
'1': "gd",
'2': "sa"
}
print run(50)
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
2,
198,
11748,
31312,
62,
20147,
437,
62,
40085,
1634,
355,
308,
67,
198,
11748,
5418,
13,
18206,
62,
1525,
62,
14822,
419,
355,
304,
1350,
198,
11748,
5418,
13,
39786,
355,
3... | 3.034483 | 203 |
URL_COMPANY_FILING_SEARCH = 'https://www.sec.gov/cgi-bin/browse-edgar?CIK=[cik]&action=getcompany'
URL_13F_QR_XML = 'https://www.sec.gov/Archives/edgar/data/[cik]/[acc_no]/form13fInfoTable.xml'
| [
21886,
62,
9858,
47,
31827,
62,
46700,
2751,
62,
5188,
31315,
796,
705,
5450,
1378,
2503,
13,
2363,
13,
9567,
14,
37157,
12,
8800,
14,
25367,
325,
12,
276,
4563,
30,
34,
18694,
41888,
979,
74,
60,
5,
2673,
28,
1136,
39722,
6,
198,... | 2.119565 | 92 |
# Copyright (c) 2017, 2019 Oracle and/or its affiliates. All rights reserved.
""" Build an rpm from oci-utils.
"""
import os
import subprocess
import sys
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils import log
sys.path.insert(0, os.path.abspath('lib'))
try:
from setuptools import setup, find_packages
except ImportError:
print("oci-utils needs setuptools in order to build. Install it using "
"your package manager (usually python-setuptools) or via pip (pip "
"install setuptools).")
sys.exit(1)
with open('requirements.txt') as requirements_file:
install_requirements = requirements_file.read().splitlines()
if not install_requirements:
print("Unable to read requirements from the requirements.txt file "
"That indicates this copy of the source code is incomplete.")
sys.exit(2)
def read(fname):
"""
Read a file.
Parameters
----------
fname : str
The full path of the file.
Returns
-------
The contents of the file.
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
class create_rpm(Command):
"""
Build an RPM of oci_utils.
Setuptools provides bdist_rpm command but it does not (as we speak) support
custom spec file
The run() method call sdist command to build the tarbal and then rpmbuild
against our own spec file
Attributes
----------
rpm_top_dir : str
Root directory for rpmbuild.
spec_file_path : str
Path for spec files.
"""
description = 'Build an RPM base on tarball generated by sdist command'
user_options = [('rpm-top-dir=', 'D', 'rpm to directory'), ('spec-file-path=', 'S', 'path to spec file')]
def finalize_options(self):
"""
No action.
Returns
-------
No return value.
"""
pass
def initialize_options(self):
"""
Initialisation.
Returns
-------
No return value.
"""
self.rpm_top_dir = None
self.spec_file_path = None
def run(self, do_cleanup=True):
"""
Run the actual sdist command and create the tarball under tarball_dir.
Returns
-------
No return value.
Raises
------
DistutilsExecError
On any error.
"""
log.info("runnig sdist command now...")
self.run_command('sdist')
log.info("tarball created, building the RPM now...")
_cwd = os.path.dirname(os.path.abspath(__file__))
log.info('current wd [%s]' % _cwd)
redefined_top_dir = os.path.join(_cwd, self.rpm_top_dir)
spec_file_abs_path = os.path.join(_cwd, self.spec_file_path)
v_opt = '--quiet'
if self.verbose:
v_opt = '-v'
if do_cleanup:
rpmbuild_cmd = ('/bin/rpmbuild',
v_opt,
'--define',
'_topdir %s' % redefined_top_dir,
'-ba',
spec_file_abs_path)
else:
rpmbuild_cmd = ('/bin/rpmbuild',
v_opt,
'--define',
'_topdir %s' % redefined_top_dir,
'--noclean',
'-ba',
spec_file_abs_path)
log.info('executing %s' % ' '.join(rpmbuild_cmd))
ec = subprocess.call(rpmbuild_cmd)
if ec != 0:
raise DistutilsExecError("rpmbuild execution failed")
setup(
name="oci-utils",
version="0.10.2",
author="Laszlo Peter, Qing Lin, Guido Tijskens, Emmanuel Jannetti",
author_email="laszlo.peter@oracle.com, qing.lin@oracle.com, guido.tijskens@oracle.com, emmanuel.jannetti@oracle.com",
description="Oracle Cloud Infrastructure utilities",
license="UPL",
install_requires=install_requirements,
keywords="Oracle Cloud Infrastructure",
url="http://github.com/oracle/oci-utils/",
package_dir={'': 'lib'},
packages=find_packages('lib'),
setup_requires=[],
long_description=read('README'),
data_files=[(os.path.join(sys.prefix, 'libexec'),
['libexec/ocid',
'libexec/secondary_vnic_all_configure.sh',
'libexec/oci-image-cleanup',
'libexec/oci-utils-config-helper',
'libexec/oci_vcn_iface.awk',
'libexec/oci-kvm-upgrade',
'libexec/oci-growfs',
'libexec/oci-kvm-config.sh',
'libexec/oci-kvm-network-script'
]),
("/etc/systemd/system",
['data/ocid.service', 'data/oci-kvm-config.service']),
("/etc/oci-utils",
['data/oci-image-cleanup.conf',
]),
("/etc/oci-utils.conf.d",
['data/00-oci-utils.conf',
'data/10-oci-kvm.conf',
]),
('/usr/lib/systemd/system-preset',
['data/91-oci-utils.preset', 'data/91-oci-kvm.preset']),
(os.path.join(sys.prefix, "share", "man", "man1"),
['man/man1/oci-public-ip.1',
'man/man1/oci-metadata.1',
'man/man1/oci-network-inspector.1',
'man/man1/oci-iscsi-config.1',
'man/man1/oci-network-config.1',
'man/man1/oci-kvm.1',
]),
(os.path.join(sys.prefix, "share", "man", "man5"),
['man/man5/oci-utils.conf.d.5',
]),
(os.path.join(sys.prefix, "share", "man", "man8"),
['man/man8/ocid.8',
'man/man8/oci-growfs.8',
'man/man8/oci-image-cleanup.8',
])],
scripts=['bin/oci-public-ip',
'bin/oci-metadata',
'bin/oci-iscsi-config',
'bin/oci-network-config',
'bin/oci-network-inspector',
'bin/oci-kvm',
],
classifiers=[
"Development Status :: 3 - Alpha",
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: Universal Permissive License (UPL)'],
cmdclass={'create_rpm': create_rpm, 'sync_rpm': sync_rpm})
| [
2,
15069,
357,
66,
8,
2177,
11,
13130,
18650,
290,
14,
273,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
198,
37811,
10934,
281,
37542,
422,
267,
979,
12,
26791,
13,
198,
37811,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
... | 1.988249 | 3,489 |
import nengo
import numpy as np
| [
11748,
299,
1516,
78,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 2.833333 | 12 |