content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
Se dă o listă de cuburi de latură l_i și culoare c_i.
Să se construiască un turn de înălțime maximă astfel încât
laturile cuburilor succesive sunt în ordine crescătoare și
culorile cuburilor alăturate sunt diferite.
Laturile _nu_ sunt distincte.
Să se găsească înălțimea maximă posibilă a unui turn și
numărul de moduri în care se poate obține acel turn.
Sortăm cuburile descrescător după latură.
Definim:
- H[i] = înălțimea maximă a unui turn care are la bază cubul i
- nr[i] = câte turnuri de înălțime H[i] se pot forma având la bază cubul
Inițializăm cuburile de latură minimă:
- înălțimea = înălțimea cubului
- numărul de turnuri = 1
Exemplu:
(9, 3) (8, 1) (8, 2) (7, 3) (7, 1) (6, 1) (5, 2) (3, 4) (3, 5)
38 29 29 21 15 14 8 3 3
4 2 2 2 2 2 2 1 1
Recurența este:
- nr[i] = sumă de nr[j] pentru j unde
H[i] = H[j] + latura cubului i,
și culoarea cubului i != culoarea cubului j
- H[i] = max(H[j] + latura lui i) pentru j unde
pot adăuga cubul i peste cubul j
"""
from typing import NamedTuple
cubes = []
with open('cuburi.txt') as fin:
n, _ = map(int, next(fin).split())
for _ in range(n):
line = next(fin)
length, color = map(int, line.split())
cubes.append(Cube(length, color))
cubes.sort()
max_heights = [cubes[i].length for i in range(n)]
max_counts = [1 for _ in range(n)]
preds = [-1 for _ in range(n)]
for i in range(n):
max_height = cubes[i].length
for j in range(i):
height = cubes[i].length + max_heights[j]
if cubes[i].color != cubes[j].color and cubes[i].length != cubes[j].length:
if height > max_height:
max_height = height
preds[i] = j
max_heights[i] = max_height
if max_height == cubes[i].length:
max_counts[i] = 1
else:
max_count = 0
for j in range(i):
if cubes[i].color != cubes[j].color and max_height == max_heights[j] + cubes[i].length:
max_count += max_counts[j]
max_counts[i] = max_count
max_height = 0
max_idx = -1
for idx, height in enumerate(max_heights):
if height > max_height:
max_height = height
max_idx = idx
current_idx = max_idx
print('Turn:')
while current_idx != -1:
print(cubes[current_idx])
current_idx = preds[current_idx]
print('Număr de turnuri:')
print(sum(max_counts[i] for i in range(n) if max_heights[i] == max_height))
| [
37811,
198,
4653,
288,
128,
225,
267,
1351,
128,
225,
390,
13617,
9900,
390,
300,
2541,
128,
225,
300,
62,
72,
220,
132,
247,
72,
10845,
78,
533,
269,
62,
72,
13,
198,
198,
50,
128,
225,
384,
1500,
622,
72,
3372,
128,
225,
555,
... | 1.874819 | 1,382 |
zahlen = []
with open('AdventOfCode_01_1_Input.txt') as f:
for zeile in f:
zahlen.append(int(zeile))
print(sum(zahlen)) | [
89,
993,
11925,
796,
17635,
198,
198,
4480,
1280,
10786,
2782,
1151,
5189,
10669,
62,
486,
62,
16,
62,
20560,
13,
14116,
11537,
355,
277,
25,
198,
220,
329,
41271,
576,
287,
277,
25,
198,
220,
220,
220,
1976,
993,
11925,
13,
33295,
... | 2.114754 | 61 |
import platform
__all__ = ()
if platform.system() == 'Linux':
from pathlib import Path
from constants import ROOT_DIR, VENV_DIR, SYSTEMD_DIR
from utils.config import asgi_framework
from . import run_subprocess
__all__ = ('create_systemd_unit', 'enable_systemd_unit', 'start_service',
'disable_systemd_unit', 'stop_service', 'restart_service',
'service_running')
| [
11748,
3859,
198,
198,
834,
439,
834,
796,
7499,
198,
198,
361,
3859,
13,
10057,
3419,
6624,
705,
19314,
10354,
198,
220,
220,
220,
422,
3108,
8019,
1330,
10644,
198,
220,
220,
220,
422,
38491,
1330,
15107,
2394,
62,
34720,
11,
569,
... | 2.59375 | 160 |
import requests
import inspect
import urllib
import pandas as pd
#####################################
### ###
### Define constant ###
### ###
#####################################
CONST_ENDPOINT = '206.189.149.240'
CONST_PORT = 4000
CONST_LIBRARY = 'HotDog'
def convert_dict_format(old_dict):
"""
Convert dictionary with key in underscore format to dot foramt.
And values to be quoted. Used for R param conversion
Args:
old_dict (dict): Old dictionary with underscore as key
Returns:
new_dict (dict): New dictionary with dot separated key and quoted values
Example:
old_dict = {'ref.date': '2020-01-10'}
new_dict = convert_dict_key(old_dict)
TODO:
1. Based on type of values, e.g. not quote bool
"""
new_keys = [k.replace('_', '.') for k in old_dict.keys()]
new_values = ["'{}'".format(str(v)) for v in old_dict.values()]
new_dict = dict(zip(new_keys, new_values))
return(new_dict)
def json_to_df(json):
""" json to dataframe with id column dropped """
try:
df = pd.read_json(json)
df.drop(columns=['id'], axis=1, inplace=True, errors='ignore') # drop id column if exists
# Convert datetime columns to date
# if 'date' in df.columns:
# df['date'] = df['date'].dt.date
except:
return(json) # Return error message from R
return(df)
@postit
def GetSignalPerformance(code, option_only=True):
"""
Get signal history performace
Args:
code (str): Stock code
option_only (bool): Specify whether the signal are for option only stocks. Default true
Returns:
df (Dataframe):
Example:
GetSignalPerformance(ref_date = '2020-01-10')
"""
func_name = inspect.stack()[0][3]
return(func_name)
@postit
def LoadHitSignal(ref_date, option_only=True):
"""
Load signal hit history in database.
Return all or option only signal with wide or long format
Args:
ref_date (str): Date in YYYY-MM-DD format, e.g. 2018-01-01
option_only (bool): Specify whether the signal are for option only stocks. Default true
Returns:
df.signal (Dataframe): Stock price dataframe with calculated signal in the input date only
Example:
LoadHitSignal(ref_date = '2020-01-10')
"""
func_name = inspect.stack()[0][3]
return(func_name)
@postit
def check_cronjob():
"""
Return the latest date of records in the cronjob tables
Args:
None
Returns:
df.res (Dataframe): Dataframe of latest date of cronjob tables
Example:
df.res = check_cronjob()
"""
func_name = inspect.stack()[0][3]
return(func_name)
| [
11748,
7007,
198,
11748,
10104,
198,
11748,
2956,
297,
571,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
29113,
4242,
2,
198,
21017,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.497317 | 1,118 |
# from . import algorithms
# from . import controller
# from . import environment
# from . import experiment
# from . import models
# from . import optimizers
# from . import plotting
# from . import utils
| [
2,
422,
764,
1330,
16113,
198,
2,
422,
764,
1330,
10444,
198,
2,
422,
764,
1330,
2858,
198,
2,
422,
764,
1330,
6306,
198,
2,
422,
764,
1330,
4981,
198,
2,
422,
764,
1330,
6436,
11341,
198,
2,
422,
764,
1330,
29353,
198,
2,
422,
... | 4.12 | 50 |
import numpy as np
import math
# ls = np.array([[-1,2,1], [0,-3,2], [1,1,-4]])
# plane = getPlane(ls)
# incident = np.array([1,0,0])
# print(getReflectionFromPlane(plane,incident))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
198,
2,
43979,
796,
45941,
13,
18747,
26933,
58,
12,
16,
11,
17,
11,
16,
4357,
685,
15,
12095,
18,
11,
17,
4357,
685,
16,
11,
16,
12095,
19,
11907,
8,
198,
2,
6614,
796,
651... | 2.22619 | 84 |
from django.contrib import admin
from .models import TriviaQuiz, TriviaQuestion
admin.site.register(TriviaQuiz)
admin.site.register(TriviaQuestion)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
7563,
8869,
4507,
528,
11,
7563,
8869,
24361,
198,
198,
28482,
13,
15654,
13,
30238,
7,
23854,
4507,
528,
8,
198,
28482,
13,
15654,
13,
30238,
7,
23854,
2436... | 3.386364 | 44 |
import unittest
from silence_exception import silence_exception, SilenceException
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
9550,
62,
1069,
4516,
1330,
9550,
62,
1069,
4516,
11,
26354,
16922,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
1... | 3.116279 | 43 |
import sys, getopt
import utils
import pandas as pd
import DatasetPaths
import yaml
KEY = 'Hospital'
COLUMNS_TO_KEEP = ['Hospital','km0_x','km0_y']
# Execute only if script run standalone (not imported)
if __name__ == '__main__':
df_samur = pd.read_csv(DatasetPaths.SAMUR)
df_hospitals = pd.read_csv(DatasetPaths.HOSPITALS)
df = merge_hospitals(df_samur, df_hospitals)
print(df.head())
df.to_csv(DatasetPaths.SAMUR_MERGED.format('hospitals'),index = False);
df = assign_ambulances(df,df_hospitals,utils.NUMBER_OF_AMBULANCES)
# Transform to dictionary and save to yaml
df_dict = [{0:{'available_amb':0,'name':'NaN','loc':{'district_code':0,'x':0.0,'y':0.0}}}]
for index,r in df.iterrows():
df_dict.append({index+1:{'available_amb':r.Ambulances,'name':r.Hospital,'loc':{'district_code':r.district_code,'x':r.hospital_x,'y':r.hospital_y}}})
yaml_file = open(DatasetPaths.HOSPITALS_YAML,"w+",encoding='utf8')
yaml.dump(df_dict,yaml_file,allow_unicode = True)
| [
11748,
25064,
11,
651,
8738,
198,
11748,
3384,
4487,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
16092,
292,
316,
15235,
82,
198,
11748,
331,
43695,
198,
198,
20373,
796,
705,
39,
3531,
6,
198,
25154,
5883,
8035,
62,
10468,
62,
... | 2.293706 | 429 |
# -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys
from abc import abstractmethod, ABCMeta
from pathlib import Path
try:
from bpdb import set_trace
except ImportError:
from pdb import set_trace
class Book(object):
"""
Args:
"""
if __name__ == '__main__':
pass
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
450,
66,
1330,
12531,
24396,
11,
9738,
48526,
198,
6738,
3108,
8019,
1330,
106... | 2.655172 | 116 |
from testing_helpers import wrap
@wrap
| [
6738,
4856,
62,
16794,
364,
1330,
14441,
220,
198,
198,
31,
37150,
628,
628
] | 3.142857 | 14 |
import numpy
n = int(input())
a = numpy.array([input().split() for _ in range(n)], int)
b = numpy.array([input().split() for _ in range(n)], int)
print(numpy.dot(a, b)) | [
11748,
299,
32152,
198,
198,
77,
796,
493,
7,
15414,
28955,
198,
64,
796,
299,
32152,
13,
18747,
26933,
15414,
22446,
35312,
3419,
329,
4808,
287,
2837,
7,
77,
8,
4357,
493,
8,
198,
65,
796,
299,
32152,
13,
18747,
26933,
15414,
2244... | 2.522388 | 67 |
#!/usr/bin/env python
from bottle import route, run, static_file
import os
import rospy
@route('/freeboard/<filename:path>')
@route('/freeboard/')
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
9294,
1330,
6339,
11,
1057,
11,
9037,
62,
7753,
198,
11748,
28686,
198,
11748,
686,
2777,
88,
198,
198,
31,
38629,
10786,
14,
5787,
3526,
14,
27,
34345,
25,
6978,
29,
1153... | 2.652778 | 72 |
#!/usr/local/bin/python
import pdb
import sys,os,platform,matplotlib
#
# import matplotlib.animation as animation
# import matplotlib.pyplot as plt
import sys
import params
sys.path.append("..")
import scipy.io
import numpy as np
from scipy.integrate import odeint
from numpy import cos, sin, tan, arctan2, sqrt, sign, diag,arctan
from numpy.linalg import norm
current_path = os.path.dirname(os.path.abspath(__file__))
from PIL import Image
from math import pi
from scipy.optimize import newton_krylov, fsolve, anderson, broyden1, broyden2
car_colors = {'blue', 'gray', 'white', 'yellow', 'brown',
'white1','green', 'white_cross', 'cyan', 'red1', 'orange'}
#car_colors = {'blue', 'gray', 'black', 'white', 'yellow', 'brown', 'white1','green', 'white_cross', 'cyan', 'red1', 'orange', 'white2'}
car_figs = dict()
for color in car_colors:
car_figs[color] = current_path + '/imglib/cars/' + color + '_car.png'
class KinematicCar():
'''Kinematic car class
'''
def state_dot(self, state,time, acc,steer):
"""
This function defines the system dynamics
Inputs
acc: acceleration input
steer: steering input
"""
# if already at maximum speed, can't no longer accelerate
if state[2] >= self._vmax and acc>0:
v_dot = 0
elif state[2]<=0 and acc<-1e-3:
v_dot = -state[2]
else:
v_dot = np.clip(acc, self.acc_range[0], self.acc_range[1])
theta_dot = state[2] / self._length * tan(np.clip(steer, self.steer_range[0], self.steer_range[1]))
x_dot = state[2] * cos(state[3])
y_dot = state[2] * sin(state[3])
dstate = [x_dot, y_dot, v_dot, theta_dot ]
return dstate
def next(self, inputs, dt):
"""
next is a function that updates the current position of the car when inputs are applied for a duration of dt
Inputs:
inputs: acceleration and steering inputs
dt: integration time
Outputs:
None - the states of the car will get updated
"""
acc, steer = inputs
# take only the real part of the solution
if dt>0.1:
self.state = odeint(self.state_dot, self.state, t=(0, dt), args=(acc,steer))[1]
else:
self.state = self.state + np.array(self.state_dot(self.state,0,acc,steer))*dt
if self.segment==1:
self.wait_time += dt
def find_corner_coordinates(x_state_center_before, y_state_center_before, x_desired, y_desired, theta, square_fig):
"""
This function takes an image and an angle then computes
the coordinates of the corner (observe that vertical axis here is flipped).
If we'd like to put the point specfied by (x_state_center_before, y_state_center_before) at (x_desired, y_desired),
this function returns the coordinates of the lower left corner of the new image
"""
w, h = square_fig.size
theta = -theta
if abs(w - h) > 1:
print('Warning: Figure has to be square! Otherwise, clipping or unexpected behavior may occur')
# warnings.warn("Warning: Figure has to be square! Otherwise, clipping or unexpected behavior may occur")
R = np.array([[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
x_corner_center_before, y_corner_center_before = -w/2., -h/2. # lower left corner before rotation
x_corner_center_after, y_corner_center_after = -w/2., -h/2. # doesn't change since figure size remains unchanged
x_state_center_after, y_state_center_after = R.dot(np.array([[x_state_center_before], [y_state_center_before]])) # relative coordinates after rotation by theta
x_state_corner_after = x_state_center_after - x_corner_center_after
y_state_corner_after = y_state_center_after - y_corner_center_after
# x_corner_unknown + x_state_corner_after = x_desired
x_corner_unknown = int(x_desired - x_state_center_after + x_corner_center_after)
# y_corner_unknown + y_state_corner_after = y_desired
y_corner_unknown = int(y_desired - y_state_center_after + y_corner_center_after)
return x_corner_unknown, y_corner_unknown
offset = [-1.3,0.0]
# TESTING
# x0 = np.array([params.X1+1,0,0,pi/2-0.1])
# veh = KinematicCar(x0)
# veh_set = [veh]
# intersection_fig = current_path + '/imglib/intersection_stop1.png'
# intersection = Image.open(intersection_fig)
# background = Image.open(intersection_fig)
# fig = plt.figure()
# ax = fig.add_axes([0,0,1,1]) # get rid of white border
# plt.axis('off')
# ts = 0.05
# def animate(frame_idx,veh_set): # update animation by dt
# global background
# ax.clear()
# for veh in veh_set:
# u = turning_con(veh.state,'N','L',veh._length)
# veh.next(u,ts)
# draw_cars(veh_set, background)
# the_intersection = [ax.imshow(background, origin="lower")]
# background.close()
# background = Image.open(intersection_fig)
# return the_intersection
# ani = animation.FuncAnimation(fig, animate, fargs=(veh_set,),frames=int(5/ts), interval=ts*1000, blit=True, repeat=False)
# plt.show()
# pdb.set_trace()
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
198,
11748,
279,
9945,
198,
11748,
25064,
11,
418,
11,
24254,
11,
6759,
29487,
8019,
198,
2,
198,
2,
1330,
2603,
29487,
8019,
13,
11227,
341,
355,
11034,
198,
2,
1330,
2603,
29487,
80... | 2.446635 | 2,080 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Walson Tung'
'url handlers'
import re,time,json,logging,hashlib,base64,asyncio
import markdown2
from aiohttp import web
from coreweb import get,post
from apis import *
from models import User,Comment,Blog,next_id
from config import configs
COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret
def user2cookie(user,max_age):
'''
Generate cookie str by user.
:param user:
:param max_age:
:return:
'''
#build cookie string by:id-expires-sha1
expires = str(time.time() + max_age)
s = '%s-%s-%s-%s' % (user.id,user.passwd,expires,_COOKIE_KEY)
L = [user.id,expires,hashlib.sha1(s.encode('utf-8')).hexdigest()]
return '-'.join(L)
async def cookie2user(cookie_str):
'''
Parse cookie and load user if cookie is valid.
:param cookie_str:
:return:
'''
if not cookie_str:
return None
try:
L = cookie_str.split('-')
if len(L) != 3:
return None
uid,expires,sha1 = L
if float(expires) < time.time():
return None
user = await User.find(uid)
if user is None:
return None
s = '%s-%s-%s-%s' % (uid,user.passwd,expires,_COOKIE_KEY)
if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
logging.info('invalid sha1')
return None
user.passwd = '******'
return user
except Exception as e:
logging.exception(e)
return None
@get('/')
@get('/blog/{id}')
@get('/register')
@get('/signin')
@post('/api/authenticate')
@get('/signout')
@get('/manage/blogs')
@get('/manage/blogs/create')
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_SHA1 = re.compile(r'^[0-9a-f]{40}$')
@post('/api/users')
@get('/api/blogs')
@get('/api/blogs/{id}')
@post('/api/blogs') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
834,
9800,
834,
796,
705,
54,
874,
261,
309,
2150,
6,
198,
198,
6,
6371,
32847,
6,
198,
198,
11748,
302,
... | 2.043011 | 930 |
from .fruit_fly_net import FruitFlyNet, bio_hash_loss | [
6738,
764,
34711,
62,
12254,
62,
3262,
1330,
22826,
33771,
7934,
11,
13401,
62,
17831,
62,
22462
] | 3.117647 | 17 |
#!/usr/bin/env python3
#
# iso3166-lookup.py 0.0.1
#
# Copyright 2020 Alan Orth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
#
# Queries the ISO 3166 dataset for countries read from a text file. Text file
# should have one organization per line. Results are saved to a CSV including
# the country name, whether it matched or not, and the type of match.
#
# This script is written for Python 3.6+ and requires several modules that you
# can install with pip (I recommend using a Python virtual environment):
#
# $ pip install colorama pycountry requests requests-cache
#
import argparse
import csv
import signal
import sys
import pycountry
from colorama import Fore
# read countries from a text file, one per line
parser = argparse.ArgumentParser(
description="Query ISO 3166-1 to validate countries from a text file and save results in a CSV."
)
parser.add_argument(
"-d",
"--debug",
help="Print debug messages to standard error (stderr).",
action="store_true",
)
parser.add_argument(
"-i",
"--input-file",
help="File name containing countries to look up in ISO 3166-1 and ISO 3166-3.",
required=True,
type=argparse.FileType("r"),
)
parser.add_argument(
"-o",
"--output-file",
help="Name of output file to write results to (CSV).",
required=True,
type=argparse.FileType("w", encoding="UTF-8"),
)
args = parser.parse_args()
# set the signal handler for SIGINT (^C) so we can exit cleanly
signal.signal(signal.SIGINT, signal_handler)
# create empty lists to hold country names
country_names = []
country_official_names = []
country_common_names = []
# iterate over countries and append names to the appropriate lists. We can't use
# a list comprehension here because some countries don't have official_name, etc
# and they raise an AttributeError. Anyways, it's more efficient to iterate over
# the list of countries just once.
for country in pycountry.countries:
country_names.append(country.name.lower())
try:
country_official_names.append(country.official_name.lower())
except AttributeError:
pass
try:
country_common_names.append(country.common_name.lower())
except AttributeError:
pass
# Add names for historic countries from ISO 3166-3
for country in pycountry.historic_countries:
country_names.append(country.name.lower())
try:
country_official_names.append(country.official_name.lower())
except AttributeError:
pass
try:
country_common_names.append(country.common_name.lower())
except AttributeError:
pass
read_countries_from_file()
exit()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
47279,
18,
23055,
12,
5460,
929,
13,
9078,
657,
13,
15,
13,
16,
198,
2,
198,
2,
15069,
12131,
12246,
47664,
13,
198,
2,
198,
2,
770,
1430,
318,
1479,
3788,
25,
3... | 3.194857 | 1,011 |
"""Rotate a newick tree to put the leaf with a given label first."""
from argparse import ArgumentParser
import newick
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__)
parser.add_argument('newick_file')
parser.add_argument('label')
opts = parser.parse_args()
tree = newick.read(opts.newick_file)[0]
rotate(tree, opts.label)
print(newick.dumps(tree))
| [
37811,
24864,
378,
257,
649,
624,
5509,
284,
1234,
262,
12835,
351,
257,
1813,
6167,
717,
526,
15931,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
11748,
649,
624,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
... | 2.772414 | 145 |
# b is base
# n is exponent
print(power(5,7))
print()
print(power2(5,7))
| [
2,
275,
318,
2779,
198,
2,
299,
318,
28622,
198,
198,
4798,
7,
6477,
7,
20,
11,
22,
4008,
198,
4798,
3419,
198,
4798,
7,
6477,
17,
7,
20,
11,
22,
4008,
198
] | 2.242424 | 33 |
from pathlib import Path
from pkg_resources import resource_filename
"""
Paths to data files in this Python module.
"""
# The path to the data files.
DATA_DIRECTORY = Path(resource_filename(__name__, "data"))
# The path to object data.
OBJECT_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("objects")
# The path to object categories dictionary.
OBJECT_CATEGORIES_PATH = OBJECT_DATA_DIRECTORY.joinpath("categories.json")
# Data for the Magnebot torso's y values.
TORSO_Y = OBJECT_DATA_DIRECTORY.joinpath("torso_y.csv")
# The path to the scene data.
SCENE_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("scenes")
# The path to the dictionary of where the robot can spawn.
SPAWN_POSITIONS_PATH = SCENE_DATA_DIRECTORY.joinpath("spawn_positions.json")
# The directory for occupancy maps.
OCCUPANCY_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("occupancy_maps")
# The directory for room maps.
ROOM_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("room_maps")
# The path to the scene bounds data.
SCENE_BOUNDS_PATH = SCENE_DATA_DIRECTORY.joinpath("scene_bounds.json")
# The directory of Magnebot data.
MAGNEBOT_DIRECTORY = DATA_DIRECTORY.joinpath("magnebot")
# The path to the turn constants data.
TURN_CONSTANTS_PATH = MAGNEBOT_DIRECTORY.joinpath("turn_constants.csv")
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
279,
10025,
62,
37540,
1330,
8271,
62,
34345,
198,
198,
37811,
198,
15235,
82,
284,
1366,
3696,
287,
428,
11361,
8265,
13,
198,
37811,
198,
198,
2,
383,
3108,
284,
262,
1366,
3696,
13,
198,
... | 2.936768 | 427 |
# -*- coding:utf-8 -*-
# This file is adapted from the SinglePathOneShot library at
# https://github.com/megvii-model/SinglePathOneShot
# 2020.6.29-Changed for Modular-NAS search space.
# Huawei Technologies Co., Ltd. <linyunfeng5@huawei.com>
# Copyright 2020 Huawei Technologies Co., Ltd.
"""ShuffleNetV2 architectures."""
import torch
import torch.nn as nn
from modnas.registry.construct import register as register_constructor
from modnas.registry.construct import DefaultMixedOpConstructor, DefaultSlotTraversalConstructor
from modnas.registry.arch_space import build, register
from ..slot import register_slot_ccs
from .. import ops
from ..slot import Slot
kernel_sizes = [3, 5, 7, 9]
for k in kernel_sizes:
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnit(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHU{}'.format(k))
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnitXception(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHX{}'.format(k))
def channel_split(x, split):
"""Return data split in channel dimension."""
if x.size(1) == split * 2:
return torch.split(x, split, dim=1)
else:
raise ValueError('Failed to return data split in channel dimension.')
def shuffle_channels(x, groups=2):
"""Return data shuffled in channel dimension."""
batch_size, channels, height, width = x.size()
if channels % groups == 0:
channels_per_group = channels // groups
x = x.view(batch_size, groups, channels_per_group, height, width)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, channels, height, width)
return x
else:
raise ValueError('Failed to return data shuffled in channel dimension.')
class ShuffleUnit(nn.Module):
"""ShuffleNetV2 unit class."""
def forward(self, x):
"""Return network output."""
if self.stride == 1:
x_proj, x = channel_split(x, self.chn_in)
elif self.stride == 2:
x_proj = x
x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
x = shuffle_channels(x)
return x
class ShuffleUnitXception(nn.Module):
"""ShuffleNetV2 Xception unit class."""
def forward(self, x):
"""Return network output."""
if self.stride == 1:
x_proj, x = channel_split(x, self.chn_in)
elif self.stride == 2:
x_proj = x
x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
x = shuffle_channels(x)
return x
class ShuffleNetV2(nn.Module):
"""ShuffleNetV2 class."""
def _get_stem(self, chn_in, chn, stride=2):
"""Return stem layers."""
if stride == 4:
return nn.Sequential(
nn.Conv2d(chn_in, chn, 3, 2, 1, bias=False),
nn.BatchNorm2d(chn, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, 2, 1),
)
return nn.Sequential(
nn.Conv2d(chn_in, chn, 3, stride, 1, bias=False),
nn.BatchNorm2d(chn, affine=True),
nn.ReLU(inplace=True),
)
def forward(self, x):
"""Return network output."""
x = self.features(x)
x = self.globalpool(x)
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
"""Initialize weights for all modules."""
first_conv = True
for m in self.modules():
if isinstance(m, nn.Conv2d):
if first_conv:
nn.init.normal_(m.weight, 0, 0.01)
first_conv = False
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@register_constructor
class ShuffleNetV2SearchConstructor(DefaultMixedOpConstructor):
"""ShuffleNetV2 mixed operator search space constructor."""
def convert(self, slot):
"""Convert slot to mixed operator."""
cands = self.candidates[:]
if self.add_identity_op and slot.stride == 1 and slot.chn_in == slot.chn_out:
self.candidates.append('IDT')
ent = super().convert(slot)
self.candidates = cands
return ent
@register_constructor
class ShuffleNetV2PredefinedConstructor(DefaultSlotTraversalConstructor):
"""ShuffleNetV2 original network constructor."""
def convert(self, slot):
"""Convert slot to module."""
return build('SHU3', slot)
@register
def shufflenetv2_oneshot(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 oneshot model."""
cfgs = [
[16, 1, 2, 1.0],
[64, 4, 2, 1.0],
[160, 4, 2, 1.0],
[320, 8, 2, 1.0],
[640, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2_oneshot(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 oneshot model for CIFAR dataset."""
cfgs = [
[24, 1, 1, 1.0],
[64, 4, 2, 1.0],
[160, 4, 2, 1.0],
[320, 8, 2, 1.0],
[640, 4, 1, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def shufflenetv2(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 model."""
cfgs = [
[24, 1, 4, 1.0],
[116, 4, 2, 1.0],
[232, 8, 2, 1.0],
[464, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 model for CIFAR dataset."""
cfgs = [
[24, 1, 1, 1.0],
[116, 4, 2, 1.0],
[232, 8, 2, 1.0],
[464, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
2,
770,
2393,
318,
16573,
422,
262,
14206,
15235,
3198,
28512,
5888,
379,
198,
2,
3740,
1378,
12567,
13,
785,
14,
28917,
85,
4178,
12,
19849,
14,
28008,
15235,
3198,
... | 2.016497 | 3,334 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-20 18:04
from __future__ import unicode_literals
from django.db import migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
21,
319,
2864,
12,
2998,
12,
1238,
1248,
25,
3023,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.690909 | 55 |
# -*- coding: utf-8 -*-
"""Test the capabilities of the ResourceManager.
"""
import gc
import logging
import re
import pytest
from pyvisa import InvalidSession, ResourceManager, VisaIOError, errors
from pyvisa.constants import AccessModes, InterfaceType, StatusCode
from pyvisa.highlevel import VisaLibraryBase
from pyvisa.rname import ResourceName
from pyvisa.testsuite import BaseTestCase
from . import RESOURCE_ADDRESSES, require_virtual_instr
@require_virtual_instr
class TestResourceManager:
"""Test the pyvisa ResourceManager."""
def setup_method(self):
"""Create a ResourceManager with the default backend library."""
self.rm = ResourceManager()
def teardown_method(self):
"""Close the ResourceManager."""
if self.rm is not None:
self.rm.close()
del self.rm
gc.collect()
def test_lifecycle(self, caplog):
"""Test creation and closing of the resource manager."""
assert self.rm.session is not None
assert self.rm.visalib is not None
assert self.rm is self.rm.visalib.resource_manager
assert not self.rm.list_opened_resources()
assert self.rm.visalib is ResourceManager(self.rm.visalib).visalib
with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
self.rm.close()
assert caplog.records
with pytest.raises(InvalidSession):
self.rm.session
assert self.rm.visalib.resource_manager is None
def test_cleanup_on_del(self, caplog):
"""Test that deleting the rm does clean the VISA session"""
# The test seems to assert what it should even though the coverage report
# seems wrong
rm = self.rm
self.rm = None
with caplog.at_level(logging.DEBUG, logger="pyvisa"):
del rm
gc.collect()
assert "Closing ResourceManager" in caplog.records[0].message
def test_resource_manager_unicity(self):
"""Test the resource manager is unique per backend as expected."""
new_rm = ResourceManager()
assert self.rm is new_rm
assert self.rm.session == new_rm.session
def test_str(self):
"""Test computing the string representation of the resource manager"""
assert re.match(r"Resource Manager of .*", str(self.rm))
self.rm.close()
assert re.match(r"Resource Manager of .*", str(self.rm))
def test_repr(self):
"""Test computing the repr of the resource manager"""
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
self.rm.close()
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
def test_last_status(self):
"""Test accessing the status of the last operation."""
assert self.rm.last_status == StatusCode.success
# Access the generic last status through the visalib
assert self.rm.last_status == self.rm.visalib.last_status
# Test accessing the status for an invalid session
with pytest.raises(errors.Error) as cm:
self.rm.visalib.get_last_status_in_session("_nonexisting_")
assert "The session" in cm.exconly()
def test_list_resource(self):
    """Test listing the available resources."""
    # Default settings: only INSTR resources are reported.
    resources = self.rm.list_resources()
    for v in (v for v in RESOURCE_ADDRESSES.values() if v.endswith("INSTR")):
        assert str(ResourceName.from_string(v)) in resources
    # All resources: the "?*" query matches every resource type.
    resources = self.rm.list_resources("?*")
    for v in RESOURCE_ADDRESSES.values():
        assert str(ResourceName.from_string(v)) in resources
def test_accessing_resource_infos(self):
    """Test accessing resource infos."""
    rname = list(RESOURCE_ADDRESSES.values())[0]
    # Query both the extended and the basic info structures.
    rinfo_ext = self.rm.resource_info(rname)
    rinfo = self.rm.resource_info(rname, extended=False)
    # Re-parse the raw name so we can compare field by field.
    # (rname is deliberately rebound from str to a ResourceName here.)
    rname = ResourceName().from_string(rname)
    assert rinfo_ext.interface_type == getattr(
        InterfaceType, rname.interface_type.lower()
    )
    assert rinfo_ext.interface_board_number == int(rname.board)
    assert rinfo_ext.resource_class == rname.resource_class
    assert rinfo_ext.resource_name == str(rname)
    # The basic structure only carries the interface type and board number.
    assert rinfo.interface_type == getattr(
        InterfaceType, rname.interface_type.lower()
    )
    assert rinfo.interface_board_number == int(rname.board)
def test_listing_resource_infos(self):
    """Test listing resource infos."""
    infos = self.rm.list_resources_info()
    for name, info in infos.items():
        # Parse the raw resource name to compare it field by field.
        parsed = ResourceName().from_string(name)
        expected_type = getattr(InterfaceType, parsed.interface_type.lower())
        assert info.interface_type == expected_type
        assert info.interface_board_number == int(parsed.board)
        assert info.resource_class == parsed.resource_class
        assert info.resource_name == str(parsed)
def test_opening_resource(self):
    """Test opening and closing resources."""
    rname = list(RESOURCE_ADDRESSES.values())[0]
    rsc = self.rm.open_resource(rname, timeout=1234)
    # Check the resource is listed as opened and the attributes are right.
    assert rsc in self.rm.list_opened_resources()
    assert rsc.timeout == 1234
    # Close the rm to check that we close all resources.
    self.rm.close()
    assert not self.rm.list_opened_resources()
    # The resource's own session must have been invalidated too.
    with pytest.raises(InvalidSession):
        rsc.session
def test_opening_resource_bad_open_timeout(self):
    """Test opening a resource with a non integer open_timeout."""
    rname = next(iter(RESOURCE_ADDRESSES.values()))
    # A non-integer open_timeout must be rejected with a helpful message.
    with pytest.raises(ValueError) as excinfo:
        self.rm.open_resource(rname, open_timeout="")
    assert "integer (or compatible type)" in str(excinfo.exconly())
def test_opening_resource_with_lock(self):
    """Test opening a locked resource"""
    rname = list(RESOURCE_ADDRESSES.values())[0]
    rsc = self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
    assert len(self.rm.list_opened_resources()) == 1
    # Timeout when accessing a locked resource
    with pytest.raises(VisaIOError):
        self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
    # The failed open must not have registered a new resource.
    assert len(self.rm.list_opened_resources()) == 1
    # Success to access an unlocked resource.
    rsc.unlock()
    with self.rm.open_resource(
        rname, access_mode=AccessModes.exclusive_lock
    ) as rsc2:
        # Two distinct sessions now reference the same resource.
        assert rsc.session != rsc2.session
        assert len(self.rm.list_opened_resources()) == 2
def test_opening_resource_specific_class(self):
    """Test opening a resource requesting a specific class."""
    rname = next(iter(RESOURCE_ADDRESSES.values()))
    # `object` is not a valid Resource subclass, so the open must fail...
    with pytest.raises(TypeError):
        self.rm.open_resource(rname, resource_pyclass=object)
    # ...and must not leave a half-opened resource behind.
    assert len(self.rm.list_opened_resources()) == 0
def test_open_resource_unknown_resource_type(self, caplog):
    """Test opening a resource for which no registered class exist."""
    rc = ResourceManager._resource_classes
    # Keep a copy so the registry can be restored no matter what happens.
    old = rc.copy()
    rc[(InterfaceType.unknown, "")] = FakeResource
    del rc[(InterfaceType.tcpip, "INSTR")]
    rm = ResourceManager()
    try:
        caplog.clear()
        with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
            # With the TCPIP INSTR class removed, opening must fail and log.
            with pytest.raises(RuntimeError):
                rm.open_resource("TCPIP::192.168.0.1::INSTR")
        assert caplog.records
    finally:
        # Restore the global registry for the other tests.
        ResourceManager._resource_classes = old
def test_opening_resource_unknown_attribute(self):
    """Test opening a resource and attempting to set an unknown attr."""
    rname = next(iter(RESOURCE_ADDRESSES.values()))
    with pytest.raises(ValueError):
        self.rm.open_resource(rname, unknown_attribute=None)
    # The failed open must not leak an opened resource.
    assert not self.rm.list_opened_resources()
def test_get_instrument(self):
    """Check that we get the expected deprecation warning."""
    rname = list(RESOURCE_ADDRESSES.values())[0]
    # get_instrument is a deprecated alias of open_resource.
    with pytest.warns(FutureWarning):
        self.rm.get_instrument(rname)
@require_virtual_instr
class TestResourceParsing(BaseTestCase):
    """Test parsing resources using the builtin mechanism and the VISA lib.

    Those tests require that the interface exist (at least in Keysight
    implementation) so we cannot test arbitrary interfaces (PXI for example).

    """

    def setup_method(self):
        """Create a ResourceManager with the default backend library."""
        self.rm = ResourceManager()

    def teardown_method(self):
        """Close the ResourceManager."""
        # Dropping the last reference triggers the manager's cleanup-on-del.
        del self.rm
        gc.collect()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
14402,
262,
9889,
286,
262,
20857,
13511,
13,
198,
198,
37811,
198,
11748,
308,
66,
198,
11748,
18931,
198,
11748,
302,
198,
198,
11748,
12972,
9288,
198,
198,
67... | 2.467349 | 3,614 |
#!/usr/bin/env python
'''
This module contains helper functions to make plugins simpler to read and write,
centralising common functionality easy to reuse
'''
import os
import re
import cgi
import logging
from tornado.template import Template
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.exceptions import FrameworkAbortException, PluginAbortException
from framework.lib.general import *
from framework.utils import FileOperations
# Template for a single plugin result entry; "type" and "output" are filled
# in by each plugin. This will be json encoded and stored in db as string.
PLUGIN_OUTPUT = {"type": None, "output": None}
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
1212,
8265,
4909,
31904,
5499,
284,
787,
20652,
18599,
284,
1100,
290,
3551,
11,
198,
31463,
1710,
2219,
11244,
2562,
284,
32349,
198,
7061,
6,
198,
198,
11748,
28686,
19... | 4.047945 | 146 |
#! /usr/bin/env python

# The PICMI flavor of the EB test shares its reference data with the
# non-PICMI test (the two are otherwise identical), so run the stock
# regression check against that reference file.
import sys

sys.path.append('../../../../warpx/Regression/Checksum/')

import checksumAPI

result = checksumAPI.evaluate_checksum(
    'ElectrostaticSphereEB',
    'Python_ElectrostaticSphereEB_plt00001',
    do_particles=False,
    atol=1e-12,
)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
5660,
262,
4277,
20683,
1332,
329,
262,
350,
2149,
8895,
2196,
286,
262,
43374,
1332,
198,
2,
1262,
262,
976,
4941,
2393,
355,
329,
262,
1729,
12,
47,
2149,
8895,
1332,
1... | 2.966216 | 148 |
from project.elf import Elf | [
6738,
1628,
13,
7046,
1330,
19067
] | 4.5 | 6 |
from django.db import models
from djaveDT import now
from djavError.models.fixable import Fixable
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
1015,
24544,
1330,
783,
198,
6738,
42625,
615,
12331,
13,
27530,
13,
13049,
540,
1330,
13268,
540,
628
] | 3.535714 | 28 |
#!/usr/bin/env python
# Script entry point: draw the stacked/clustered bar chart when run directly.
if __name__ == "__main__":
    plot_stacked_clustered_bars()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
220,
220,
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
7110,
62,
301,
6021,
62,
565,
436,
1068,
62,
34046,
3419,
198
] | 2.2 | 40 |
from datetime import datetime, timedelta
import azure.batch.batch_auth as batchauth
import azure.batch._batch_service_client as batch
import uuid
import datetime
import time
# Batch account credentials
# NOTE(review): these are blank — fill in real account values before running.
BATCH_ACCOUNT_NAME = ''
BATCH_ACCOUNT_URL = ''
BATCH_ACCOUNT_KEY = ''

# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage.
credentials = batchauth.SharedKeyCredentials(BATCH_ACCOUNT_NAME,
                                             BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
    credentials,
    batch_url=BATCH_ACCOUNT_URL)

# Look up an existing pool; the pool must already exist in the account.
pool = batch_client.pool.get(
    pool_id='testPool'
)

##ToDO: Create nodes prior to run.
# Scale the pool up to a single dedicated node for this run.
poolResizeParam = batch.models.PoolResizeParameter(
    target_dedicated_nodes=1
)
batch_client.pool.resize(
    pool_id=pool.id,
    pool_resize_parameter=poolResizeParam
)
# Create a job on the pool; task dependencies must be enabled so task2 can
# declare that it runs only after task1 completes.
job = batch.models.JobAddParameter(
    id=str(uuid.uuid1()),
    display_name='myBatchJob',
    pool_info=batch.models.PoolInformation(
        pool_id=pool.id
    ),
    # NOTE(review): the SDK models this as a bool; the string 'true' appears
    # to work but `True` would be the documented value — confirm.
    uses_task_dependencies = 'true'
)
job1 = batch_client.job.add(job)

task1 = batch.models.TaskAddParameter(
    id='task1',
    command_line='cmd /c echo "Hello From Batch" >task.txt'
)

# task2 depends on task1 and only starts once task1 has completed.
dependentTasks = list()
dependentTasks.append(task1.id)
task2 = batch.models.TaskAddParameter(
    id='task2',
    command_line = 'cmd /c echo "this is task2 - should execute after task 1" >task2.txt',
    depends_on = batch.models.TaskDependencies(task_ids=dependentTasks)
)

# Submit both tasks in a single collection call.
tasks = list()
tasks.append(task1)
tasks.append(task2)
batch_client.task.add_collection(
    job_id=job.id,
    value=tasks
)
# Perform action with the batch_client
jobs = batch_client.job.list()
for job in jobs:
    # NOTE(review): this loop rebinds `job`, so the polling code below acts
    # on the *last* job listed, not necessarily the one created above.
    print(job.id)
# Poll the job's tasks until they all complete (or the 30-minute deadline
# passes), then cool down and scale the pool back to zero nodes.
job_timeout = timedelta(minutes=30)
timeout_expiration = datetime.datetime.now() + job_timeout
while datetime.datetime.now() < timeout_expiration:
    tasks = batch_client.task.list(job.id)
    incomplete_tasks = [task for task in tasks if
                        task.state != batch.models.TaskState.completed]
    if not incomplete_tasks:
        # All tasks done: wait out the cooldown, release the node, and stop.
        time.sleep(600)
        newpoolResizeParam = batch.models.PoolResizeParameter(
            target_dedicated_nodes=0
        )
        batch_client.pool.resize(
            pool_id=pool.id,
            pool_resize_parameter=newpoolResizeParam
        )
        # BUG FIX: without this break the loop kept sleeping 600s and
        # re-issuing the resize until the 30-minute deadline expired.
        break
    else:
        # Tasks still running: poll again shortly.
        time.sleep(1)
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
11748,
35560,
495,
13,
43501,
13,
43501,
62,
18439,
355,
15458,
18439,
198,
11748,
35560,
495,
13,
43501,
13557,
43501,
62,
15271,
62,
16366,
355,
15458,
220,
198,
11748,
3... | 2.463169 | 991 |
"""
Base class for objects that are backed by database documents.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.serial as etas
class Document(object):
    """Base class for objects that are associated with
    :class:`fiftyone.core.dataset.Dataset` instances and are backed by
    documents in database collections.

    Args:
        dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which the
            document belongs
    """

    @property
    def id(self):
        """The ID of the document, or ``None`` if it has not been added to the
        database.
        """
        return str(self._doc.id) if self._in_db else None

    @property
    def _id(self):
        """The ObjectId of the document, or ``None`` if it has not been added
        to the database.
        """
        return self._doc.id if self._in_db else None

    @property
    def ingest_time(self):
        """The time the document was added to the database, or ``None`` if it
        has not been added to the database.
        """
        return self._doc.ingest_time

    @property
    def in_dataset(self):
        """Whether the document has been added to a dataset."""
        return self.dataset is not None

    @property
    def dataset(self):
        """The dataset to which this document belongs, or ``None`` if it has
        not been added to a dataset.
        """
        return self._dataset

    @property
    def field_names(self):
        """An ordered tuple of the names of the fields of this document."""
        return self._doc.field_names

    @property
    def _in_db(self):
        """Whether the underlying :class:`fiftyone.core.odm.Document` has
        been inserted into the database.
        """
        return self._doc.in_db

    @property
    def _skip_iter_field_names(self):
        """A tuple of names of fields to skip when :meth:`iter_fields` is
        called.
        """
        return tuple()

    def _get_field_names(self, include_private=False):
        """Returns an ordered tuple of field names of this document.

        Args:
            include_private (False): whether to include private fields

        Returns:
            a tuple of field names
        """
        return self._doc._get_field_names(include_private=include_private)

    def get_field(self, field_name):
        """Gets the value of a field of the document.

        Args:
            field_name: the field name

        Returns:
            the field value

        Raises:
            AttributeError: if the field does not exist
        """
        return self._doc.get_field(field_name)

    def set_field(self, field_name, value, create=True):
        """Sets the value of a field of the document.

        Args:
            field_name: the field name
            value: the field value
            create (True): whether to create the field if it does not exist

        Raises:
            ValueError: if ``field_name`` is not an allowed field name or does
                not exist and ``create == False``
        """
        if field_name.startswith("_"):
            raise ValueError(
                "Invalid field name: '%s'. Field names cannot start with '_'"
                % field_name
            )

        self._doc.set_field(field_name, value, create=create)

    def update_fields(self, fields_dict, create=True):
        """Sets the dictionary of fields on the document.

        Args:
            fields_dict: a dict mapping field names to values
            create (True): whether to create fields if they do not exist
        """
        for field_name, value in fields_dict.items():
            self.set_field(field_name, value, create=create)

    def clear_field(self, field_name):
        """Clears the value of a field of the document.

        Args:
            field_name: the name of the field to clear

        Raises:
            ValueError: if the field does not exist
        """
        self._doc.clear_field(field_name)

    def iter_fields(self):
        """Returns an iterator over the ``(name, value)`` pairs of the fields
        of the document.

        Private fields are omitted.

        Returns:
            an iterator that emits ``(name, value)`` tuples
        """
        field_names = tuple(
            f for f in self.field_names if f not in self._skip_iter_field_names
        )
        for field_name in field_names:
            yield field_name, self.get_field(field_name)

    def merge(self, document, overwrite=True):
        """Merges the fields of the document into this document.

        ``None``-valued fields are always omitted.

        Args:
            document: a :class:`Document` of the same type
            overwrite (True): whether to overwrite existing fields. Note that
                existing fields whose values are ``None`` are always
                overwritten
        """
        existing_field_names = self.field_names
        for field_name, value in document.iter_fields():
            if value is None:
                continue

            if (
                not overwrite
                and (field_name in existing_field_names)
                and (self[field_name] is not None)
            ):
                continue

            self.set_field(field_name, value)

    def copy(self):
        """Returns a deep copy of the document that has not been added to the
        database.

        Returns:
            a :class:`Document`
        """
        kwargs = {k: deepcopy(v) for k, v in self.iter_fields()}
        return self.__class__(**kwargs)

    def to_dict(self):
        """Serializes the document to a JSON dictionary.

        Sample IDs and private fields are excluded in this representation.

        Returns:
            a JSON dict
        """
        d = self._doc.to_dict(extended=True)
        return {k: v for k, v in d.items() if not k.startswith("_")}

    def to_json(self, pretty_print=False):
        """Serializes the document to a JSON string.

        Sample IDs and private fields are excluded in this representation.

        Args:
            pretty_print (False): whether to render the JSON in human readable
                format with newlines and indentations

        Returns:
            a JSON string
        """
        return etas.json_to_str(self.to_dict(), pretty_print=pretty_print)

    def to_mongo_dict(self):
        """Serializes the document to a BSON dictionary equivalent to the
        representation that would be stored in the database.

        Returns:
            a BSON dict
        """
        return self._doc.to_dict(extended=False)

    def save(self):
        """Saves the document to the database."""
        self._doc.save()

    def reload(self):
        """Reloads the document from the database."""
        self._doc.reload()

    def _delete(self):
        """Deletes the document from the database."""
        self._doc.delete()

    @classmethod
    def from_dict(cls, d):
        """Loads the document from a JSON dictionary.

        The returned document will not belong to a dataset.

        Args:
            d: a JSON dict

        Returns:
            a :class:`Document`
        """
        doc = cls._NO_COLL_CLS.from_dict(d, extended=True)
        return cls.from_doc(doc)

    @classmethod
    def from_json(cls, s):
        """Loads the document from a JSON string.

        Args:
            s: the JSON string

        Returns:
            a :class:`Document`
        """
        # BUG FIX: this referenced `cls._NO_COLL_CL` (missing the trailing S),
        # which does not match the `_NO_COLL_CLS` attribute used by
        # `from_dict` above and would raise AttributeError at runtime.
        doc = cls._NO_COLL_CLS.from_json(s)
        return cls.from_doc(doc)

    @classmethod
    def _rename_field(cls, collection_name, field_name, new_field_name):
        """Renames any field values for in-memory document instances that
        belong to the specified collection.

        Args:
            collection_name: the name of the MongoDB collection
            field_name: the name of the field to rename
            new_field_name: the new field name
        """
        for document in cls._instances[collection_name].values():
            data = document._doc._data
            data[new_field_name] = data.pop(field_name, None)

    @classmethod
    def _purge_field(cls, collection_name, field_name):
        """Removes values for the given field from all in-memory document
        instances that belong to the specified collection.

        Args:
            collection_name: the name of the MongoDB collection
            field_name: the name of the field to purge
        """
        for document in cls._instances[collection_name].values():
            document._doc._data.pop(field_name, None)

    @classmethod
    def _reload_docs(cls, collection_name):
        """Reloads the backing documents for all in-memory document instances
        that belong to the specified collection.

        Args:
            collection_name: the name of the MongoDB collection
        """
        for document in cls._instances[collection_name].values():
            document.reload()

    def _set_backing_doc(self, doc, dataset=None):
        """Sets the backing doc for the document.

        Args:
            doc: a :class:`fiftyone.core.odm.SampleDocument`
            dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which
                the document belongs, if any
        """
        # Ensure the doc is saved to the database
        if not doc.id:
            doc.save()

        self._doc = doc

        # Save weak reference
        dataset_instances = self._instances[doc.collection_name]
        if self.id not in dataset_instances:
            dataset_instances[self.id] = self

        self._dataset = dataset

    @classmethod
    def _reset_backing_docs(cls, collection_name, doc_ids):
        """Resets the document(s) backing documents.

        Args:
            collection_name: the name of the MongoDB collection
            doc_ids: a list of document IDs
        """
        dataset_instances = cls._instances[collection_name]
        for doc_id in doc_ids:
            document = dataset_instances.pop(doc_id, None)
            if document is not None:
                document._reset_backing_doc()

    @classmethod
    def _reset_all_backing_docs(cls, collection_name):
        """Resets the backing documents for all documents in the collection.

        Args:
            collection_name: the name of the MongoDB collection
        """
        if collection_name not in cls._instances:
            return

        dataset_instances = cls._instances.pop(collection_name)
        for document in dataset_instances.values():
            document._reset_backing_doc()
| [
37811,
198,
14881,
1398,
329,
5563,
326,
389,
9763,
416,
6831,
4963,
13,
198,
198,
91,
15069,
2177,
12,
42334,
11,
28035,
417,
4349,
11,
3457,
13,
198,
91,
4600,
85,
1140,
417,
4349,
13,
785,
1279,
5450,
1378,
85,
1140,
417,
4349,
... | 2.348662 | 4,523 |
from copy import deepcopy
import datetime as dt
from marshmallow.exceptions import ValidationError
import pytest
import pytz
from sfa_api.conftest import (VALID_OBS_JSON, VALID_FORECAST_JSON,
VALID_CDF_FORECAST_JSON, VALID_FORECAST_AGG_JSON,
VALID_AGG_JSON)
from sfa_api.utils.errors import StorageAuthError
from sfa_api.utils import validators
@pytest.mark.parametrize('thetime', [
'09:00', '9:00', '00:00'
])
@pytest.mark.parametrize('bad', [
'25:00', '00:00:00', 'ab:cd', '10:88'
])
@pytest.mark.parametrize('thestring', [
'mysite', 'Site 1', 'A really long but otherwise OK site',
"apostrophe '", 'site_99', 'site tucson, az',
"Test (site)", 'w,', 'test-hyphen'
])
@pytest.mark.parametrize('thestring', [
'<script>bac</script>', '<', ';delete',
'site:a:b', 'site+1', 'site\\G',
'site\n', '', ' ', "'", "' ", '_', ',',
',_', '()', "'()',", "(){ :|:& };"
])
@pytest.mark.parametrize('tz', [
'America/Phoenix',
'Etc/GMT+7'
])
@pytest.mark.parametrize('tz', ['PDT', 'Germany/Berlin'])
@pytest.mark.parametrize('time_', [
dt.datetime(2019, 1, 1, 12, 3, tzinfo=pytz.timezone('MST')),
dt.datetime(2019, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 17, 0, 1, tzinfo=pytz.timezone('MST')),
])
@pytest.mark.parametrize('time_', [
dt.datetime(2049, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 14, 0, 1, tzinfo=pytz.timezone('MST')),
])
@pytest.mark.parametrize("valid", [
None, "observation_uncertainty", "0.0",
] + list(range(0, 101, 10))
)
@pytest.mark.parametrize("invalid", [
"None", "bad string", "101", "-1.0"
])
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'event'},
{'variable': 'notevent', 'interval_label': 'notevent'},
])
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'notevent'},
{'variable': 'notevent', 'interval_label': 'event'},
])
# Create objects for testing report object pairs

# Single-constant-value CDF forecast: derived from the full CDF forecast by
# replacing the 'constant_values' list with one 'constant_value' and an axis.
VALID_CDF_SINGLE_JSON = deepcopy(VALID_CDF_FORECAST_JSON)
VALID_CDF_SINGLE_JSON.pop('constant_values')
VALID_CDF_SINGLE_JSON.update({
    'axis': 'x',
    'constant_value': '5.0'
})

# Aggregate forecast variant with a 60-minute interval length.
VALID_FORECAST_AGG_JSON_60 = deepcopy(VALID_FORECAST_AGG_JSON)
VALID_FORECAST_AGG_JSON_60['interval_length'] = 60

# Aggregate fixture that carries the same aggregate_id as the forecast above
# so the pair validates as matching.
VALID_AGG_JSON_WITH_ID = deepcopy(VALID_AGG_JSON)
VALID_AGG_JSON_WITH_ID.update({
    'aggregate_id': VALID_FORECAST_AGG_JSON_60['aggregate_id'],
})

# Event-type forecast/observation pair (variable and interval_label must
# both be 'event' for event data).
VALID_EVENT_FORECAST_JSON = deepcopy(VALID_FORECAST_JSON)
VALID_EVENT_FORECAST_JSON.update({
    'variable': 'event',
    'interval_label': 'event',
})

VALID_EVENT_OBS_JSON = deepcopy(VALID_OBS_JSON)
VALID_EVENT_OBS_JSON.update({
    'variable': 'event',
    'interval_label': 'event',
})
@pytest.fixture()
@pytest.mark.parametrize('fx,meas', [
(VALID_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
(VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
@pytest.fixture(params=[
('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
('aggregate_id', 'bad')])
@pytest.mark.parametrize('fx,meas', [
(VALID_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
(VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', False),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', True),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', False),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', True),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', False),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', True),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', False),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', True),
])
@pytest.fixture(params=[
('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
('aggregate_id', 'bad'), ('axis', 'y'), ('constant_value', 13.2)])
@pytest.mark.parametrize('fx, forecast_type', [
(VALID_FORECAST_JSON, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, 'forecast'),
(VALID_CDF_FORECAST_JSON, 'probabilistic_forecast'),
(VALID_CDF_SINGLE_JSON, 'probabilistic_forecast_constant_value'),
(VALID_EVENT_FORECAST_JSON, 'event_forecast'),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None, 'probabilistic_forecast'),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value'),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None, 'event_forecast'),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', False),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', True),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', False),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', True),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', False),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', True),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', False),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', True),
])
@pytest.fixture()
@pytest.mark.parametrize('fx,obs,agg,failure_mode', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'observation'),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'reference_forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'aggregate'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'reference_forecast'),
])
@pytest.mark.parametrize("data", [13, 17, 52])
@pytest.mark.parametrize("data", [1, 3, 5, 15, 30, 60, 90])
| [
6738,
4866,
1330,
2769,
30073,
198,
11748,
4818,
8079,
355,
288,
83,
628,
198,
6738,
22397,
42725,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
11748,
12972,
9288,
198,
11748,
12972,
22877,
628,
198,
6738,
264,
13331,
62,
15042,
13,
... | 2.139656 | 3,258 |
from Tkinter import *
from simplex import Simplex
from textOperator import Operator
# GUI front end for building/deleting simplex connections.
vertices = []

master = Tk()
# BUG FIX: the original did `master.title = "master"`, which replaced the
# Tk.title() *method* with a plain string instead of setting the window
# title (and would break any later call to master.title()).
master.title("master")

# Input labels and entry widgets, laid out on a grid.
Label(master, text="enter number of vertices").grid(row=0)
Label(master, text="enter complete connection without "+"'.'").grid(row=1)
Label(master, text="enter connection with "+"'.'").grid(row=2)
Label(master, text="enter connection for delete with "+"'.'").grid(row=4)

e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)

e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=4, column=1)

# Action buttons; `decision_mode` and `delVertices` are defined elsewhere.
Button(master, text='Quit', command=master.quit).grid(row=9, column=0)
Button(master, text='Create', command=decision_mode).grid(row=9, column=1)
Button(master, text='Delete', command=delVertices).grid(row=10, column=1)

# Final window title shown to the user, then enter the Tk event loop.
master.wm_title("Simplex Tree")
mainloop()
6738,
309,
74,
3849,
1330,
1635,
198,
6738,
2829,
87,
1330,
220,
3184,
11141,
198,
6738,
2420,
18843,
1352,
1330,
35946,
198,
198,
1851,
1063,
796,
17635,
628,
628,
198,
9866,
796,
309,
74,
3419,
198,
9866,
13,
7839,
796,
366,
9866,
... | 2.772727 | 308 |
from typing import Tuple, Any
from amino import _, Either, Map, Left, Right, do, Do
from amino.state import State
from ribosome.nvim.io.compute import NvimIO, NvimIOSuspend, NvimIOPure
from ribosome.nvim.io.api import N
from ribosome.nvim.api.function import nvim_call_function, nvim_call_tpe
from ribosome.nvim.api.command import nvim_command
from ribosome import NvimApi
@do(NvimIO[int])
@do(NvimIO[Any])
__all__ = ('plugin_name', 'api_info', 'channel_id', 'rpcrequest', 'rpcrequest_current', 'nvim_quit', 'nvim_api',
'nvim_pid',)
| [
6738,
19720,
1330,
309,
29291,
11,
4377,
198,
198,
6738,
23206,
1330,
4808,
11,
15467,
11,
9347,
11,
9578,
11,
6498,
11,
466,
11,
2141,
198,
6738,
23206,
13,
5219,
1330,
1812,
198,
198,
6738,
12183,
418,
462,
13,
77,
31124,
13,
952,... | 2.502242 | 223 |
"""
Add Two Numbers II: Leetcode 445
You are given two non-empty linked lists representing two non-negative integers.
The most significant digit comes first and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Follow up:
What if you cannot modify the input lists? In other words, reversing the lists is not allowed.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# 0(max(n+m)) time | 0(n+m) space
"""
Example:
Input: (7 -> 2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 8 -> 0 -> 7
input:
[7,2,4,3]
[5,6,4]
[9,8,7,6,6,7,8,9]
[9,8,7,6,6,7,8,9]
[1,2,3,4,5,5,6,9]
[1,2,3,4,5,5,6,9]
output:
[7,8,0,7]
[7,8,0,7]
[1,9,7,5,3,3,5,7,8]
[2,4,6,9,1,1,3,8]
[1,5]
"""
| [
37811,
198,
4550,
4930,
27797,
2873,
25,
1004,
316,
8189,
48655,
198,
198,
1639,
389,
1813,
734,
1729,
12,
28920,
6692,
8341,
10200,
734,
1729,
12,
31591,
37014,
13,
220,
198,
464,
749,
2383,
16839,
2058,
717,
290,
1123,
286,
511,
137... | 2.214452 | 429 |
import networkx as nx
import time as t
# Generating Watts_strogatz_graph using networkx.
# Four parameters:
# n (int) - The number of nodes
# k (int) - Each node is joined with its k nearest neighbors in a ring topology.
# p (float) - The probability of rewiring each edge
# seed (int, optional) - Seed for random number generator (default=None)

# Five timed repetitions per graph size, replacing ten copy-pasted stanzas.
_LABELS = ("1st", "2nd", "3rd", "4th", "5th")

for n in (100000, 1000000):
    for label in _LABELS:
        time1 = t.time()
        G = nx.watts_strogatz_graph(n, 200, 0.5)
        time2 = t.time()
        # Single-argument print(...) is valid in both Python 2 and Python 3
        # (the original used Python-2-only `print` statements).
        print("%s: watts_strogatz_graph %d,200" % (label, n))
        print(time2 - time1)
11748,
3127,
87,
355,
299,
87,
201,
198,
11748,
640,
355,
256,
201,
198,
201,
198,
201,
198,
2,
2980,
803,
27555,
62,
301,
3828,
27906,
62,
34960,
1262,
3127,
87,
13,
220,
201,
198,
2,
6675,
10007,
25,
220,
201,
198,
2,
299,
357... | 2.156997 | 879 |
###############################
# Import Python modules
###############################
import sys,os
# NOTE: Uncomment these lines if you are putting the modules in the local directory
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pyserial.serial'))
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'crcmod'))
import serial
import crcmod.predefined
import time
####################
# DNP Command Setup
# NOTE: Use strings in hex where you can to be consistent with
# bytes.fromhex(str) for functions.
####################
# DNP3 data-link addresses for this station (source) and the target
# outstation (destination). The DNP3 link header carries them as
# 2-byte little-endian fields, so pre-encode them once here.
src_address = 1023
dst_address = 1
SRC_ADDR = src_address.to_bytes(2,'little')
DST_ADDR = dst_address.to_bytes(2,'little')
DNP_HEADER = '0564'  # DNP3 link-layer start octets (0x05 0x64)
####################
# Helper Functions DNP Commands
####################
# Generate DNP3 CRC
# Build Header Packet
# Build Object Packet
####################
# Full DNP Commands
####################
# Data Link Layer Control Codes (hex strings, one octet each).
# Producer (primary/master) codes carry the DIR bit (0x80) set; the
# Consumer (secondary/outstation) codes below are the same values with
# DIR clear — e.g. 0x8B -> 0x0B, 0x8F -> 0x0F, 0xC9 -> 0x49.
## Producer
DLLCC_P_ACK = '80'
DLLCC_P_NACK = '81'
DLLCC_P_LINK_STATUS = '8B'
DLLCC_P_NOT_SUPPORTED = '8F'
DLLCC_P_RESET_LINK_STATES = 'C0'
DLLCC_P_UNCONFIRMED_USER_DATA = 'C4'
DLLCC_P_REQUEST_LINK_STATUS = 'C9'
DLLCC_P_TEST_LINK_STATES = 'D2'
DLLCC_P_CONFIRMED_USER_DATA_D = 'D3'
DLLCC_P_CONFIRMED_USER_DATA_F = 'F3'
## Consumer
DLLCC_O_ACK = '00'
DLLCC_O_NACK = '01'
# BUG FIX: was 'OF' (letter O), which is not valid hex and would make
# bytes.fromhex() raise. Producer LINK_STATUS is 0x8B, so the consumer
# code (DIR bit cleared) is 0x0B.
DLLCC_O_LINK_STATUS = '0B'
DLLCC_O_NOT_SUPPORTED = '0F'
DLLCC_O_RESET_LINK_STATES = '40'
DLLCC_O_UNCONFIRMED_USER_DATA = '44'
DLLCC_O_REQUEST_LINK_STATUS = '49'
DLLCC_O_TEST_LINK_STATES = '52'
DLLCC_O_CONFIRMED_USER_DATA_D = '53'
DLLCC_O_CONFIRMED_USER_DATA_F = '73'
# Application-layer Function Codes (hex strings, one octet each)
FC_CONFIRM = '00'
FC_READ = '01'
FC_WRITE = '02'
FC_SELECT = '03'
FC_OPERATOR = '04'
FC_DIR_OPERATE = '05'
FC_DIR_OPERATE_NO_RESP = '06'
FC_FREEZE = '07'
FC_FREEZE_NO_RESP = '08'
FC_FREEZE_CLEAR = '09'
FC_FREEZE_CLEAR_NO_RESP = '0A'
FC_FREEZE_AT_TIME = '0B'
FC_FREEZE_AT_TIME_NO_RESP = '0C'
FC_COLD_RESTART = '0D'
FC_WARM_RESTART = '0E'
FC_INIT_DATA = '0F'
FC_INIT_APP = '10'
FC_START_APP = '11'
FC_STOP_APP = '12'
FC_SAVE_CONFIG = '13'
FC_ENABLE_UNSOL = '14'
FC_DISABLE_UNSOL = '15'
FC_ASSIGN_CLASS = '16'
FC_DELAY_MEASURE = '17'
FC_RECORD_TIME = '18'
FC_OPEN_FILE = '19'
FC_CLOSE_FILE = '1A'
FC_DELETE_FILE = '1B'
FC_FILE_INFO = '1C'
FC_AUTH_FILE = '1D'
FC_ABORT_FILE = '1E'
FC_ACTIVATE_CONFIG = '1F'
FC_AUTH_REQ = '20'
FC_AUTH_REQ_NO_ACK = '21'
FC_RESP = '81'
FC_UNSOL_RESP = '82'
FC_AUTH_RESP = '83'
# Transport + Application control: FIR/FIN set on both layers
# (single-fragment message).
TCAC_FIRST_FIN = 'C0C0'
# Broadcast Commands (fully pre-built frames, CRCs included)
COLD_RESTART_BROADCAST = '056408C4FFFFFFFF4451C0C00D9C86'
LINK_STATUS_BROADCAST = '056405C9FFFFFFFF46C9'
# Build commands
# Pre-built direct (addressed) frames for this src/dst pair.
# NOTE(review): build_dnp_header and build_dnp_object are defined elsewhere
# in this file (see the "Helper Functions" section above); they presumably
# return ready-to-send byte strings with CRCs appended — confirm their
# signatures where they are defined.
LINK_STATUS_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_REQUEST_LINK_STATUS)
RESET_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_RESET_LINK_STATES)
TEST_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_TEST_LINK_STATES)
UNCONFIRMED_USER_DATA = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_UNCONFIRMED_USER_DATA)
# Application-layer payloads (transport/app control + function code) that
# ride inside an UNCONFIRMED_USER_DATA link frame.
COLD_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_COLD_RESTART)
WARM_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_WARM_RESTART)
# Wrapper for sending broadcast messages
# s = open serial port
# cmd = string of hex bytes
# Wrapper for sending direct messages
# s = open serial port
# cmd = byte string built from build_dnp_header function
# cmd = byte string built from build_dnp_object
###############
# Setup Serial
###############
# Serial channel parameters for the DNP3 link: 19200 baud, 8 data bits,
# one stop bit, 1-second read timeout.
# NOTE(review): parity is left at the pyserial default ('N'), giving 8N1 —
# confirm this matches the outstation's configuration.
port = '/dev/ttyUSB0'
baudrate = 19200
timeout = 1
bytesize = 8
stopbits = serial.STOPBITS_ONE
serialPort = serial.Serial(port=port, baudrate=baudrate,
    bytesize=bytesize, timeout=timeout, stopbits=stopbits)
# Main loop: look up the command named on the command line and send it
# once per second until interrupted with Ctrl-C, then close the port.
# Changes from the original:
#   - removed the dead triple-quoted block that was explicitly marked
#     "TODO: Remove old methods"
#   - removed `response = b''`, which was only referenced inside that
#     dead block
#   - hoisted sys.argv[1] and chained the comparisons with elif (the
#     five tests are mutually exclusive, so behavior is unchanged)
print('Starting DNP3 Stalker. Cntl-C to stop sending commands.\n')
while True:
    try:
        if len(sys.argv) < 2:
            print(' Provide a command. Read the code.\n')
            break
        command = sys.argv[1]
        if command == 'COLD_BROADCAST':
            send_broadcast(serialPort, COLD_RESTART_BROADCAST)
        elif command == 'LINK_BROADCAST':
            send_broadcast(serialPort, LINK_STATUS_BROADCAST)
        elif command == 'LINK_STAT':
            send_direct(serialPort, LINK_STATUS_DIRECT)
        elif command == 'COLD_RESTART':
            send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=COLD_RESTART_OBJ)
        elif command == 'WARM_RESTART':
            send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=WARM_RESTART_OBJ)
        # An unrecognized command falls through and just idles, matching
        # the original behavior.
        time.sleep(1)
    except KeyboardInterrupt:
        break
serialPort.close()
| [
14468,
7804,
4242,
21017,
198,
2,
17267,
11361,
13103,
198,
14468,
7804,
4242,
21017,
198,
11748,
25064,
11,
418,
198,
2,
24550,
25,
791,
23893,
777,
3951,
611,
345,
389,
5137,
262,
13103,
287,
262,
1957,
8619,
198,
2,
17597,
13,
6978... | 1.95784 | 2,870 |
from datetime import timedelta
from typing import Optional
from fastapi import HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from passlib.context import CryptContext
# defining algorithms
from Shared.functions.helperFunctions import get_now_with_tz
from Shared.functions.readSettingsFile import get_setting
_SECRET_KEY = None
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
CREDENTIALS_EXCEPTION = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
# get the secret key from a file if exists, otherwise generate one
async def get_secret_key():
"""Get the secret key used to create a jwt token"""
return get_setting("SECRET")
# define auth schemes
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/token")
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Make sure the hashed password is correct"""
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(plain_password: str) -> str:
"""Hash the password"""
return pwd_context.hash(plain_password)
async def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
"""Create a jwt token to authenticate with"""
to_encode = data.copy()
if expires_delta:
expire = get_now_with_tz() + expires_delta
else:
expire = get_now_with_tz() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, await get_secret_key(), algorithm=ALGORITHM)
return encoded_jwt
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
3049,
15042,
1330,
14626,
16922,
11,
3722,
198,
6738,
3049,
15042,
13,
12961,
1330,
440,
30515,
17,
35215,
3856,
11258,
198,
6738,
474,
577,
1330,
474,
... | 2.955326 | 582 |
#!/usr/bin/env python
from __future__ import with_statement
from contextlib import closing
from subprocess import call, Popen, PIPE
import os
from math import log,sqrt
import numpy as np
def main( ):
'''Write some help documentation here
'''
print "# leading comments can be given a '#' character"
my_dictionary = {}
old_err = i = 0
old_err2 = 0
while( 1 ):
directory_num = my_dictionary['dir_num'] = i
folder = (os.getcwd() + '/output_%(dir_num)03i/') % my_dictionary
# print folder
if( not os.path.exists(folder) ):
print 'did not find folder: %s' % folder
break
my_dictionary['curr_folder'] = folder
# we want to do:
# data = open('dogpack.data','w')
# print >> data, dogpack_data_template % { 'mx': mx_now, 'ts_method': ts_method}
# data.close()
# and we avoid the .close() (even in case of exception) with 'with':
directory_num = i
try:
qex = np.loadtxt(folder + "/q0000.dat")[1:]
qapp = np.loadtxt(folder + "/q0001.dat")[1:]
except IOError:
print('''Did not find the data file.
Please Wait for simulation to finish running.''')
break
qlength = len(qex)/5
m = sqrt(qlength)
dx = dy = 10.0/m
print 'm = %(mm)d' % {'mm':m}
qex = qex[:qlength] # only density for this error
qapp = qapp[:qlength] # only density
diff = qex - qapp
new_err = sum(abs(diff)) * dx * dy /100.0
new_err2 = max(abs(diff)) # / max(abs(qex))
r1 = 'L1-error = %(new).3e; ' % {'old': old_err, 'new' : new_err}
if( old_err > 0 and new_err > 0 ):
result = r1 + ' log2(ratio) = %(rat).3f' % \
{'rat' : log( (old_err/new_err), 2) }
else:
result = r1 + ' log2(ratio) = %(rat).3f' % \
{'old' : old_err, 'new' : new_err, 'rat' : (old_err/new_err) }
r2 = 'Linf-error = %(new).3e; ' % {'old': old_err2, 'new' : new_err2}
if( old_err2 > 0 and new_err2 > 0 ):
result2 = r2 + ' log2(ratio) = %(rat).3f' % \
{'rat' : log( (old_err2/new_err2), 2) }
else:
result2 = r2 + ' log2(ratio) = %(rat).3f' % \
{'old' : old_err2, 'new' : new_err2, 'rat' : (old_err2/new_err2) }
# This is exactly the format I want:
#{\normalsize $25$} & {\normalsize $1.747\times 10^{-4}$} & {\normalsize --} & {\normalsize $8.292\times 10^{-5}$} & {\normalsize --} \\
print result
print result2
old_err = new_err
old_err2 = new_err2
i = i + 1
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(
usage='''%%prog (-h |
%s''' % main.__doc__)
opts, args = parser.parse_args()
main( )
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
11593,
37443,
834,
1330,
351,
62,
26090,
198,
6738,
4732,
8019,
1330,
9605,
198,
6738,
850,
14681,
1330,
869,
11,
8099,
268,
11,
350,
4061,
36,
198,
11748,
28686,
198,
6738,
1068... | 1.985003 | 1,467 |
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test Shannon entropy calculations """
import unittest
import math
from sparktkregtests.lib import sparktk_test
if __name__ == '__main__':
unittest.main()
| [
2,
43907,
25,
900,
21004,
28,
40477,
12,
23,
198,
198,
2,
220,
15069,
1849,
7,
66,
8,
1849,
5304,
8180,
1849,
10606,
1819,
341,
1849,
198,
2,
198,
2,
220,
49962,
1849,
4625,
1849,
1169,
1849,
25189,
4891,
1849,
34156,
11,
1849,
14... | 2.563107 | 309 |
import base64
| [
11748,
2779,
2414,
628
] | 3.75 | 4 |
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
10662,
17721,
21282,
74,
7295,
13,
25927,
1330,
19390,
198
] | 2.576923 | 26 |
import subprocess
# e.g.
dl_osm_from_extents(-77,-78,45,46)
| [
11748,
850,
14681,
198,
198,
2,
304,
13,
70,
13,
198,
25404,
62,
418,
76,
62,
6738,
62,
2302,
658,
32590,
3324,
12095,
3695,
11,
2231,
11,
3510,
8,
198
] | 2.033333 | 30 |
clientes1 = lista_de_clientes(["joao", 'maria', 'jose'])
clientes2 = lista_de_clientes(["dani", 'tiago', 'luana'])
print(clientes1)
print(clientes2) | [
198,
198,
16366,
274,
16,
796,
1351,
64,
62,
2934,
62,
16366,
274,
7,
14692,
7639,
5488,
1600,
705,
3876,
544,
3256,
705,
73,
577,
6,
12962,
198,
16366,
274,
17,
796,
1351,
64,
62,
2934,
62,
16366,
274,
7,
14692,
67,
3216,
1600,
... | 2.220588 | 68 |
import pkg_resources
import ConfigParser
from configobj import ConfigObj, flatten_errors
import os
import validate
validator = validate.Validator()
import os.path
# from http://stackoverflow.com/questions/4028904
ank_user_dir = os.path.join(os.path.expanduser("~"), ".autonetkit")
#NOTE: this only gets loaded once package-wide if imported as import autonetkit.config
settings = load_config()
| [
11748,
279,
10025,
62,
37540,
198,
11748,
17056,
46677,
198,
6738,
4566,
26801,
1330,
17056,
49201,
11,
27172,
268,
62,
48277,
198,
11748,
28686,
198,
198,
11748,
26571,
198,
12102,
1352,
796,
26571,
13,
47139,
1352,
3419,
198,
198,
11748... | 3.336134 | 119 |
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
import random
import operator
import functools
import numpy as np
import pythia.modules.layers as layers
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
11748,
555,
715,
395,
198,
198,
11748,
28034,
198,
11748,
4738,
198,
11748,
10088,
198,
11748,
1257,
310,
10141,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
... | 3.5 | 54 |
# Example code from Aaron Hall StackOverflow response
# https://stackoverflow.com/questions/2627002/whats-the-pythonic-way-to-use-getters-and-setters/36943813#36943813
foo = Protective()
foo.protected_value = 35
print(foo.__dict__)
foo.protected_value = 200 # raises ValueError
del foo.protected_value # raises AttributeError
# Another example from Python Cookbook
| [
2,
17934,
2438,
422,
12139,
4789,
23881,
5886,
11125,
2882,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
2075,
1983,
21601,
14,
1929,
1381,
12,
1169,
12,
29412,
291,
12,
1014,
12,
1462,
12,
1904,
12,
1136,
101... | 3.189655 | 116 |
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from astropy.visualization import simple_norm
from scipy.integrate import simps
# Generate fake data
from scipy.stats.kde import gaussian_kde
from lightcurve import generate_lightcurve
###
### Plot data from instruments on graphs
###
elems=['SiII','MgII']
inst_names=['MIKE1','MIKE2','Xshooter']
data=[]
for i,elem in enumerate(elems):
fig = plt.figure(i)
ax = fig.add_subplot(1, 1, 1)
for j,inst_name in enumerate(inst_names):
x,y=np.loadtxt('data/'+elem+'_'+inst_name+'.csv', delimiter=',', unpack=True)
data.append((x,y))
area = simps(y-1,x)
y=(y-1)/area
print(simps(y,x))
ax.plot(x,y, linewidth=1,label=inst_name)
ax.legend()
plt.xlabel("Wavelength")
plt.ylabel("Normalized flux")
plt.title(elem)
fig.savefig('figures/'+elem+'.png')
| [
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
6738,
6468,
28338,
13,
41464,
1634,
1330,
2829,
62,
27237,... | 2.205607 | 428 |
import numpy
numpy.random.seed(40)
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]
converters = []
from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))
from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))
lmds = MultidimensionalScaling()
lmds.set_landmark(True)
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))
from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))
from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))
from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))
from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))
from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
new_mpl = False
try:
swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
new_mpl = True
except:
figure = plt.figure()
swiss_roll_fig = Axes3D(figure)
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)
from shogun import RealFeatures
for (i, (converter, label)) in enumerate(converters):
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
features = RealFeatures(X)
converter.set_target_dim(2)
converter.parallel.set_num_threads(1)
new_feats = converter.embed(features).get_feature_matrix()
if not new_mpl:
embedding_subplot = fig.add_subplot(4,2,i+1)
else:
embedding_subplot = fig.add_subplot(3,3,i+2)
embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title(label,fontsize=9)
print converter.get_name(), 'done'
plt.show()
| [
11748,
299,
32152,
198,
77,
32152,
13,
25120,
13,
28826,
7,
1821,
8,
198,
926,
796,
299,
32152,
13,
5235,
6738,
14116,
10786,
40720,
40720,
7890,
14,
83,
726,
14,
2032,
747,
2487,
62,
8043,
13,
19608,
3256,
403,
8002,
28,
17821,
737... | 2.319358 | 1,121 |
from django.shortcuts import render
from django.views.generic import TemplateView
from .models import Slider
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
6738,
764,
27530,
1330,
3454,
1304,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
628,
62... | 2.209125 | 263 |
"""Test geomconv script."""
import os
import pytest
import geomconv
fixtures_dir = os.path.join('tests', 'fixtures')
@pytest.fixture
def chdir_fixtures(request):
"""Change the directory to the fixtures dir and back to the root directory
after finished."""
cwd = os.getcwd()
os.chdir(fixtures_dir)
request.addfinalizer(fin)
def test_main_single(capsys):
"""Supplying a single outfile should print out the correct geomconv."""
outfile = os.path.join(fixtures_dir, 'one.out')
expected_file = os.path.join(fixtures_dir, 'one_expected.geomconv')
geomconv.main([outfile], 'emptyrc')
out, err = capsys.readouterr()
with open(expected_file, 'r') as f:
expected = f.read()
assert out == expected
@pytest.mark.parametrize('outfile', [
[],
['*.out']
])
def test_main_globbing(outfile, capsys, chdir_fixtures):
"""Supplying a glob pattern should also get the correct file."""
geomconv.main(outfile, 'emptyrc')
out, err = capsys.readouterr()
with open('one_expected.geomconv', 'r') as f:
expected = f.read()
assert out == expected
def test_side_view(capsys):
"""Supplying two outfile should print out the two outputs side-by-side."""
outfiles = [os.path.join(fixtures_dir, 'one.out'),
os.path.join(fixtures_dir, 'two.in')]
expected_file = os.path.join(fixtures_dir, 'side_view_expected.geomconv')
geomconv.main(outfiles)
out, err = capsys.readouterr()
with open(expected_file, 'r') as f:
expected = f.read()
assert out == expected
| [
37811,
14402,
4903,
296,
42946,
4226,
526,
15931,
198,
11748,
28686,
198,
11748,
12972,
9288,
198,
11748,
4903,
296,
42946,
198,
198,
69,
25506,
62,
15908,
796,
28686,
13,
6978,
13,
22179,
10786,
41989,
3256,
705,
69,
25506,
11537,
628,
... | 2.557536 | 617 |
escreva('Olá, mundo!')
escreva('Eu sou o Lucas :)')
escreva('Estou aprendendo Python')
escreva('Com o Professor Guanabara')
escreva('No CursoEmVideo')
| [
198,
198,
3798,
260,
6862,
10786,
30098,
6557,
11,
27943,
78,
0,
11537,
198,
3798,
260,
6862,
10786,
36,
84,
24049,
267,
15257,
14373,
11537,
198,
3798,
260,
6862,
10786,
22362,
280,
2471,
10920,
31110,
11361,
11537,
198,
3798,
260,
686... | 2.508197 | 61 |
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
if get_backend() == "tf":
import tensorflow as tf
class SegmentTree(object):
"""
TensorFlow Segment tree for prioritized replay.
"""
def __init__(
self,
storage_variable,
capacity=1048
):
"""
Helper to represent a segment tree in pure TensorFlow.
Args:
storage_variable (tf.Variable): TensorFlow variable to use for storage.
capacity (int): Capacity of the segment tree.
"""
self.values = storage_variable
self.capacity = capacity
def insert(self, index, element, insert_op=None):
"""
Inserts an element into the segment tree by determining
its position in the tree.
Args:
index (int): Insertion index.
element (any): Element to insert.
insert_op (Union(tf.add, tf.minimum, tf, maximum)): Insert operation on the tree.
"""
insert_op = insert_op or tf.add
index += self.capacity
# Use a TensorArray to collect updates to the segment tree, then perform them all at once.
index_updates = tf.TensorArray(
dtype=tf.int32,
infer_shape=False,
size=1,
dynamic_size=True,
clear_after_read=False
)
element_updates = tf.TensorArray(
dtype=tf.float32,
infer_shape=False,
size=1,
dynamic_size=True,
clear_after_read=False
)
index_updates = index_updates.write(index=0, value=index)
element_updates = element_updates.write(index=0, value=element)
# Search and update values while index >=1
loop_update_index = tf.div(x=index, y=2)
# Return the TensorArrays containing the updates.
loop_update_index, index_updates, element_updates, _ = tf.while_loop(
cond=cond,
body=insert_body,
loop_vars=[loop_update_index, index_updates, element_updates, 1],
parallel_iterations=1,
back_prop=False
)
indices = index_updates.stack()
updates = element_updates.stack()
assignment = tf.scatter_update(ref=self.values, indices=indices, updates=updates)
with tf.control_dependencies(control_inputs=[assignment]):
return tf.no_op()
def get(self, index):
"""
Reads an item from the segment tree.
Args:
index (int):
Returns: The element.
"""
return self.values[self.capacity + index]
def index_of_prefixsum(self, prefix_sum):
"""
Identifies the highest index which satisfies the condition that the sum
over all elements from 0 till the index is <= prefix_sum.
Args:
prefix_sum .float): Upper bound on prefix we are allowed to select.
Returns:
int: Index/indices satisfying prefix sum condition.
"""
assert_ops = list()
# 0 <= prefix_sum <= sum(priorities)
priority_sum = tf.reduce_sum(input_tensor=self.values, axis=0)
# priority_sum_tensor = tf.fill(dims=tf.shape(prefix_sum), value=priority_sum)
assert_ops.append(tf.Assert(
condition=tf.less_equal(x=prefix_sum, y=priority_sum),
data=[prefix_sum]
))
# Vectorized loop -> initialize all indices matching elements in prefix-sum,
index = 1
with tf.control_dependencies(control_inputs=assert_ops):
index, _ = tf.while_loop(cond=cond, body=search_body, loop_vars=[index, prefix_sum])
return index - self.capacity
def reduce(self, start, limit, reduce_op=None):
"""
Applies an operation to specified segment.
Args:
start (int): Start index to apply reduction to.
limit (end): End index to apply reduction to.
reduce_op (Union(tf.add, tf.minimum, tf.maximum)): Reduce op to apply.
Returns:
Number: Result of reduce operation
"""
reduce_op = reduce_op or tf.add
# Init result with neutral element of reduce op.
# Note that all of these are commutative reduce ops.
if reduce_op == tf.add:
result = 0.0
elif reduce_op == tf.minimum:
result = float('inf')
elif reduce_op == tf.maximum:
result = float('-inf')
else:
raise ValueError("Unsupported reduce OP. Support ops are [tf.add, tf.minimum, tf.maximum]")
start += self.capacity
limit += self.capacity
_, _, result = tf.while_loop(cond=cond, body=reduce_body, loop_vars=(start, limit, result))
return result
def get_min_value(self):
"""
Returns min value of storage variable.
"""
return self.reduce(0, self.capacity - 1, reduce_op=tf.minimum)
def get_sum(self):
"""
Returns sum value of storage variable.
"""
return self.reduce(0, self.capacity - 1, reduce_op=tf.add)
| [
2,
15069,
2864,
14,
23344,
383,
45715,
34960,
7035,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.413017 | 2,443 |
# Copyright 2021 QHAna plugin runner contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from flask.app import Flask
from qhana_plugin_runner.api.util import SecurityBlueprint
from qhana_plugin_runner.util.plugins import QHAnaPluginBase, plugin_identifier
_plugin_name = "costume-loader"
__version__ = "v0.1.0"
_identifier = plugin_identifier(_plugin_name, __version__)
COSTUME_LOADER_BLP = SecurityBlueprint(
_identifier, # blueprint name
__name__, # module import name!
description="Costume loader API.",
template_folder="costume_loader_templates",
)
try:
# It is important to import the routes **after** COSTUME_LOADER_BLP and CostumeLoader are defined, because they are
# accessed as soon as the routes are imported.
import plugins.costume_loader_pkg.routes
except ImportError:
# When running `poetry run flask install`, importing the routes will fail, because the dependencies are not
# installed yet.
pass
| [
2,
15069,
33448,
1195,
39,
2025,
64,
13877,
17490,
20420,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
... | 3.427918 | 437 |
from django.db import models
# Create your models here.
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
201,
198,
201,
198,
201,
198,
2,
13610,
534,
4981,
994,
13,
201,
198
] | 2.952381 | 21 |
from random import choice, randint
words = []
names = []
with open("EnglishWords.txt", "r") as f:
line = f.readline().strip()
while line:
words.append(line)
line = f.readline().strip()
with open("names.txt", "r") as f:
line = f.readline().strip()
while line:
names.append(line)
line = f.readline().strip()
with open("sql_insert.txt", "w") as f:
for i in range(0, 100):
username = ""
password = ""
email = ""
first_name = ""
last_name = ""
username = choice(words) + choice(words)
if randint(0, 1):
username = username[0].upper() + username[1:]
if randint(0, 1):
username += str(randint(0, 100))
pw1 = choice(words)
if randint(0, 1):
pw1 = pw1.upper()
pw2 = choice(words)
if randint(0, 1):
pw2 = pw2.upper()
pw3 = choice(words)
if randint(0, 1):
pw3 = pw3.upper()
password = pw1 + pw2 + pw3 + str(randint(0, 1000))
first_name = choice(names)
last_name = choice(names)
email = first_name.lower() + "." + last_name.lower() + "@student.manchester.ac.uk"
sql = f"INSERT INTO USERS (username, password, email, first_name, last_name) VALUES ('{username}', '{password}', '{email}', '{first_name}', '{last_name}');"
f.write(sql) | [
6738,
4738,
1330,
3572,
11,
43720,
600,
198,
198,
10879,
796,
17635,
198,
14933,
796,
17635,
198,
198,
4480,
1280,
7203,
15823,
37117,
13,
14116,
1600,
366,
81,
4943,
355,
277,
25,
198,
220,
220,
220,
1627,
796,
277,
13,
961,
1370,
... | 2.067947 | 677 |
import os
import discord
import requests
| [
11748,
28686,
220,
198,
11748,
36446,
198,
11748,
7007,
198
] | 4.2 | 10 |
# Copyright (c) 2020 Hugh Wade
# SPDX-License-Identifier: MIT
import slotserver.slot_repository as sr
MAX_ID_LEN = 1024
MAX_DATA_LEN = 1024 * 8
MAX_BATCH_SLOTS = 10
MAX_BATCH_SUBSLOTS = 10
class SlotOverflowException(Exception):
'''
Raised when something is bigger than allowed
'''
pass
class SlotUnderflowException(Exception):
'''
Raised when something is smaller than allowed
'''
pass
class SlotConsumerService():
'''
Read only interface to slot data.
Enforces size constraints that mitigate DOS attack vectors.
'''
def get_slotdata(self, slot_ids: object, subslot_ids: object) -> object:
'''
Get data for a set of slot/subslots.
Returned as a Dictionary of Dictionaries: data[slot_id][subslot_id]
'''
if(len(slot_ids) > MAX_BATCH_SLOTS or
len(subslot_ids) > MAX_BATCH_SUBSLOTS):
raise SlotOverflowException()
if(len(slot_ids) == 0 or
len(subslot_ids) == 0):
raise SlotUnderflowException()
results = {}
for slot_id in slot_ids:
results[slot_id] = {}
for subslot_id in subslot_ids:
results[slot_id][subslot_id] = \
self.repo.get(slot_id, subslot_id, False)
return results
class SlotProducerService():
'''
Write only interface to slot data.
Enforces size constraints that mitigate DOS attack vectors.
'''
| [
2,
15069,
357,
66,
8,
12131,
25464,
19136,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
11748,
17314,
18497,
13,
43384,
62,
260,
1930,
37765,
355,
19677,
198,
198,
22921,
62,
2389,
62,
43,
1677,
796,
28119,
198,
22... | 2.37055 | 618 |
from __future__ import annotations
import logging
from typing import Any, Dict, Sequence, Union
import sentry_sdk
from asphalt.core import Context, resolve_reference
from sentry_sdk.integrations import Integration
from typeguard import check_argument_types
from asphalt.exceptions.api import ExceptionReporter
logger = logging.getLogger(__name__)
class SentryExceptionReporter(ExceptionReporter):
"""
Reports exceptions using the Sentry_ service.
To use this backend, install asphalt-exceptions with the ``sentry`` extra.
All keyword arguments are directly passed to :func:`sentry_sdk.init`.
The following defaults are set for the client arguments:
* environment: "development" or "production", depending on the ``__debug__`` flag
Integrations can be added via the ``integrations`` option which is a list where each item is
either an object that implements the :class:`sentry_sdk.integrations.Integration` interface,
or a dictionary where the ``type`` key is a module:varname reference to a class implementing
the aforementioned interface. The ``args`` key, when present, should be a sequence that is
passed to the integration as positional arguments, while the ``kwargs`` key, when present,
should be a mapping of keyword arguments to their values.
The extras passed to this backend are passed to :func:`sentry_sdk.capture_exception` as keyword
arguments.
For more information, see the `Sentry SDK documentation`_.
.. _Sentry: https://sentry.io/
.. _Sentry SDK documentation: https://docs.sentry.io/platforms/python/
"""
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
18931,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
45835,
11,
4479,
198,
198,
11748,
1908,
563,
62,
21282,
74,
198,
6738,
48292,
13,
7295,
1330,
30532,
11,
10568,
62,
35790,
1... | 3.62754 | 443 |
#!/usr/bin/env python3
# encoding: utf-8
"""
@author: Medivh Xu
@file: send_email.py
@time: 2020-03-04 21:27
"""
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
37811,
198,
31,
9800,
25,
2019,
452,
71,
33591,
198,
31,
7753,
25,
3758,
62,
12888,
13,
9078,
198,
31,
2435,
25,
12131,
12,
3070,
12,
302... | 2.589474 | 95 |
import numpy as np
import numpy.linalg as la
from collections import deque
from .LayerReservoir import LayerReservoir
"""
Notes (from scholarpedia):
-The SPECTRAL RADIUS of the reservoir weights codetermines:
(1): (?)
(2): amount of nonlinear interaction of input components through time
(larger spectral radius ==> longer-range interactions)
-INPUT SCALING codetermines the degree of nonlinearity of the reservoir dynamics. Examples:
(1): very small input amplitudes ==> reservoir behaves almost like linear medium.
(2): very large input amplitudes ==> drives the reservoir neurons to the saturation of the
sigmoid, and a binary switching dynamic results.
-OUTPUT FEEDBACK SCALING determines the extent to which the trained ESN has an autonomous
generation component.
(1): no output feedback: ESN unable to generate predictions for future time steps.
(2): nonzero output feedbacl: danger of dynamical instability.
-CONNECTIVITY/SPARSITY of reservoir weight matrix:
(1) todo
"""
class LayerEsnReservoir(LayerReservoir):
    """Echo State Network reservoir layer.

    (args):
        input_size  : input signal is input_size dimensions.
        num_units   : reservoir has num_units units.
        idx         : unique ID of the reservoir (default=None) -- good for debug/multiple reservoirs
        echo_param  : leaky rate of the reservoir units
        activation  : activation function of the reservoir units (default=tanh)
        debug       : when True, this will print live information (default=False)
    (description): ESN reservoir; extend LayerReservoir to create different reservoirs
    """

    def info(self):
        """Return a human-readable summary of the reservoir configuration."""
        summary = [
            u'Reservoir(num_units=%d, input_size=%d, output_size=%d, \u03B5=%.2f)\n'
            % (self.num_units, self.input_size, self.output_size, self.echo_param),
            'W_res - spec_scale: %.2f, %s init\n'
            % (self.spectral_scale, self.W_res_init_strategy),
            'W_in - scale: %.2f, %s init'
            % (self.input_weights_scale, self.W_in_init_strategy),
        ]
        return ''.join(summary)

    def forward(self, x):
        """Forward propagate input signal u(n) (at time n) through reservoir.

        x: input_size-dimensional input vector
        Returns the updated (leaky-integrated) reservoir state.
        """
        super(LayerEsnReservoir, self).forward(x)
        assert self.ins_init, "Res. input weights not yet initialized (ID=%d)." % self.idx
        assert self.res_init, "Res. recurrent weights not yet initialized (ID=%d)." % self.idx

        # Drive from the input signal and from the recurrent connections;
        # both are stashed on the instance for inspection/debugging.
        drive_in = np.dot(self.W_in, x).squeeze()
        self.prev_in_to_res = np.copy(drive_in)
        drive_rec = np.dot(self.state.reshape(1, -1), self.W_res)
        self.prev_res_to_res = np.copy(drive_rec)

        # Leaky-integration update -- Equation (1) in "Formalism and Theory"
        # of the Scholarpedia page.
        self.prev_state = np.copy(self.state)
        leak = self.echo_param
        self.state = (1. - leak) * self.state + leak * self.activation(drive_in + drive_rec)
        return self.state.squeeze()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
299,
32152,
13,
75,
1292,
70,
355,
8591,
198,
198,
6738,
17268,
1330,
390,
4188,
198,
198,
6738,
764,
49925,
4965,
712,
10840,
1330,
34398,
4965,
712,
10840,
198,
198,
37811,
198,
16130,
357,
... | 2.447284 | 1,565 |
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pickle
import re
word2int_filepath = "./train data/word2int.p"
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
1330,
374,
20471,
198,
11748,
2298,
293,
198,
11748,
302,
198,
198,
4775,
17,
600,
62,
7753,
6978,
796,
366,
19571,
... | 2.921569 | 51 |
import os
import subprocess
from lsst.pipe.tasks.ingest import IngestTask
from lsst.utils import getPackageDir
from lsst.meas.algorithms import IngestIndexedReferenceTask
# from lsst.pipe.drivers.constructCalibs import BiasTask, FlatTask
from huntsman.drp.utils import date_to_ymd
def ingest_raw_data(filename_list, butler_directory, mode="link", ignore_ingested=False):
    """Ingest raw exposure files into a butler repository.

    Args:
        filename_list: filenames of the raw files to ingest.
        butler_directory: root directory of the butler repository.
        mode: how files are brought into the repository (default: "link").
        ignore_ingested: if True, skip files that were already ingested.
    """
    # Build the configured ingest task, then hand it the files.
    ingest_task = IngestTask().prepareTask(
        root=butler_directory, mode=mode, ignoreIngested=ignore_ingested)
    ingest_task.ingestFiles(filename_list)
def ingest_reference_catalogue(butler_directory, filenames, output_directory=None):
    """Ingest a reference catalogue as an indexed butler reference.

    Args:
        butler_directory: root directory of the butler repository.
        filenames: reference catalogue files to ingest.
        output_directory: destination for the indexed catalogue
            (defaults to the butler directory).
    """
    if output_directory is None:
        output_directory = butler_directory

    # Load the ingestion config shipped with obs_huntsman.
    config_file = os.path.join(getPackageDir("obs_huntsman"), "config",
                               "ingestSkyMapperReference.py")
    config = IngestIndexedReferenceTask.ConfigClass()
    config.load(config_file)

    # Convert the files into the correct format and place them into the repo.
    cli_args = [butler_directory,
                "--configfile", config_file,
                "--output", output_directory,
                "--clobber-config"]
    cli_args.extend(filenames)
    IngestIndexedReferenceTask.parseAndRun(args=cli_args)
def ingest_master_biases(calib_date, butler_directory, calib_directory, rerun, validity=1000):
    """Ingest the master bias of a given date into the calib repository.

    Args:
        calib_date: calibration date (converted via date_to_ymd).
        butler_directory: root directory of the butler repository.
        calib_directory: calib repository to ingest into.
        rerun: rerun name holding the constructed biases.
        validity: validity period (days) for the calib.
    """
    calib_date = date_to_ymd(calib_date)
    # For some reason we have to provide the config explicitly.
    config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestBiases.py")
    # TODO - Remove hard-coded directory structure.
    cmd = " ".join([
        f"ingestCalibs.py {butler_directory}",
        f"{butler_directory}/rerun/{rerun}/calib/bias/{calib_date}/*/*.fits",
        f"--validity {validity}",
        f"--calib {calib_directory} --mode=link",
        "--config clobber=True",
        f"--configfile {config_file}",
    ])
    subprocess.check_output(cmd, shell=True)
def ingest_master_flats(calib_date, butler_directory, calib_directory, rerun, validity=1000):
    """Ingest the master flat of a given date into the calib repository.

    Args:
        calib_date: calibration date (converted via date_to_ymd).
        butler_directory: root directory of the butler repository.
        calib_directory: calib repository to ingest into.
        rerun: rerun name holding the constructed flats.
        validity: validity period (days) for the calib.
    """
    calib_date = date_to_ymd(calib_date)
    # For some reason we have to provide the config explicitly.
    config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestFlats.py")
    # TODO - Remove hard-coded directory structure.
    cmd = " ".join([
        f"ingestCalibs.py {butler_directory}",
        f"{butler_directory}/rerun/{rerun}/calib/flat/{calib_date}/*/*.fits",
        f"--validity {validity}",
        f"--calib {calib_directory} --mode=link",
        "--config clobber=True",
        f"--configfile {config_file}",
    ])
    subprocess.check_output(cmd, shell=True)
def constructBias(calib_date, exptime, ccd, butler_directory, calib_directory, rerun, data_ids,
                  nodes=1, procs=1):
    """Construct a master bias from the given visits.

    Args:
        calib_date: calibration date (converted via date_to_ymd).
        exptime: exposure-time selector for the input visits.
        ccd: CCD number to process.
        butler_directory: root directory of the butler repository.
        calib_directory: calib repository directory.
        rerun: rerun name to write results into.
        data_ids: visit IDs of the raw bias exposures.
        nodes, procs: parallelism passed to the LSST batch task.
    """
    calib_date = date_to_ymd(calib_date)
    visits = '^'.join([f'{id}' for id in data_ids])
    cmd = " ".join([
        f"constructBias.py {butler_directory} --rerun {rerun}",
        f"--calib {calib_directory}",
        f"--id visit={visits} dataType='bias'",
        f"expTime={exptime}",
        f"ccd={ccd}",
        f"--nodes {nodes} --procs {procs}",
        f"--calibId expTime={exptime} calibDate={calib_date}",
    ])
    subprocess.check_output(cmd, shell=True)
def constructFlat(calib_date, filter_name, ccd, butler_directory, calib_directory, rerun, data_ids,
                  nodes=1, procs=1):
    """Construct a master flat from the given visits.

    Args:
        calib_date: calibration date (converted via date_to_ymd).
        filter_name: filter selector for the input visits.
        ccd: CCD number to process.
        butler_directory: root directory of the butler repository.
        calib_directory: calib repository directory.
        rerun: rerun name to write results into.
        data_ids: visit IDs of the raw flat exposures.
        nodes, procs: parallelism passed to the LSST batch task.
    """
    calib_date = date_to_ymd(calib_date)
    visits = '^'.join([f'{id}' for id in data_ids])
    cmd = " ".join([
        f"constructFlat.py {butler_directory} --rerun {rerun}",
        f"--calib {calib_directory}",
        f"--id visit={visits} dataType='flat'",
        f"filter={filter_name}",
        f"ccd={ccd}",
        f"--nodes {nodes} --procs {procs}",
        f"--calibId filter={filter_name} calibDate={calib_date}",
    ])
    subprocess.check_output(cmd, shell=True)
def processCcd(butler_directory, calib_directory, rerun, filter_name, dataType='science'):
    """Process ingested exposures of the given type and filter."""
    # Single command string; pieces joined via implicit concatenation.
    cmd = (f"processCcd.py {butler_directory} --rerun {rerun}"
           f" --id dataType={dataType} filter={filter_name}"
           f" --calib {calib_directory}")
    subprocess.check_output(cmd, shell=True)
def makeDiscreteSkyMap(butler_directory='DATA', rerun='processCcdOutputs:coadd'):
    """Create a sky map that covers processed exposures."""
    cmd = (f"makeDiscreteSkyMap.py {butler_directory} --id --rerun {rerun} "
           "--config skyMap.projection='TAN'")
    subprocess.check_output(cmd, shell=True)
def makeCoaddTempExp(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
                     rerun='coadd'):
    """Warp exposures onto the sky map."""
    parts = [
        f"makeCoaddTempExp.py {butler_directory} --rerun {rerun}",
        f"--selectId filter={filter} --id filter={filter} tract=0",
        "patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2",
        "--config doApplyUberCal=False",
    ]
    cmd = " ".join(parts)
    print(f'The command is: {cmd}')
    subprocess.check_output(cmd, shell=True)
def assembleCoadd(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
                  rerun='coadd'):
    """Assemble the warped exposures into a coadd."""
    parts = [
        f"assembleCoadd.py {butler_directory} --rerun {rerun}",
        f"--selectId filter={filter} --id filter={filter} tract=0",
        "patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2",
    ]
    cmd = " ".join(parts)
    print(f'The command is: {cmd}')
    subprocess.check_output(cmd, shell=True)
| [
11748,
28686,
198,
11748,
850,
14681,
198,
198,
6738,
43979,
301,
13,
34360,
13,
83,
6791,
13,
278,
395,
1330,
554,
3495,
25714,
198,
6738,
43979,
301,
13,
26791,
1330,
651,
27813,
35277,
198,
198,
6738,
43979,
301,
13,
1326,
292,
13,... | 2.50713 | 2,244 |
#!/usr/bin/python3
import discord
import json
import requests
import math
# Discord client plus in-memory session bookkeeping for active games.
client = discord.Client()
sessions = []
session_id = 0
# Host/port of the remote tcubed move-recommendation API.
API_HOST = "99.189.77.224"
API_PORT = "8000"
# Read the bot token from disk. The with-block closes the file on exit;
# the original body additionally had a bare `f.close` (attribute access,
# never called) which was a silent no-op and has been removed.
with open("discord_token", "r") as f:
    token = f.read()
# Map single-letter board symbols to Player values (Player is defined
# elsewhere in this file) and build the inverse mapping for converting back.
turns = {'O': Player.O, 'E': Player.E, 'X': Player.X}
reverse_turns = {value: key for (key, value) in turns.items()}
# convert string to Player
# convert Player to string
# format http uri with host and port
# make json get request
# make an api request to tcubed api and return best move given board and player
# ttt main function (process args etc)
@client.event
@client.event
client.run(token)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
36446,
198,
11748,
33918,
198,
11748,
7007,
198,
11748,
10688,
198,
198,
16366,
796,
36446,
13,
11792,
3419,
198,
198,
82,
6202,
796,
17635,
198,
29891,
62,
312,
796,
657,
198,
198... | 2.848739 | 238 |
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
# Glob patterns (relative to the repo top-level dir) selecting the files
# that the formatters operate on.
PATHS = [
    # C
    "extmod/*.[ch]",
    "lib/netutils/*.[ch]",
    "lib/timeutils/*.[ch]",
    "lib/utils/*.[ch]",
    "mpy-cross/*.[ch]",
    "ports/*/*.[ch]",
    "ports/windows/msvc/**/*.[ch]",
    "py/*.[ch]",
    # Python
    "drivers/**/*.py",
    "examples/**/*.py",
    "extmod/**/*.py",
    "ports/**/*.py",
    "py/**/*.py",
    "tools/**/*.py",
    "tests/**/*.py",
]

# Glob patterns removed from the PATHS selection above.
EXCLUSIONS = [
    # STM32 build includes generated Python code.
    "ports/*/build*",
    # gitignore in ports/unix ignores *.py, so also do it here.
    "ports/unix/*.py",
    # not real python files
    "tests/**/repl_*.py",
    # needs careful attention before applying automatic formatting
    "tests/basics/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

# Configuration file for the uncrustify C formatter.
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")

# Extensions routed to the C formatter and to the Python formatter.
C_EXTS = (
    ".c",
    ".h",
)
PY_EXTS = (".py",)
# Post-formatting regex fixups: normalise "sizeof(x) *(y)" to "sizeof(x) * (y)".
# The pattern is now a raw string: the original used "\(" / "\*" inside a
# normal string literal, which is an invalid escape sequence (a
# DeprecationWarning, and a SyntaxWarning on modern Python).
FIXUP_REPLACEMENTS = ((re.compile(r"sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),)
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
262,
4527,
37906,
1628,
11,
2638,
1378,
9383,
1773,
7535,
13,
2398,
14,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,... | 2.778539 | 876 |
"""Proxmox plugin for Let's Encrypt client"""
import os
import subprocess
import logging
import zope.component
import zope.interface
from letsencrypt import interfaces
from letsencrypt.plugins import common
from letsencrypt import errors
from shutil import copyfile
logger = logging.getLogger(__name__)
| [
37811,
2964,
87,
76,
1140,
13877,
329,
3914,
338,
14711,
6012,
5456,
37811,
198,
11748,
28686,
198,
11748,
850,
14681,
198,
11748,
18931,
198,
11748,
1976,
3008,
13,
42895,
198,
11748,
1976,
3008,
13,
39994,
198,
6738,
8781,
12685,
6012,
... | 3.719512 | 82 |
import base64
import json
from decimal import Decimal
from .cast_handlers import bool_cast_handler
| [
11748,
2779,
2414,
198,
11748,
33918,
198,
6738,
32465,
1330,
4280,
4402,
198,
198,
6738,
764,
2701,
62,
4993,
8116,
1330,
20512,
62,
2701,
62,
30281,
628,
628
] | 3.678571 | 28 |
#! /usr/bin/python3
import sys
import re as regex
# Our scripts
import tools
import settings
if __name__ == '__main__':
    # Run from the script's own directory so relative paths resolve,
    # then hand off to main() (defined elsewhere in this file).
    tools.move_to_script_dir(sys.argv[0])
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
198,
11748,
302,
355,
40364,
198,
198,
2,
3954,
14750,
198,
11748,
4899,
198,
11748,
6460,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10... | 2.535211 | 71 |
import click
import requests
import os
import json
import questionary
from questionary import Separator, Choice, prompt
from tabulate import tabulate
import gridly_cli.api as api
from gridly_cli.utils import records_data_to_json, dump_to_json_file, dump_to_csv_file
# Default headers for every Gridly API request. NOTE: the API key is read
# from the GRIDLY_API_KEY environment variable at import time; if it is
# missing this raises KeyError before any command runs.
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'ApiKey ' + str(os.environ["GRIDLY_API_KEY"])
}
@click.group()
def gridly():
    """A CLI wrapper for the API of Gridly."""
####### Grid #######
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all Grids')
@click.option('-u', 'action', flag_value='u', help='To update Grid name')
def grid(action):
    """
    List all Grids / Update Grid name
    """
    if action == 'u':
        # Rename the chosen Grid.
        grid_id = choose_grid()
        new_name = questionary.text("New Grid name:").ask()
        api.update_grid(grid_id, {"name": new_name})
        click.echo("Your Grid has been changed")
    elif action == 'ls':
        # List the Grids of the chosen Database.
        db_id = choose_database()
        for entry in api.get_grids(db_id):
            click.echo(entry["name"])
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def project(action):
    """
    List all Projects
    """
    if action != 'ls':
        gridly()
        return
    for entry in api.get_projects():
        click.echo(entry["name"])
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def database(action):
    """
    List all Databases
    """
    if action != 'ls':
        gridly()
        return
    project_id = choose_project()
    for entry in api.get_databases(project_id):
        click.echo(entry["name"])
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', help='To list all views')
@click.argument('view_id', required=False)
def view(action, view_id):
    """
    List all views / Get info of a specified view
    """
    if action == 'ls':
        # List the views of the chosen Grid.
        grid_id = choose_grid()
        for entry in api.get_views(grid_id):
            click.echo(entry["name"])
    elif view_id is not None:
        # Pretty-print the full view description.
        click.echo(json.dumps(api.get_view(view_id), indent=4))
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def column(action):
    """
    List all columns of a Grid
    """
    if action != 'ls':
        gridly()
        return
    grid_id = choose_grid()
    grid_info = api.get_grid(grid_id)
    # One table row per column: id, name, type.
    rows = [[col["id"], col["name"], col["type"]] for col in grid_info.get("columns")]
    click.echo("Grid name: " + grid_info.get("name"))
    click.echo(tabulate(rows, headers=["Column ID", "Column Name", "Column Type"], tablefmt="grid"))
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all records of a view')
@click.option('-d', 'action', flag_value='d', help='To delete records')
def record(action):
    """
    List all records of a view / Delete records
    """
    if action == 'ls':
        view_id = choose_view()
        response_columns = api.get_view(view_id)
        columns = response_columns.get("columns")
        response_records = api.get_records(view_id)
        # Set up column keys before add value to each column
        # (first pass: register every columnId seen in any record, so each
        # column gets its own list of cell values).
        ls_cell = {} # ls_cell is a dictionary
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                ls_cell.setdefault(value["columnId"], [])
        # Map value to column
        # (second pass: append each cell's value; cells with no "value" key
        # contribute an empty string so columns keep a row-per-record shape).
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                if value["columnId"] in ls_cell and "value" in value:
                    ls_cell[value["columnId"]].append(value["value"])
                elif value["columnId"] in ls_cell and "value" not in value:
                    ls_cell[value["columnId"]].append("")
                else:
                    continue
        # Re-key the table from internal column IDs to display names, for
        # columns the view knows about; unknown IDs keep their raw key.
        for column in columns:
            if column["id"] in ls_cell:
                ls_cell[column["name"]] = ls_cell.pop(column["id"])
            else:
                continue
        click.echo(tabulate(ls_cell, headers="keys", tablefmt="grid"))
    elif action == 'd':
        # Interactive deletion: offer every record ID as a checkbox choice.
        view_id = choose_view()
        response_records = api.get_records(view_id)
        ls_record_id = []
        for record in response_records:
            ls_record_id.append(record["id"])
        ls_chosen_record = questionary.checkbox(
            'Select records which you want delete',
            choices=ls_record_id).ask()
        data = {
            "ids": ls_chosen_record
        }
        api.delete_records(view_id, data)
    else:
        gridly()
@gridly.command()
@click.option('-json', 'type_json', flag_value='json', default=False, help="To export to JSON file type")
@click.option('-csv', 'type_csv', flag_value='csv', default=False, help="To export to CSV file type")
@click.option('-lang', 'target', flag_value='lang', default=False, help="To export translation language columns to separate files")
@click.argument('view_id')
@click.argument('dest', type=click.Path(exists=True), default='./', required=False)
def export(type_json, type_csv, target, view_id, dest):
    """
    Export all records of a view to JSON and/or CSV files
    """
    records = records_data_to_json(api.get_records(view_id))

    # Group records by translation language, or keep everything under "all".
    lang_records = {}
    if target == 'lang':
        view_info = api.get_view(view_id)
        codes = [col["languageCode"] for col in view_info["columns"]
                 if 'languageCode' in col]
        for code in codes:
            lang_records[code] = api.split_column(records, code)
    else:
        lang_records["all"] = records

    # Write one file per language group, in each requested format.
    if type_json == 'json':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.json'
            dump_to_json_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
    if type_csv == 'csv':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.csv'
            dump_to_csv_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
if __name__ == '__main__':
gridly() | [
11748,
3904,
198,
11748,
7007,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
1808,
560,
198,
6738,
1808,
560,
1330,
8621,
283,
1352,
11,
18502,
11,
6152,
198,
6738,
7400,
5039,
1330,
7400,
5039,
198,
198,
11748,
10706,
306,
62,
4450... | 2.275921 | 2,932 |
from math import ceil
from typing import Any, List
from unittest import TestCase
from unittest.mock import MagicMock, patch
from rich.table import Table
from kaskade.renderables.paginated_table import PaginatedTable
from tests import faker
| [
6738,
10688,
1330,
2906,
346,
198,
6738,
19720,
1330,
4377,
11,
7343,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
198,
198,
6738,
5527,
13,
11487,
1330,
8655,
19... | 3.536232 | 69 |
#
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class LocalSite(models.Model):
    """
    A division within a Review Board installation.

    A LocalSite is an independent, isolated division of a single server.
    Users can be designated as members of a LocalSite, and optionally as
    admins (which lets them manipulate the site's repositories, groups and
    users).

    Most other models in this module can be assigned to a single LocalSite,
    at which point only members can see or manipulate those objects. Access
    control is performed at every level, and consistency is enforced through
    a liberal sprinkling of assertions and unit tests.
    """
    name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)
    users = models.ManyToManyField(User, blank=True,
                                   related_name='local_site')
    admins = models.ManyToManyField(User, blank=True,
                                    related_name='local_site_admins')

    def is_accessible_by(self, user):
        """Return whether the user has access to this LocalSite.

        The user must be logged in and listed in the 'users' field.
        """
        if not user.is_authenticated():
            return False
        return self.users.filter(pk=user.pk).exists()

    def is_mutable_by(self, user, perm='site.change_localsite'):
        """Return whether a user can modify settings in this LocalSite.

        The user must either be staff with the given permission or be listed
        in the 'admins' field. By default this checks whether the LocalSite
        itself can be modified; pass a different permission to check another
        object.
        """
        if user.has_perm(perm):
            return True
        return self.admins.filter(pk=user.pk).exists()
| [
2,
198,
2,
4981,
13,
9078,
1377,
32329,
329,
262,
366,
19023,
3526,
13,
15654,
1,
598,
13,
198,
2,
198,
2,
15069,
357,
66,
8,
3050,
220,
3271,
309,
808,
9458,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
... | 3.166331 | 992 |
import os
import numpy
from dnachisel import (
CircularDnaOptimizationProblem,
DnaOptimizationProblem,
random_dna_sequence,
sequence_to_biopython_record,
Specification,
annotate_record,
)
def test_circular_example():
    """This example has a BsmBI cross origin site (location -3 -- 3)"""
    record_path = os.path.join(
        "tests", "tests_from_genbanks", "genbanks", "circular_example_1.gb"
    )
    problem = CircularDnaOptimizationProblem.from_record(record_path)
    first_eval = problem.constraints_evaluations().evaluations[0]
    assert str(first_eval.locations[0]) == "-3-3(+)"
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
def test_all_shorthands():
    """This test compiles all shorthands as a check that nothing is broken."""
    numpy.random.seed(123)
    record = sequence_to_biopython_record(random_dna_sequence(1000))
    # One (location, shorthand-label) pair per specification kind.
    annotations = [
        ((100, 900), "@no(CATG)"),
        ((100, 900), "@gc(40-60%)"),
        ((100, 900), "@insert(AarI_site)"),
        ((650, 752), "@cds"),
        ((100, 200), "@keep"),
        ((250, 273), "@primer"),
        ((250, 280), "@change"),
        ((943, 950), "@sequence(AKGNTKT)"),
        ((955, 958), "@sequence(ATT|ATC|GGG)"),
    ]
    for location, label in annotations:
        annotate_record(record, location, label=label)
    problem = DnaOptimizationProblem.from_record(record)
    assert len(problem.constraints) == 13  # AllowPrimer counts for 4 specs.
    assert not problem.all_constraints_pass()
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
| [
11748,
28686,
198,
11748,
299,
32152,
198,
6738,
288,
77,
620,
36811,
1330,
357,
198,
220,
220,
220,
7672,
934,
35,
2616,
27871,
320,
1634,
40781,
11,
198,
220,
220,
220,
360,
2616,
27871,
320,
1634,
40781,
11,
198,
220,
220,
220,
4... | 2.616564 | 652 |
from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.transfer.api.manager import Manager
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from pprint import pprint
from cloudmesh.common.debug import VERBOSE
from cloudmesh.shell.command import command, map_parameters
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
6279,
76,
5069,
13,
29149,
13,
21812,
1330,
3141,
198,
6738,
6279,
76,
5069,
13,
29149,
13,
21812,
1330,
42636,
21575,
198,
6738,
6279,
76,
5069,
13,
39437,
13,
15042,
13,
3715... | 3.801887 | 106 |
import unittest
from talipp.indicators import DEMA
from TalippTest import TalippTest
# Run this module's test cases when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
3305,
3974,
13,
521,
44549,
1330,
360,
27630,
198,
198,
6738,
7193,
3974,
14402,
1330,
7193,
3974,
14402,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
22... | 2.795918 | 49 |
from example_python_package_shim import multi
| [
198,
6738,
1672,
62,
29412,
62,
26495,
62,
1477,
320,
1330,
5021,
628,
198
] | 3.5 | 14 |
from django.urls import path, include, re_path
from .import views
urlpatterns=[
path('',views.page1,name="index"),
path('Resources/',views.MembersListView.as_view(),name="resources"),
path('Resource/<int:pk>', views.MembersDetailView.as_view(), name='Resource-detail'),
path('Technologies/',views.TechnologiesListView.as_view(),name="technologies"),
#re_path(r'^member/(?P<pk>\d+)$', views.MembersDetailView.as_view(), name='member-detail')
] | [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
11,
302,
62,
6978,
198,
6738,
764,
11748,
5009,
198,
6371,
33279,
82,
41888,
198,
220,
220,
220,
3108,
10786,
3256,
33571,
13,
7700,
16,
11,
3672,
2625,
9630,
12340,
198,
220,
... | 2.806061 | 165 |
from CCICApp.zhihu.zhihu_login import ZhiHuLogin
| [
6738,
12624,
2149,
4677,
13,
23548,
48406,
13,
23548,
48406,
62,
38235,
1330,
1168,
5303,
38202,
47790,
198
] | 2.722222 | 18 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198
] | 3.444444 | 9 |
from django.core.management import call_command
from conf.celery import app
@app.task(autoretry_for=(TimeoutError,))
@app.task()
| [
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
198,
6738,
1013,
13,
7015,
88,
1330,
598,
628,
198,
31,
1324,
13,
35943,
7,
2306,
9997,
563,
62,
1640,
16193,
48031,
12331,
11,
4008,
628,
198,
31,
1324,
13,
35943,... | 2.977778 | 45 |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DemGenerator
Random DEM generator
--------------------
begin : 2017-08-29
git sha : $Format:%H$
copyright : (C) 2017 by Alexandre Delahaye
email : menoetios@gmail.com
***************************************************************************/
"""
import argparse
import numpy as np
import sys
from math import *
from osgeo import gdal
from osgeo.gdalconst import *
# ---- Command-line interface ----
parser = argparse.ArgumentParser(description='Generates a random DEM.')
parser.add_argument("dempath", metavar='path', help='output DEM path')
parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
# Raster dimensions in pixels.
parser.add_argument("--height", type=int, default=1000, help="DEM height (default: 1000)")
parser.add_argument("--width", type=int, default=1000, help="DEM width (default: 1000)")
# Terrain-shaping parameters.
parser.add_argument("--waterratio", type=float, default=0.5, help="water ratio (default: 0.5)")
parser.add_argument("--island", action="store_true", help="set island mode")
parser.add_argument("--scale", type=float, default=20, help="features scale (default: 20)")
parser.add_argument("--detailslevel", type=float, default=3, help="level of features details (default: 3)")
parser.add_argument("--spread", type=float, default=3, help="features spread (default: 3)")
parser.add_argument("--roughness", type=float, default=5, help="features roughness (default: 5)")
parser.add_argument("--directionality", type=float, default=5, help="features directionality (default: 5)")
# A preset overrides every tunable above except the raster dimensions.
parser.add_argument("--preset", type=str, choices=['archipelago', 'mountainous_island'], \
    help="predefined set of parameters (overrides all parameters except height and width)")
args = parser.parse_args()

# Configure the generator (DemGenerator is defined elsewhere in this file),
# run it, and write the resulting DEM to the requested path.
dem = DemGenerator()
dem.setParams(
    verbose=args.verbose,
    height=args.height,
    width=args.width,
    waterRatio=args.waterratio,
    island=args.island,
    scale=args.scale,
    detailsLevel=args.detailslevel,
    spread=args.spread,
    roughness=args.roughness,
    directionality=args.directionality,
    preset=args.preset)
dem.generate()
dem.writeToFile(args.dempath)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
14,
17174,
17174,
4557,
8162,
198,
1897,
8645,
1352,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.875792 | 789 |
'''
Make a figure of the thin pv-slices stacked on top of each other.
'''
from spectral_cube import SpectralCube, Projection
from astropy.io import fits
from astropy import units as u
import numpy as np
from glob import glob
import os
from os.path import join as osjoin
import matplotlib.pyplot as plt
from aplpy import FITSFigure
from paths import (fourteenB_HI_data_wGBT_path, allfigs_path,
fourteenB_wGBT_HI_file_dict)
from constants import hi_freq
from plotting_styles import twocolumn_figure, default_figure
# Make sure the figure directory exists
fig_path = allfigs_path("pvslices")
if not os.path.exists(fig_path):
os.mkdir(fig_path)
pvslice_dir = fourteenB_HI_data_wGBT_path("downsamp_1kms/")
# I need the beam in the cube to convert to K
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_1kms/M33_14B-088_HI.clean.image.GBT_feathered.1kms.fits"))
jybeam_to_K = cube.beam.jtok(hi_freq)
del cube
# Get all pv-slices
filenames = glob(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_PA_*_pvslice_40.0arcsec_width.fits"))
# The slices go from a PA of 0 to 175 in increments of 5 deg
ordered_filenames = []
pas = np.arange(0, 180, 5)
for pa in pas:
for fname in filenames:
if "PA_{}_".format(pa) in fname:
ordered_filenames.append(fname)
break
# Want to put on a common scale. Grab the max from all slices.
max_val = 0
for fname in ordered_filenames:
hdu = fits.open(fname)[0]
max_slice_val = np.nanmax(hdu.data)
if max_slice_val > max_val:
max_val = max_slice_val
# Split into figures of 6
for i in range(6):
fig = plt.figure(figsize=(8.1, 11.))
for j, n in enumerate(np.arange(6 * i, 6 * (i + 1))):
hdu = fits.open(ordered_filenames[n])[0]
fig_n = FITSFigure(hdu, subplot=(6, 1, j + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh')
fig_n.show_contour(hdu,
levels=[2 / jybeam_to_K.value,
3 / jybeam_to_K.value,
4 / jybeam_to_K.value],
smooth=3)
zero_vel_posn = hdu.header['CRVAL2'] / \
np.abs(hdu.header['CDELT2'])
fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
fig_n.hide_axis_labels()
fig_n.hide_ytick_labels()
# Put the PA in the upper corner
if i < 4:
fig_n.add_label(0.81, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
else:
fig_n.add_label(0.2, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
if j != 5:
fig_n.hide_xaxis_label()
fig_n.hide_xtick_labels()
# if j == 0:
# fig_n.add_colorbar()
# fig_n.colorbar.set_location('top')
# fig_n.colorbar.set_label_properties(size=11)
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.png".format(i)))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.pdf".format(i)))
plt.close()
Now make a figure of all of the pv-slice paths on the zeroth moment map
mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['Moment0'])[0])
mom0.quicklook()
mom0.FITSFigure.show_regions(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_pvslice_40.0arcsec_width.reg"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.png"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.pdf"))
# Make a smaller figure for the paper. Include pv-slices along the major,
# minor and warped major (135 deg) axes
twocolumn_figure()
fig = plt.figure(figsize=(8.4, 4.2))
max_size = fits.open(ordered_filenames[0])[0].shape[1]
maj_header = fits.open(ordered_filenames[0])[0].header
for i, pa in zip(range(3), [0, 90, 135]):
idx = np.where(pas == pa)[0]
hdu = fits.open(ordered_filenames[idx])[0]
# Convert to K
hdu = fits.PrimaryHDU(hdu.data * jybeam_to_K.value, hdu.header)
# Reverse the direction of the 135 slice
if i == 2:
hdu = fits.PrimaryHDU(hdu.data[::-1], hdu.header)
if pa != 0:
# Match the major axis slice length to make each the same shape
padded_slice = np.zeros((hdu.shape[0], max_size)) * np.NaN
# Velocity axes will match
pad_size = (max_size - hdu.shape[1]) / 2
if hdu.shape[1] % 2 == 0:
padded_slice[:, pad_size:max_size - pad_size] = hdu.data
else:
padded_slice[:, pad_size:max_size - pad_size - 1] = hdu.data
hdu = fits.PrimaryHDU(padded_slice, maj_header)
fig_n = FITSFigure(hdu, subplot=(3, 1, i + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh', vmin=0)
fig_n.show_contour(hdu,
levels=[2,
3,
4],
smooth=3)
# zero_vel_posn = hdu.header['CRVAL2'] / \
# np.abs(hdu.header['CDELT2'])
# fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
# linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
# fig_n.hide_axis_labels()
# fig_n.hide_ytick_labels()
if i == 1:
fig_n.set_axis_labels(ylabel='Velocity (km/s)')
else:
fig_n.axis_labels.hide_y()
if i < 2:
fig_n.axis_labels.hide_x()
fig_n.tick_labels.hide_x()
else:
fig_n.set_axis_labels(xlabel='Offset (deg)')
# Put the PA in the upper corner
fig_n.add_label(0.9, 0.75, "{} deg".format(int(pa)),
relative=True, size=12,
bbox={"boxstyle": "square", "facecolor": "w"})
fig_n.add_colorbar()
fig_n.colorbar.set_ticks([0, 5, 10, 20, 40])
fig_n.colorbar.set_font(size=11)
if i == 1:
fig_n.colorbar.set_axis_label_text('Intensity (K)')
fig_n.colorbar.set_axis_label_font(size=12)
plt.subplots_adjust(hspace=0.01)
plt.tight_layout()
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.png"))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.pdf"))
plt.close()
default_figure()
| [
198,
7061,
6,
198,
12050,
257,
3785,
286,
262,
7888,
279,
85,
12,
82,
677,
274,
24167,
319,
1353,
286,
1123,
584,
13,
198,
7061,
6,
198,
198,
6738,
37410,
62,
40296,
1330,
13058,
1373,
29071,
11,
4935,
295,
198,
6738,
6468,
28338,
... | 1.98238 | 3,689 |
import socket
import time
import struct
import threading
import select
import random
import Queue
import warnings
if __name__ == "__main__":
markerserver = MarkerServer(port=55555, sync_interval=15)
markerserver.start()
sockets = []
for i in range(25):
c = MarkerSocket(ip="127.0.0.1", port=55555, name=str("client%d" % i))
c.start()
sockets.append(c)
for s in sockets:
mark = str("S%3d" % int(random.random()*255))
print("sending marker %s with client %s" % (mark, s.name))
s.send(mark)
time.sleep(random.random()*1)
while True:
m = markerserver.read()
if None in m:
break
print m
print markerserver
for c in sockets:
c.stop()
c.join()
sockets = []
for i in range(markerserver.sync_interval*1, 0, -1):
print("waiting.. %d" % i)
time.sleep(1)
print markerserver
markerserver.stop()
markerserver.join() | [
11748,
17802,
198,
11748,
640,
198,
11748,
2878,
198,
11748,
4704,
278,
198,
11748,
2922,
198,
11748,
4738,
198,
11748,
4670,
518,
198,
11748,
14601,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,... | 2.200873 | 458 |
from spotty.config.abstract_instance_config import AbstractInstanceConfig
from spotty.providers.gcp.config.validation import validate_instance_parameters
VOLUME_TYPE_DISK = 'disk'
DEFAULT_IMAGE_NAME = 'spotty'
| [
6738,
4136,
774,
13,
11250,
13,
397,
8709,
62,
39098,
62,
11250,
1330,
27741,
33384,
16934,
198,
6738,
4136,
774,
13,
15234,
4157,
13,
70,
13155,
13,
11250,
13,
12102,
341,
1330,
26571,
62,
39098,
62,
17143,
7307,
198,
198,
44558,
383... | 3.261538 | 65 |
import argparse
import itertools
import logging
import os
import os.path as osp
import StringIO
import signal
import sys
import threading
import time
from dnslib import RR,QTYPE,RCODE
from dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
from .config import (
DEFAULT_CONFIG_PATH,
GSDriver,
GStorageKeybaseProfile,
Profile,
Profiles,
)
def config_push(profile, bucket, **kwargs):
"""Push encryted configuration of a profile on Google Storage
:param profile: profile to push (a directory in ~/.config/cloud-dns/)
:param bucket: the destination Google Storage bucket
:param config_dir: absolute path to Cloud DNS root config dir
(default ~/.config/cloud-dns)
"""
profile = GStorageKeybaseProfile(profile, GSDriver, bucket, **kwargs)
profile.push()
class ZoneResolver(BaseResolver):
"""
Simple fixed zone file resolver.
"""
def __init__(self, zone_file_generator, glob=False, ttl=3600):
"""
Initialise resolver from zone file.
Stores RRs as a list of (label,type,rr) tuples
If 'glob' is True use glob match against zone file
"""
self.glob = glob
self.eq = 'matchGlob' if glob else '__eq__'
self.zone_file_generator = zone_file_generator
self.load()
if ttl > 0:
thread = threading.Thread(target=self.reload, args=(ttl,))
thread.daemon = True
thread.start()
def resolve(self,request,handler):
"""
Respond to DNS request - parameters are request packet & handler.
Method is expected to return DNS response
"""
reply = request.reply()
qname = request.q.qname
qtype = QTYPE[request.q.qtype]
local_zone = self.zone
for name, rtype, rr in local_zone:
# Check if label & type match
if getattr(qname,self.eq)(name) and (qtype == rtype or
qtype == 'ANY' or
rtype == 'CNAME'):
# If we have a glob match fix reply label
if self.glob:
a = copy.copy(rr)
a.rname = qname
reply.add_answer(a)
else:
reply.add_answer(rr)
# Check for A/AAAA records associated with reply and
# add in additional section
if rtype in ['CNAME','NS','MX','PTR']:
for a_name,a_rtype,a_rr in local_zone:
if a_name == rr.rdata.label and a_rtype in ['A','AAAA']:
reply.add_ar(a_rr)
if not reply.rr:
reply.header.rcode = RCODE.SERVFAIL
return reply
def update_etc_hosts_file(hostip_tuples, output_file=None):
"""Update specified nodes in /etc/hosts
Previous content is not lost
:param hostip_tuples: generator of tuple (host, ip)
:param output_file: destination file, default is /etc/hosts
"""
BEGIN_MARKUP = '# CloudDNS prelude - DO NOT REMOVE\n'
END_MARKUP = '# CloudDNS epilogue - DO NOT REMOVE\n'
output_file = output_file or '/etc/hosts'
if not osp.isfile(output_file):
with open(output_file, 'a'):
os.utime(output_file, None)
with open(output_file, 'r+') as etc_hosts:
lines = etc_hosts.readlines()
etc_hosts.seek(0)
etc_hosts.truncate(0)
previous_content_replaced = False
between_markups = False
for line in lines:
if not between_markups:
if line == BEGIN_MARKUP:
between_markups = True
etc_hosts.write(line)
else:
if line == END_MARKUP:
previous_content_replaced = True
for hosts, ip in hostip_tuples:
etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
between_markups = False
etc_hosts.write(line)
if not previous_content_replaced:
etc_hosts.write(BEGIN_MARKUP)
for hosts, ip in hostip_tuples:
etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
etc_hosts.write(END_MARKUP)
def etc_hosts_update(output_file=None, **kwargs):
"""Update /etc/hosts with all nodes available in configured projects
:param output_file: destination file, default is /etc/hosts
"""
update_etc_hosts_file(etc_hosts_generator(**kwargs), output_file)
def etc_hosts_generator(**kwargs):
"""Provides a generator of tuple (hosts, ip) for all nodes registered
in the configured projects
"""
generators = []
for profile in Profiles(**kwargs).list():
for project in profile.projects.values():
generators.append(project.get_hostip_tuples())
return itertools.chain(*generators)
def etc_hosts_list(**kwargs):
"""Print to standard output nodes available in all configured projects
"""
for hosts, ip in etc_hosts_generator(**kwargs):
print "{} {}".format(ip.ljust(15, ' '), ' '.join(hosts))
def cloud_dns(args=None):
"""cloud-dns entry point"""
args = args or sys.argv[1:]
from .version import version
parser = argparse.ArgumentParser(
description="DNS utilities on top of Apache libcloud"
)
parser.add_argument(
'-V', '--version',
action='version',
version='%(proj)s ' + version
)
parser.add_argument(
'-v', '--verbose',
action='count',
help='Verbose mode, -vv for more details, -vvv for 3rd-parties logs as well'
)
parser.add_argument(
'-c', '--config-dir',
help='Specify config root path [default: %(default)s]',
dest='config_path',
default=DEFAULT_CONFIG_PATH
)
subparsers = parser.add_subparsers(help='top commands')
config_parser = subparsers.add_parser(
'config',
help='Manipulate DNS cloud configuration'
)
config_subparsers = config_parser.add_subparsers(help='config commands')
config_push_parser = config_subparsers.add_parser(
'push',
help='Push configuration to Google Storage'
)
config_push_parser.add_argument('profile')
config_push_parser.add_argument('bucket')
config_push_parser.set_defaults(func=config_push)
config_pull_parser = config_subparsers.add_parser(
'pull',
help='Retrieve latest configuration from Google Storage'
)
config_pull_parser.add_argument('profile')
config_pull_parser.add_argument('bucket')
config_pull_parser.add_argument(
"identity",
help='Keybase signature to use to decrypt configuration, for instance: github://tristan0x'
)
etc_hosts_parser = subparsers.add_parser(
'etc-hosts',
help='Manipulate DNS cloud configuration'
)
etc_hosts_subparsers = etc_hosts_parser.add_subparsers(help='etc-hosts commands')
etc_hosts_update_parser = etc_hosts_subparsers.add_parser(
"update",
help='Required super-user privileges'
)
etc_hosts_update_parser.add_argument(
'-o', '--ouput',
dest='output_file',
default='/etc/hosts',
help='Output file [default: %(default)s]'
)
etc_hosts_update_parser.set_defaults(func=etc_hosts_update)
etc_hosts_list_parser = etc_hosts_subparsers.add_parser(
"list",
help="List nodes in /etc/hosts format"
)
etc_hosts_list_parser.set_defaults(func=etc_hosts_list)
dns_server_parser = subparsers.add_parser(
'server',
help='Start DNS server'
)
dns_server_subparsers = dns_server_parser.add_subparsers(help='server commands')
dns_server_zone_parser = dns_server_subparsers.add_parser(
"zone",
help='Show DNS zone file'
)
dns_server_zone_parser.set_defaults(func=server_zone_list)
dns_server_start_parser = dns_server_subparsers.add_parser(
"start",
help='Start DNS server'
)
dns_server_start_parser.add_argument(
'--zone',
default=None,
help='Optional DNS zone file ("-" for stdin)'
)
dns_server_start_parser.add_argument(
'--ttl',
default=3600,
type=int,
help='Profile reload interval (in seconds) [default: %(default)s]'
)
dns_server_start_parser.set_defaults(func=server_start)
config_pull_parser.set_defaults(func=config_pull)
args = parser.parse_args(args)
log_level = logging.WARN
third_parties_log_level = logging.WARN
if args.verbose:
if args.verbose > 1:
log_level = logging.DEBUG
else:
log_level = logging.INFO
if args.verbose >= 3:
third_parties_log_level = logging.INFO
logging.basicConfig(level=log_level)
for logger in [
'boto',
'gnupg',
'oauth2client',
'oauth2_client',
'requests',
]:
logging.getLogger(logger).setLevel(third_parties_log_level)
args.func(**vars(args))
| [
198,
11748,
1822,
29572,
198,
11748,
340,
861,
10141,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
267,
2777,
198,
11748,
10903,
9399,
198,
11748,
6737,
198,
11748,
25064,
198,
11748,
4704,
278,
198,
11748,
640,... | 2.195151 | 4,166 |
# %%
import collections
import os
import sys
import time
from pprint import pformat
import ipdb
import numpy as np
import torch
import torch.autograd
import torch.nn as nn
from loguru import logger
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataloader import InfiniteDataLoader
from dataset import ActivityDataset
from helpers import make_arg_parser
from net_utils import set_deterministic_and_get_rng
from nets import Classifier, Discriminator, SpatialTransformerBlock
logger.remove()
logger.add(sys.stdout, colorize=True, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{message}</level>")
class StranGAN(object):
"""
STranGAN: Adversarially-learnt Spatial Transformer for scalable human activity recognition
"""
@torch.no_grad()
def train_clf(self, source_loader_train, source_loader_val, target_loader_val, args):
"""
Trains the source classifier
"""
source_metrics_train = {}
source_metrics_val = {}
target_metrics_val = {}
if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
self.classifier.load_state_dict(torch.load(args.clf_ckpt))
logger.success('Model loaded!')
source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val) ')
target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val) ')
else:
for epoch in range(1, args.n_epochs + 1):
ts = time.time()
self.classifier.train()
for batch_idx, (X_source, y_source) in enumerate(source_loader_train):
X_source = X_source.to(self.device).float()
y_source = y_source.to(self.device)
self.optim_c.zero_grad()
y_source_pred = self.classifier(X_source)
loss_fc = self.clf_loss(y_source_pred, y_source)
loss_fc.backward()
self.optim_c.step()
if batch_idx % args.log_interval == 0:
logger.info(
f'CLF train epoch: {epoch:2d} {100. * batch_idx / len(source_loader_train):3.0f}%'
+ f' {batch_idx * len(X_source):5d}/{len(source_loader_train.dataset)} lC={loss_fc.item():.6f}'
)
te = time.time()
logger.info(f'Took {(te - ts):.2f} seconds this epoch')
logger.info('------------------------------------------------')
source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val) ')
target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val) ')
logger.info('------------------------------------------------')
save_path = os.path.join(args.save_dir, 'clf.pt')
logger.info(f'Saving the Classifier in {save_path}')
torch.save(self.classifier.state_dict(), save_path)
return {
'source-train': source_metrics_train,
'source-val' : source_metrics_val,
'target-val' : target_metrics_val
}
@torch.no_grad()
def interpret(self, source_loader, target_loader, args):
"""
Save the transformed target samples and corresponding thetas for further analysis
:param source_loader:
:param target_loader:
:param args:
:return:
"""
if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
self.classifier.load_state_dict(torch.load(args.clf_ckpt))
logger.success('Model loaded!')
if args.gen_ckpt != '' and os.path.exists(args.gen_ckpt):
logger.info(f'Loading Generator from {args.gen_ckpt} ...')
self.generator.load_state_dict(torch.load(args.gen_ckpt))
logger.success('Model loaded!')
self.classifier.eval()
self.generator.eval()
thetas, target_data, xformed, source_data = [], [], [], []
for data, target in target_loader:
data = data.to(self.device).float()
data_xformed, theta = self.generator(data)
thetas.append(theta)
target_data.append(data)
xformed.append(data_xformed)
for data, target in source_loader:
data = data.to(self.device).float()
source_data.append(data)
thetas = torch.cat(thetas).cpu().numpy()
target_data = torch.cat(target_data).cpu().numpy()
source_data = torch.cat(source_data).cpu().numpy()
xformed = torch.cat(xformed).cpu().numpy()
theta_path = os.path.join(args.save_dir, 'thetas')
logger.info('Saving theta, target, transformed target and source data to {}'.format(theta_path))
np.savez_compressed(theta_path,
thetas=thetas, target_data=target_data, source_data=source_data, xformed=xformed)
logger.success('Data saved!')
# %%
parser = make_arg_parser()
args = parser.parse_args()
rng, seed_worker = set_deterministic_and_get_rng(args)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
logger.add(os.path.join(args.save_dir, "training.log"))
logger.info(f'Current experiment parameters:\n{pformat(vars(args))}')
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with np.load(args.data_path, mmap_mode='r', allow_pickle=True) as npz:
if args.subject_source.find(',') > 0:
data_source = np.concatenate([
npz['data_{}_{}'.format(ss, args.position_source)]
for ss in tqdm(args.subject_source.split(','), 'creating source dataset')
])
else:
data_source = npz['data_{}_{}'.format(args.subject_source,
args.position_source)]
if args.subject_target.find(',') > 0:
data_target = np.concatenate([
npz['data_{}_{}'.format(st, args.position_target)]
for st in tqdm(args.subject_target.split(','), 'creating target dataset')
])
else:
data_target = npz['data_{}_{}'.format(args.subject_target,
args.position_target)]
source_train_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling,
shuffle=False, train_set=True, train_frac=args.train_frac)
lencoder = source_train_dataset.lencoder
source_val_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling, lencoder=lencoder,
shuffle=False, train_set=False, train_frac=args.train_frac)
target_train_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
lencoder=lencoder, shuffle=False, train_set=True,
train_frac=args.train_frac)
target_val_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
lencoder=lencoder, shuffle=False, train_set=False,
train_frac=args.train_frac)
# data loader for DA training
# -----------------------------------------------------------------------------------------------------------------------
source_loader_da = InfiniteDataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_da = InfiniteDataLoader(target_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# data loader for classification
# -----------------------------------------------------------------------------------------------------------------------
# training
source_loader_clf_train = DataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# validation
source_loader_clf_val = DataLoader(source_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_clf_val = DataLoader(target_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
strangan = StranGAN(device, args)
strangan.train_gan(source_loader_da, target_loader_da, source_loader_clf_train, source_loader_clf_val,
target_loader_clf_val, args)
strangan.interpret(source_loader_clf_val, target_loader_clf_val, args)
| [
2,
43313,
198,
11748,
17268,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
279,
4798,
1330,
279,
18982,
198,
198,
11748,
20966,
9945,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
2306,
... | 2.209628 | 4,279 |
# -*- coding: utf-8 -*-
"""
Texas module
============
This module presents two classes to handle the way state holidays are managed
in Texas.
The :class:`TexasBase` class gathers all available holidays for Texas,
according to this document:
http://www.statutes.legis.state.tx.us/Docs/GV/htm/GV.662.htm
The :class:`Texas` class includes all national and state holidays, as described
in the said document. This should be the "default" Texas calendar class, to be
used in most cases.
But if state holidays are supposed to be observed by most of the workforces,
any employee can chose to skip one of these days and replace it by another.
If at some point you need to create a specific calendar class based on Texas
calendar, you can either use the :class:`TexasBase` class or directly the
:class:`Texas` class and overwrite/override the :method:`get_fixed_holidays()`
and/or :method:`get_variable_days()` to fit your needs.
Example:
.. code::
class TexasCustom(TexasBase):
# This will include the confederate heroes day
texas_include_confederate_heroes = True
FIXED_HOLIDAYS = TexasBase.FIXED_HOLIDAYS + (
(7, 14, "Bastille Day!"),
)
def get_variable_days(self, year):
days = super(TexasCustom, self).get_variable_days(year)
days.append(
(self.get_nth_weekday_in_month(year, 1, 15), "Special Day")
)
return days
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date
from ..registry_tools import iso_register
from .core import UnitedStates
class TexasBase(UnitedStates):
"""Texas Base (w/o State holidays)"""
include_columbus_day = False
texas_include_confederate_heroes = False
texas_include_independance_day = False
texas_san_jacinto_day = False
texas_emancipation_day = False
texas_lyndon_johnson_day = False
# Non-Texas-specific state holidays
include_thanksgiving_friday = False
include_christmas_eve = False
include_boxing_day = False
@iso_register('US-TX')
class Texas(TexasBase):
"""Texas"""
texas_include_confederate_heroes = True
texas_include_independance_day = True
texas_san_jacinto_day = True
texas_emancipation_day = True
texas_lyndon_johnson_day = True
include_thanksgiving_friday = True
include_christmas_eve = True
include_boxing_day = True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
21607,
8265,
198,
25609,
198,
198,
1212,
8265,
10969,
734,
6097,
284,
5412,
262,
835,
1181,
17122,
389,
5257,
198,
259,
3936,
13,
198,
198,
464,
1058,
4871,
... | 2.762655 | 889 |
from django.shortcuts import render
from .models import Thing | [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
764,
27530,
1330,
21561
] | 4.357143 | 14 |
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from torch import Tensor
from . import common
from .common import SignalInfo, EncodingInfo
@common._impl_load
@common._impl_load_wav
@common._impl_save
@common._impl_info
| [
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
6738,
28034,
1330,
309,
22854,
198,
198,
6738,
764,
1330,
2219,
198,
6738,
764,
11321,
1330,
26484,
12360,
11,
... | 3.329114 | 79 |
from __future__ import print_function
import os
import os.path as op
import sys
from peddy import Ped, Family, Sample, PHENOTYPE, SEX
HERE = op.dirname(op.dirname(os.path.abspath(os.path.dirname(__file__))))
from contextlib import contextmanager
@contextmanager
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
28686,
198,
11748,
28686,
13,
6978,
355,
1034,
198,
11748,
25064,
198,
6738,
279,
21874,
1330,
13457,
11,
7884,
11,
27565,
11,
9370,
1677,
2394,
56,
11401,
11,
311,
6369,
198,
... | 2.936842 | 95 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: market.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='market.proto',
package='api',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0cmarket.proto\x12\x03\x61pi\"\xb5\x05\n\x07MDEntry\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12\'\n\x0eMDUpdateAction\x18\x97\x02 \x01(\tR\x0emdUpdateAction\x12!\n\x0bMDEntryType\x18\x8d\x02 \x01(\tR\x0bmdEntryType\x12\x1d\n\tMDEntryPx\x18\x8e\x02 \x01(\tR\tmdEntryPx\x12!\n\x0bMDEntrySize\x18\x8f\x02 \x01(\tR\x0bmdEntrySize\x12\'\n\x0eNumberOfOrders\x18\xda\x02 \x01(\rR\x0enumberOfOrders\x12\"\n\x0cTransactTime\x18< \x01(\x03R\x0ctransactTime\x12\x19\n\x07TradeId\x18\xeb\x07 \x01(\tR\x07tradeId\x12%\n\rAggressorSide\x18\xdd\x0b \x01(\tR\raggressorSide\x12\x19\n\x07\x46irstPx\x18\x81\x08 \x01(\tR\x07\x66irstPx\x12\x16\n\x06LastPx\x18\x1f \x01(\tR\x06lastPx\x12\x17\n\x06HighPx\x18\xcc\x02 \x01(\tR\x06highPx\x12\x15\n\x05LowPx\x18\xcd\x02 \x01(\tR\x05lowPx\x12\x1d\n\tBuyVolume\x18\xca\x02 \x01(\tR\tbuyVolume\x12\x1f\n\nSellVolume\x18\xcb\x02 \x01(\tR\nsellVolume\x12\x11\n\x03\x42id\x18\xde\x0b \x01(\tR\x03\x62id\x12\x11\n\x03\x41sk\x18\xdf\x0b \x01(\tR\x03\x61sk\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\"\xe4\x03\n\x11MarketDataRefresh\x12\x18\n\x07MsgType\x18# \x01(\tR\x07msgType\x12\x1f\n\nMDStreamId\x18\xdc\x0b \x01(\tR\nmdStreamId\x12\'\n\x0eLastUpdateTime\x18\x8b\x06 \x01(\x03R\x0elastUpdateTime\x12\x1f\n\nMDBookType\x18\xfd\x07 \x01(\tR\nmdBookType\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\x12\x19\n\x07\x42\x65stBid\x18\xde\x0b \x01(\tR\x07\x62\x65stBid\x12\x19\n\x07\x42\x65stAsk\x18\xdf\x0b 
\x01(\tR\x07\x62\x65stAsk\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntry\x12%\n\x06Ratios\x18\xe0\x0b \x03(\x0b\x32\x0c.api.MDEntryR\x06ratios\"\xdb\x01\n\x11MarketDataRequest\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12 \n\x17SubscriptionRequestType\x18\x87\x02 \x01(\t\x12\x15\n\x0cThrottleType\x18\xcc\x0c \x01(\t\x12\x1d\n\x14ThrottleTimeInterval\x18\xce\x0c \x01(\x03\x12\x19\n\x10ThrottleTimeUnit\x18\xcf\x0c \x01(\t\x12\x17\n\x0e\x41ggregatedBook\x18\x8a\x02 \x01(\x03\x12\x14\n\x0bMarketDepth\x18\x88\x02 \x01(\x03\"T\n\x17MarketDataRequestReject\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12\x13\n\nRejectText\x18\xb0\n \x01(\t\"/\n\x04\x42\x61rs\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntryb\x06proto3')
)
_MDENTRY = _descriptor.Descriptor(
name='MDEntry',
full_name='api.MDEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Symbol', full_name='api.MDEntry.Symbol', index=0,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='symbol', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDUpdateAction', full_name='api.MDEntry.MDUpdateAction', index=1,
number=279, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdUpdateAction', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntryType', full_name='api.MDEntry.MDEntryType', index=2,
number=269, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntryType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntryPx', full_name='api.MDEntry.MDEntryPx', index=3,
number=270, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntryPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntrySize', full_name='api.MDEntry.MDEntrySize', index=4,
number=271, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntrySize', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='NumberOfOrders', full_name='api.MDEntry.NumberOfOrders', index=5,
number=346, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numberOfOrders', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TransactTime', full_name='api.MDEntry.TransactTime', index=6,
number=60, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='transactTime', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TradeId', full_name='api.MDEntry.TradeId', index=7,
number=1003, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tradeId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggressorSide', full_name='api.MDEntry.AggressorSide', index=8,
number=1501, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='aggressorSide', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='FirstPx', full_name='api.MDEntry.FirstPx', index=9,
number=1025, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='firstPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LastPx', full_name='api.MDEntry.LastPx', index=10,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lastPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighPx', full_name='api.MDEntry.HighPx', index=11,
number=332, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowPx', full_name='api.MDEntry.LowPx', index=12,
number=333, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BuyVolume', full_name='api.MDEntry.BuyVolume', index=13,
number=330, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='buyVolume', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SellVolume', full_name='api.MDEntry.SellVolume', index=14,
number=331, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='sellVolume', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Bid', full_name='api.MDEntry.Bid', index=15,
number=1502, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bid', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Ask', full_name='api.MDEntry.Ask', index=16,
number=1503, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ask', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowRangePx', full_name='api.MDEntry.LowRangePx', index=17,
number=35601, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighRangePx', full_name='api.MDEntry.HighRangePx', index=18,
number=35602, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowLimitPx', full_name='api.MDEntry.LowLimitPx', index=19,
number=35603, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighLimitPx', full_name='api.MDEntry.HighLimitPx', index=20,
number=35604, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ClearingPx', full_name='api.MDEntry.ClearingPx', index=21,
number=35605, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=715,
)
_MARKETDATAREFRESH = _descriptor.Descriptor(
name='MarketDataRefresh',
full_name='api.MarketDataRefresh',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRefresh.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='msgType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRefresh.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdStreamId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LastUpdateTime', full_name='api.MarketDataRefresh.LastUpdateTime', index=2,
number=779, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lastUpdateTime', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDBookType', full_name='api.MarketDataRefresh.MDBookType', index=3,
number=1021, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdBookType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Symbol', full_name='api.MarketDataRefresh.Symbol', index=4,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='symbol', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowRangePx', full_name='api.MarketDataRefresh.LowRangePx', index=5,
number=35601, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighRangePx', full_name='api.MarketDataRefresh.HighRangePx', index=6,
number=35602, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowLimitPx', full_name='api.MarketDataRefresh.LowLimitPx', index=7,
number=35603, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighLimitPx', full_name='api.MarketDataRefresh.HighLimitPx', index=8,
number=35604, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ClearingPx', full_name='api.MarketDataRefresh.ClearingPx', index=9,
number=35605, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BestBid', full_name='api.MarketDataRefresh.BestBid', index=10,
number=1502, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bestBid', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BestAsk', full_name='api.MarketDataRefresh.BestAsk', index=11,
number=1503, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bestAsk', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntry', full_name='api.MarketDataRefresh.MDEntry', index=12,
number=268, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Ratios', full_name='api.MarketDataRefresh.Ratios', index=13,
number=1504, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ratios', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=718,
serialized_end=1202,
)
_MARKETDATAREQUEST = _descriptor.Descriptor(
name='MarketDataRequest',
full_name='api.MarketDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRequest.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRequest.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SubscriptionRequestType', full_name='api.MarketDataRequest.SubscriptionRequestType', index=2,
number=263, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleType', full_name='api.MarketDataRequest.ThrottleType', index=3,
number=1612, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleTimeInterval', full_name='api.MarketDataRequest.ThrottleTimeInterval', index=4,
number=1614, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleTimeUnit', full_name='api.MarketDataRequest.ThrottleTimeUnit', index=5,
number=1615, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggregatedBook', full_name='api.MarketDataRequest.AggregatedBook', index=6,
number=266, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MarketDepth', full_name='api.MarketDataRequest.MarketDepth', index=7,
number=264, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1205,
serialized_end=1424,
)
_MARKETDATAREQUESTREJECT = _descriptor.Descriptor(
name='MarketDataRequestReject',
full_name='api.MarketDataRequestReject',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRequestReject.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRequestReject.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='RejectText', full_name='api.MarketDataRequestReject.RejectText', index=2,
number=1328, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1426,
serialized_end=1510,
)
_BARS = _descriptor.Descriptor(
name='Bars',
full_name='api.Bars',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MDEntry', full_name='api.Bars.MDEntry', index=0,
number=268, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1512,
serialized_end=1559,
)
_MARKETDATAREFRESH.fields_by_name['MDEntry'].message_type = _MDENTRY
_MARKETDATAREFRESH.fields_by_name['Ratios'].message_type = _MDENTRY
_BARS.fields_by_name['MDEntry'].message_type = _MDENTRY
DESCRIPTOR.message_types_by_name['MDEntry'] = _MDENTRY
DESCRIPTOR.message_types_by_name['MarketDataRefresh'] = _MARKETDATAREFRESH
DESCRIPTOR.message_types_by_name['MarketDataRequest'] = _MARKETDATAREQUEST
DESCRIPTOR.message_types_by_name['MarketDataRequestReject'] = _MARKETDATAREQUESTREJECT
DESCRIPTOR.message_types_by_name['Bars'] = _BARS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MDEntry = _reflection.GeneratedProtocolMessageType('MDEntry', (_message.Message,), dict(
DESCRIPTOR = _MDENTRY,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MDEntry)
))
_sym_db.RegisterMessage(MDEntry)
MarketDataRefresh = _reflection.GeneratedProtocolMessageType('MarketDataRefresh', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREFRESH,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRefresh)
))
_sym_db.RegisterMessage(MarketDataRefresh)
MarketDataRequest = _reflection.GeneratedProtocolMessageType('MarketDataRequest', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQUEST,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRequest)
))
_sym_db.RegisterMessage(MarketDataRequest)
MarketDataRequestReject = _reflection.GeneratedProtocolMessageType('MarketDataRequestReject', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQUESTREJECT,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRequestReject)
))
_sym_db.RegisterMessage(MarketDataRequestReject)
Bars = _reflection.GeneratedProtocolMessageType('Bars', (_message.Message,), dict(
DESCRIPTOR = _BARS,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.Bars)
))
_sym_db.RegisterMessage(Bars)
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
1910,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18,
290,
357,
50033,
2124,
2... | 2.307492 | 11,493 |
#!/usr/bin/env python
import django
import sys, os, logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='../logs/task.log',
filemode='a')
sys.path.append(os.path.join(os.path.dirname(__file__),'../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
django.setup()
from dvaapp.models import TEvent
from dvaapp.task_handlers import handle_perform_analysis, handle_perform_indexing, handle_perform_detection
if __name__ == '__main__':
task_name = sys.argv[-2]
pk = int(sys.argv[-1])
logging.info("Executing {} {}".format(task_name,pk))
if task_name == 'perform_indexing':
handle_perform_indexing(TEvent.objects.get(pk=pk))
elif task_name == 'perform_detection':
handle_perform_detection(TEvent.objects.get(pk=pk))
elif task_name == 'perform_analysis':
handle_perform_analysis(TEvent.objects.get(pk=pk))
else:
raise ValueError("Unknown task name {}".format(task_name)) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
42625,
14208,
198,
11748,
25064,
11,
28686,
11,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
10778,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.20198 | 505 |
import os
import json
import ffmpeg
import pickle
import sys
import matplotlib.pyplot as plt
from cv2 import VideoWriter, VideoWriter_fourcc, resize
import numpy as np
import cv2
def imgseq2video(imgseq, name="pick_up", decode="mp4v", folder=None, fps=3, o_h=500, o_w=500,
full_path=None, rgb_to_bgr=True, verbose=True):
"""
Generate a video from a img sequence list.
:param imgseq: RGB image frames.
:param name: video file name.
:param decode: video decoder type, X264 is not working.
:param folder: saved to which folder.
:param fps: fps of saved video.
:param o_h: height of video.
:param o_w: width of video
:param full_path: full path to the video, if not None, overwrite folder and name.
:param rgb_to_bgr: convert rgb image to bgr img.
:param verbose: whether to print save path.
:return: None.
"""
if len(imgseq) < 1:
print("[WARNING] Try to save empty video.")
return
# Suppress OpenCV and ffmpeg output.
sys.stdout = open(os.devnull, "w")
if full_path is not None:
assert ".mp4" in full_path[-4:], "Full path should end with .mp4"
tmp_path = full_path[:-4] + "tmp" + ".mp4"
path = full_path
else:
tmp_path = name + "tmp.mp4" if folder is None else os.path.join(folder, name + "tmp.mp4")
path = name + ".mp4" if folder is None else os.path.join(folder, name + ".mp4")
fourcc = VideoWriter_fourcc(*decode)
videoWriter = VideoWriter(tmp_path, fourcc, fps, (o_w, o_h))
for img in imgseq:
img = np.uint8(img)
if img.shape[0] == 3:
# needs to be in shape of oh, ow, 3
img = img.transpose(1, 2, 0)
if rgb_to_bgr:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = resize(img, (o_w, o_h))
videoWriter.write(img)
videoWriter.release()
(
ffmpeg
.input(tmp_path)
.output(path, vcodec="h264", loglevel="error")
.overwrite_output()
.run()
)
# .global_args('-loglevel', 'error')
# print("should be blocked")
os.remove(tmp_path)
sys.stdout = sys.__stdout__
if verbose:
print("Video saved to", path, "with ", len(imgseq), " total frames.")
return path
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
31246,
43913,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
269,
85,
17,
1330,
7623,
34379,
11,
7623,
34379,
62,
14337,
53... | 2.277336 | 1,006 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ArgParseInator test
"""
__file_name__ = "apitest.py"
__author__ = "luca"
__version__ = "1.0.0"
__date__ = "2014-11-18"
import argparseinator
from argparseinator import arg, ap_arg, class_args
@argparseinator.arg("name", help="The name to print")
@argparseinator.arg('-s', '--surname', default='', help="optional surname")
def print_name(name, surname, address):
"""
Will print the passed name.
"""
print "Printing the name...", name, surname, address
@argparseinator.arg(cmd_name="foo")
def foo_name():
"""
print foo.
"""
print "foo"
@class_args
class CommandsContainer(object):
"""
CommandsContainer class.
"""
prefix = "The name is"
__arguments__ = [ap_arg('--arguments', help="Class arguments")]
__shared_arguments__ = [
ap_arg('name', help="The name"),
ap_arg('--prefix', help="string prefix", default='We have')]
@arg()
def name(self, name, prefix):
"""
Print the name.
"""
print prefix, 'name', name
@arg()
def surname(self, name, prefix):
"""
Print the surname.
"""
print prefix, 'surname', name
@arg()
def nickname(self, name, prefix):
"""
Print the nickname.
"""
print prefix, "nickname", name
@class_args
class Greetings(object):
"""
Greeting command.
"""
__cmd_name__ = 'greet'
__arguments__ = [ap_arg(
'-p', '--prefix', help='greeting prefix', default="We say")]
__shared_arguments__ = [ap_arg('name', help='the name')]
@arg()
def ciao(self, name, prefix):
"""
Say ciao.
"""
print prefix, 'Ciao', 'to', name
@arg()
def hello(self, name, prefix):
"""
Say hello.
"""
print prefix, 'hello', 'to', name
if __name__ == "__main__":
inator = argparseinator.ArgParseInator(
description="Silly script",
args=[
ap_arg('--address', help='Person address', default='Home'),
]
)
inator.check_command()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
220,
220,
220,
20559,
10044,
325,
818,
1352,
1332,
198,
37811,
198,
834,
7753,
62,
3672,
834,
796,
366,
4... | 2.275347 | 937 |
# -*- coding: utf-8 -*-
"""Example of using Copula Based Outlier Detector (COPOD) for outlier detection
Sample wise interpretation is provided here.
"""
# Author: Winston Li <jk_zhengli@hotmail.com>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from pyod.models.copod import COPOD
from pyod.utils.utility import standardizer
if __name__ == "__main__":
# Define data file and read X and y
# Generate some data if the source data is missing
mat_file = 'cardio.mat'
mat = loadmat(os.path.join('data', mat_file))
X = mat['X']
y = mat['y'].ravel()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
random_state=1)
# standardizing data for processing
X_train_norm, X_test_norm = standardizer(X_train, X_test)
# train COPOD detector
clf_name = 'COPOD'
clf = COPOD()
# you could try parallel version as well.
# clf = COPOD(n_jobs=2)
clf.fit(X_train)
# get the prediction labels and outlier scores of the training data
y_train_pred = clf.labels_ # binary labels (0: inliers, 1: outliers)
y_train_scores = clf.decision_scores_ # raw outlier scores
print('The first sample is an outlier', y_train[0])
clf.explain_outlier(0)
# we could see feature 7, 16, and 20 is above the 0.99 cutoff
# and play a more important role in deciding it is an outlier.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
16281,
286,
1262,
6955,
4712,
13403,
3806,
2505,
4614,
9250,
357,
34,
3185,
3727,
8,
329,
503,
2505,
13326,
198,
36674,
10787,
10794,
318,
2810,
994,
13,
198,
378... | 2.634757 | 679 |
import numpy as np
import open3d
if __name__ == "__main__":
    # NOTE(review): this uses the pre-0.8 open3d module-level API
    # (read_point_cloud, voxel_down_sample, estimate_normals); newer releases
    # moved these under open3d.io / PointCloud methods -- confirm the pinned
    # open3d version.
    print("Load a ply point cloud, print it, and render it")
    pcd = open3d.read_point_cloud('/home/heider/Datasets/pointclouds/office.ply')
    print(pcd)
    print(np.asarray(pcd.points))
    # open3d.draw_geometries([pcd])
    print("Downsample the point cloud with a voxel of 0.05")
    # NOTE(review): the message above says 0.05 but voxel_size is 0.1.
    downsampled = open3d.voxel_down_sample(pcd, voxel_size=0.1)
    # open3d.draw_geometries([downpcd])
    print("Recompute the normal of the downsampled point cloud")
    open3d.estimate_normals(downsampled, search_param=open3d.KDTreeSearchParamHybrid(
        radius=0.1, max_nn=30))
    open3d.draw_geometries([downsampled])
| [
11748,
299,
32152,
355,
45941,
198,
11748,
1280,
18,
67,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
3601,
7203,
8912,
257,
35960,
966,
6279,
11,
3601,
340,
11,
290,
8543,
340,
4943,
198,
220,
279,
1... | 2.530769 | 260 |
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch_sparse import fill_diag, sum as sparsesum, mul
import torch_geometric.transforms as T
from gcn import GCN
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
def sym_normalize_adj(adj):
    """Return D^-1/2 * A^T * D^-1/2 as a COO sparse matrix.

    Degrees are clamped to machine epsilon so isolated nodes do not
    produce infinities in the inverse square root.
    """
    coo = sp.coo_matrix(adj)
    row_sums = np.asarray(coo.sum(1)).ravel()
    safe_degrees = np.maximum(row_sums, np.finfo(float).eps)
    inv_sqrt_deg = sp.diags(safe_degrees ** -0.5)
    return coo.dot(inv_sqrt_deg).transpose().dot(inv_sqrt_deg).tocoo()
def row_normalize(adj):
    """Return D^-1 * A (each row divided by its sum) as a COO sparse matrix.

    Zero-sum rows are guarded with machine epsilon instead of dividing by 0.
    """
    coo = sp.coo_matrix(adj)
    row_sums = np.asarray(coo.sum(1)).ravel()
    inv_degree = sp.diags(1. / np.maximum(row_sums, np.finfo(float).eps))
    return inv_degree.dot(coo).tocoo()
def preprocess_high_order_adj(adj, order, eps):
    """Average the first `order` powers of the row-normalized adjacency,
    sparsify, and symmetrically normalize.

    Entries below `eps` and the diagonal are dropped before self-loops are
    re-added, which keeps the result sparse for large `order`.
    """
    base = row_normalize(adj)
    accumulated = base
    power = base
    for _ in range(1, order):
        power = power.dot(base)
        accumulated += power
    accumulated /= order
    # Remove self-loops and near-zero entries before re-adding the identity.
    accumulated.setdiag(0)
    accumulated.data[accumulated.data < eps] = 0
    accumulated.eliminate_zeros()
    accumulated += sp.eye(base.shape[0])
    return sym_normalize_adj(accumulated + accumulated.T)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse float tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack (row, col) into the 2 x nnz index layout torch expects.
    coords = np.vstack((coo.row, coo.col)).astype(np.int64)
    return torch.sparse.FloatTensor(
        torch.from_numpy(coords),
        torch.from_numpy(coo.data),
        torch.Size(coo.shape),
    )
# NOTE(review): the original had a bare `@torch.no_grad()` decorator
# immediately above this `if` statement. A decorator may only precede a
# `def` or `class`, so that was a SyntaxError; it most likely belonged to a
# helper (e.g. a `test()`/`main()` function) that is not present in this
# file. It is removed so the module can be parsed. `main` itself is not
# defined in this file as shown -- confirm where it comes from.
if __name__ == "__main__":
    main()
| [
11748,
1822,
29572,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
198,
6738,
28034,
62,
82,
29572,
1330,
6070,
62,
10989,
363,
11,
2160,
355,
599,
945,
274,
38... | 2.356522 | 805 |
# -*- coding: utf-8 -*-
"""Human readable object exploration module.
It is designed to be more verbose than the dir()-function, while being more
compact than help().
"""
from __future__ import print_function
__author__ = "Talon24"
__license__ = "MIT"
__version__ = "0.1.10"
__maintainer__ = "Talon24"
__url__ = "https://github.com/Talon24/explore"
__status__ = "Developement"
__all__ = ["explore", "explore_object", "explore_signature"]
import pydoc
import inspect
import itertools
import colorama
import terminaltables
# import pkg_resources
# Enable ANSI color handling on Windows terminals as well.
colorama.init()
# Table renderer used for all output tables.
TABLETYPE = terminaltables.DoubleTable
# Module-wide toggle for colored output.
COLORIZE = True
# _MAPPING = pkg_resources.resource_string("explore", "mapping.json")
# Isn't created in a subdirectory without more than one module.
# Maps dunder method names to the operator / builtin / construct that
# invokes them; used to build the "Ops" column of the object table.
_MAPPING = {
    "__add__": "+",
    "__sub__": "-",
    "__mul__": "*",
    "__truediv__": "/",
    "__floordiv__": "//",
    "__matmul__": "@",
    "__pow__": "**",
    "__mod__": "%",
    "__divmod__": "divmod",
    "__and__": "&",
    "__or__": "|",
    "__xor__": "^",
    "__lshift__": "<<",
    "__rshift__": ">>",
    "__iadd__": "+=",
    "__isub__": "-=",
    "__imul__": "*=",
    "__itruediv__": "/=",
    "__ifloordiv__": "//=",
    "__imatmul__": "@=",
    "__ipow__": "**=",
    "__imod__": "%=",
    "__iand__": "&=",
    "__ior__": "|=",
    "__ixor__": "^=",
    "__ilshift__": "<<=",
    "__irshift__": ">>=",
    "__eq__": "==",
    "__ne__": "!=",
    "__lt__": "<",
    "__gt__": ">",
    "__leq__": "<=",
    "__geq__": ">=",
    "__invert__": "~",
    "__pos__": "+()",
    "__neg__": "-()",
    "__abs__": "abs",
    "__len__": "len",
    "__int__": "int",
    "__float__": "float",
    "__round__": "round",
    "__enter__": "with:",
    "__await__": "await",
    "__contains__": "in",
    "__getitem__": "[]",
    "__setitem__": "[] = x",
    "__delitem__": "del x",
    "__call__": "()"
}
def colored(data, color):
    """Wrap *data* in the given colorama color code (plus a trailing reset),
    or return it untouched when coloring is globally disabled."""
    if not COLORIZE:
        return data
    return "{color}{data}{reset}".format(color=color, data=data,
                                         reset=colorama.Style.RESET_ALL)
def _map_dunders(thing, items):
    """Match dunder methods to the operator/construct they are related to."""
    ops = [_MAPPING[name] for name in items if name in _MAPPING]
    # Special case: hashability. Classes can have hashes but their instances
    # may not, and __hash__ may be present yet set to None (e.g. list has a
    # __hash__ attribute of None even though it is not hashable) -- so check
    # the attribute's truthiness, not just its presence.
    if "__hash__" in items and thing.__hash__:
        ops.append("hash")
    return ops
def _prune_data(thing, data):
"""Move items out of the Data row."""
remappable = ("method_descriptor", "builtin_function_or_method")
uninteresting = ("PytestTester", "_Feature")
for item in data["Data"][:]:
typename = type(getattr(thing, item)).__name__
if typename in remappable or typename in uninteresting:
if typename in remappable:
if inspect.ismodule(thing):
data["Functions"].append(item)
else:
data["Methods"].append(item)
data["Data"].remove(item)
def _prune_arguments_list(data, header):
"""Remove default information from list of arguments if all are unset."""
type_index = header.index("Type")
if all(entry[type_index] == "Any" for entry in data):
for entry in data:
del entry[type_index]
del header[type_index]
kind_index = header.index("Kind")
if all(entry[kind_index] == "Positional Or Keyword" for entry in data):
for entry in data:
del entry[kind_index]
del header[kind_index]
def explore_signature(thing, show_hidden=False):
    """Show information about a function and its parameters as a table.

    Args:
        thing: Callable (or class, whose constructor is shown) to inspect.
        show_hidden: When False, Type/Kind columns carrying no information
            are pruned from the table.
    """
    try:
        signature = inspect.signature(thing)
    except ValueError as exc:
        # Many C-implemented callables expose no signature metadata.
        print(colored("{!r} does not reveal its signature.".format(
            thing), colorama.Fore.RED))
        standard_builtins = (__import__, breakpoint, dir, getattr, iter,
                             max, min, next, print, vars)
        if thing in standard_builtins:
            print(colored("Check the documentation at "
                          "https://docs.python.org/3/library/functions.html#{}"
                          " .".format(thing.__name__), colorama.Fore.RED))
        return
    empty = inspect.Signature.empty
    header = ["Argument", "Default", "Type", "Kind"]
    data = []
    return_type = signature.return_annotation
    # One table row per parameter: name, default, annotation, parameter kind.
    for name, parameter in signature.parameters.items():
        # kind = parameter.kind.name.replace("_", " ").title()
        kind = parameter.kind.description
        default = parameter.default
        default = repr(default) if default is not empty else "---"
        annotation = parameter.annotation
        annotation = annotation.__name__ if annotation is not empty else "Any"
        data.append([name, default, annotation, kind])
    # Coloring
    for row in data:
        if row[0] in ("self", "cls"):
            row[0] = colored(row[0], colorama.Fore.YELLOW)
        elif row[1] == "---" and not row[3].startswith("var"):
            # Required argument, as no default is set.
            # Variadic is allowed to be empty, though.
            row[0] = colored(row[0], colorama.Fore.RED)
    if not show_hidden:
        _prune_arguments_list(data, header)
    # Convert to Table
    table = TABLETYPE([header] + data)
    if not inspect.isclass(thing):
        table.title = " Function {} ".format(thing.__name__)
        if return_type is not inspect.Signature.empty:
            table.title += "-> {} ".format(return_type.__name__)
    else:
        table.title = " Constructor "
    # First sentence of the docstring as a short description.
    description = pydoc.getdoc(thing).split(".")[0]
    if description:
        print(" Description:\n{}.".format(description))
    if not len(data) == 0:
        print(table.table)
    else:
        print("This Function takes no arguments.")
def explore_object(thing, show_hidden=False):
    """Show dir(thing) as a table to make it more human readable.

    Members are bucketed into categories (dunders, private names, constants,
    modules, methods, functions, classes, plain data, operators); each
    non-empty category becomes one column of the printed table.

    Args:
        thing: Any object, class or module.
        show_hidden: When True, also show the Secrets/Dunders columns.
    """
    items = set(dir(thing))
    data = dict()
    # Extract members, assign them to categories.
    # Order matters: each bucket removes its matches before the next test.
    data["Dunders"] = [
        item for item in items if item.startswith("__") and item.endswith("__")]
    items.difference_update(data["Dunders"])
    data["Secrets"] = [
        item for item in items if item.startswith("_")]
    items.difference_update(data["Secrets"])
    data["Constants"] = [
        item for item in items if item.isupper()]
    items.difference_update(data["Constants"])
    data["Modules"] = [
        item for item in items if inspect.ismodule(getattr(thing, item))]
    items.difference_update(data["Modules"])
    data["Methods"] = [
        item for item in items if inspect.ismethod(getattr(thing, item))]
    items.difference_update(data["Methods"])
    data["Functions"] = [
        item for item in items if inspect.isfunction(getattr(thing, item))]
    items.difference_update(data["Functions"])
    data["Classes"] = [
        item for item in items if inspect.isclass(getattr(thing, item))]
    items.difference_update(data["Classes"])
    data["Data"] = list(items)
    data["Ops"] = _map_dunders(thing, data["Dunders"])
    _prune_data(thing, data)
    # color operators
    data["Ops"] = [colored(text, colorama.Fore.LIGHTBLUE_EX)
                   for text in data["Ops"]]
    if not show_hidden:
        hidden_names = ["Secrets", "Dunders"]
        for name in hidden_names:
            try:
                del data[name]
            except KeyError:
                pass
    # color types
    newdata = []
    for item in data["Data"]:
        type_ = colored(type(getattr(thing, item)).__name__,
                        colorama.Fore.LIGHTCYAN_EX)
        newdata.append("{}: {}".format(item, type_))
    data["Data"] = newdata
    # list-of-colums to list-of-rows
    with_header = [
        [key] + sorted(value) for key, value in data.items() if len(value) > 0]
    rotated = [row for row in itertools.zip_longest(*with_header, fillvalue="")]
    table = TABLETYPE(rotated)
    try:
        table.title = " {}: {} ".format(type(thing).__name__, thing.__name__)
    except AttributeError:
        # Instances usually lack __name__; fall back to the class name only.
        table.title = " Class {} ".format(type(thing).__name__)
    descr = pydoc.getdoc(thing).split(".")[0]
    if descr:
        print(" Description:\n{}.".format(descr))
    print(table.table)
def explore(thing, show_hidden=False):
    """Show what you can do with an object.

    Dispatches to explore_signature for callables, explore_object for plain
    objects, and both for classes. Note that built-in objects or functions
    might not be matched correctly.
    """
    looks_callable = (
        inspect.isfunction(thing)
        or inspect.ismethod(thing)
        or inspect.isbuiltin(thing)  # This can miss, e.g. print, namedtuple
    )
    if looks_callable:
        explore_signature(thing, show_hidden=show_hidden)
        return
    explore_object(thing, show_hidden=show_hidden)
    if inspect.isclass(thing):
        # Classes additionally get their constructor signature.
        explore_signature(thing, show_hidden=show_hidden)
if __name__ == '__main__':
    # Ad-hoc demos of the explorer; most are kept commented out.
    # explore(1)
    # explore("")
    # explore(list)
    # explore(complex)
    # def a_function(pos: int, /, both: float, untyped=4, *, kw_only: str = "blue") -> complex:
    #     """Kinds of arguments."""
    # def variadic_function(*args, reverse=True, **kwargs):
    #     """Variadic arguments."""
    # explore(a_function)
    # explore(variadic_function)
    # import requests
    # explore(requests.Request)
    import datetime
    explore(datetime.datetime.now())
    # import pathlib
    # explore(pathlib)
    import fractions
    explore(fractions.Fraction)
    # explore(open)
    explore(property)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
20490,
31744,
2134,
13936,
8265,
13,
198,
198,
1026,
318,
3562,
284,
307,
517,
15942,
577,
621,
262,
26672,
3419,
12,
8818,
11,
981,
852,
517,
198,
5589,
529,
6... | 2.341911 | 4,197 |
import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == '__main__':
    # NOTE(review): MaskNet and example() are neither defined nor imported in
    # this file as shown (only torch is imported) -- confirm the missing
    # import, e.g. `from masknet import MaskNet, example`.
    net = MaskNet(5)
    # Summarize
    #from torchinfo import summary
    data_sizes = [
        (2, 2),
        (2, 1, 360),
        (2, 3, 95, 160),
        (2, 18),
    ]
    #summary(net, data_sizes)
    # Test run
    # Run a few example passes on CPU, then one on GPU if available.
    for _ in range(3):
        example(net, 'cpu')
    if torch.cuda.is_available():
        example(net, 'cuda:0')
    else:
        print('* CUDA not available.')
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
628,
220,
220,
220,
2010,
796,
18007,
7934,
7,
20,
8,
6... | 2.024896 | 241 |
#!python3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
# When run as a script the sibling module is imported directly; when used as
# part of the package it lives under the `diag` namespace.
if __name__ == "__main__":
    from exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines
else:
    from diag.exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines

# Plot-wide settings: horizontal padding (in sequence coordinates) and the
# palette cycled through for per-sequence connector lines.
configuration = {
    "left_margin": 1000,
    "right_margin": 1000,
    "line_colors": ["xkcd:indigo", "xkcd:forest green", "xkcd:navy blue"],
}
def draw_exon_sequence_graph(
    sequence_graph, y_exons=130, file_name=None, title=None, to_scale=True
):
    """Given a dictionary with two entries
    - 'exons' an array of exon start and end offsets
    - 'sequences' an array of exon sequences
    draws a graph using different colors for each sequence.
    The goal is to show different exon sequences formed from
    one universal set of exons.

    Args:
        sequence_graph: dict with 'exons', 'sequences' and optionally 'id'
            (used as the y-axis label).
        y_exons: vertical position of the exon boxes.
        file_name: when given, save the figure there instead of showing it.
        title: optional plot title.
        to_scale: when False, exons are drawn with uniform width.
    """
    _, ax = plt.subplots()
    exons = sequence_graph["exons"]
    if not to_scale:
        # Replace genomic coordinates with uniform-width placeholders.
        unscaled_mapping, unscaled_exons = make_exons_unscaled(exons)
        exons = unscaled_exons
    patches = make_exon_shapes(exons, y_exons)
    p = PatchCollection(patches)
    sequence_height = 5
    sequence_index = 0
    draw_position = ["mid", "top", "bottom"]
    for sequence in sequence_graph["sequences"]:
        if not to_scale:
            unscaled_sequence = [unscaled_mapping[x] for x in sequence]
            sequence = unscaled_sequence
        # Connect each consecutive pair of exons in this sequence.
        exon_pairs = zip(sequence, sequence[1:])
        make_exon_exon_lines(
            exon_pairs,
            ax,
            y_exons,
            height=sequence_height,
            draw_at=draw_position[sequence_index],
            color=configuration["line_colors"][sequence_index],
        )
        sequence_height += 5
        sequence_index += 1
        # Recycle colors/positions when there are more sequences than colors.
        if sequence_index >= len(configuration["line_colors"]):
            sequence_index = 0
    xmin = exons[0][0] - configuration["left_margin"]
    xmax = exons[len(exons) - 1][1] + configuration["right_margin"]
    if to_scale:
        xtick_interval = (xmax - xmin) / 10
        ax.set_xticks(np.arange(xmin, xmax, xtick_interval))
    else:
        # Unscaled coordinates are meaningless; hide the x ticks.
        ax.set_xticks([])
    ax.set_yticks([y_exons])
    if "id" in sequence_graph:
        ax.set_yticklabels([sequence_graph["id"]])
    ax.set_xbound(xmin, xmax)
    ax.set_ybound(0, 200)
    ax.add_collection(p)
    if title is not None:
        ax.set_title(title)
    if file_name is None:
        plt.show()
    else:
        plt.savefig(file_name)
if __name__ == "__main__":
    # Contrived example using some exons from DDX11L1
    draw_exon_sequence_graph(
        {
            "id": "gr1",
            "exons": [
                (12010, 12057),
                (12179, 12227),
                (12613, 12619),
                (12975, 13052),
                (13221, 13374),
                (13453, 13670),
            ],
            # Two alternative transcripts built from the exon set above.
            "sequences": [
                [(12010, 12057), (12179, 12227), (12613, 12619), (12975, 13052)],
                [
                    (12010, 12057),
                    (12613, 12619),
                    (12975, 13052),
                    (13221, 13374),
                    (13453, 13670),
                ],
            ],
        },
        file_name="out4.png",
        title="Contrived example using some exons from DDX11L1",
        to_scale=False,
    )
| [
2,
0,
29412,
18,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2603,
29487,
8019,
13,
4033,
26448,
1330,
17106,
36307,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834... | 2.034672 | 1,644 |
#!/usr/bin/env python3
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
import scipy
import scipy.signal
import math
import imutils
import img_util
# NOTE(review): loadCardRegions, loadImage, hsv_img, displayCircles,
# destroyWindowOnKey, radiiMode, angleMode, circleBinImage, showImage,
# drawLines, drawPoints, cluster_1d, cluster_2d, lines_with_label_in,
# n_closest, seg_intersect and polar2seg are not defined or imported by name
# here (only `import img_util` is present), and `itertools` (used below) is
# not imported either -- confirm whether a star-import or an interactive
# session provided these names.
card_regions = loadCardRegions()
orig_image = loadImage(card_regions[13]['file'])
image = imutils.resize(orig_image, width=400)
height, width, depth = image.shape
blurred = cv2.blur(image,(3,3),0)
hue, sat, val = hsv_img(blurred)
# Detect candidate circles on the saturation channel.
hough_circles = cv2.HoughCircles(sat, cv2.HOUGH_GRADIENT, .5, 10,
                                 param1=10,
                                 param2=20,
                                 minRadius=2,
                                 maxRadius=15)
circles = np.round(hough_circles[0, :]).astype("int")
print("finished detecting circles: ", len(circles))
displayCircles(image, circles)
destroyWindowOnKey()
radius_mode = radiiMode(circles)
#hist(circles[:,2], 100)
# make a binary image in which each pixel indicates
# if it's within the radius of a circle
angleMode(circles)
# Keep only circles whose radius is within 20% of the modal radius.
sized_cs = circles[np.where(np.logical_and(circles[:,2]>=.8*radius_mode, circles[:,2]<=1.2*radius_mode))]
len(circles)
len(sized_cs)
displayCircles(sat, circles)
displayCircles(sat, sized_cs)
destroyWindowOnKey()
circle_bin = circleBinImage(sized_cs)
showImage(circle_bin)
# Fit straight lines through the circle-center mask (rho/theta pairs).
lines = cv2.HoughLines(circle_bin,1,np.pi/180,7).reshape(-1, 2)
showImage(drawLines(image, lines))
# Cluster line angles modulo 90 degrees to find the grid orientation.
line_angle_clusters = cluster_1d(lines[:,1] % (math.pi/2), bw=0.05)
cardinal_lines = lines_with_label_in(lines, line_angle_clusters.labels_, [0])
showImage(drawLines(image, cardinal_lines))
clustered_lines = cluster_2d(cardinal_lines, 0.02)
showImage(drawLines(image, clustered_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.02)
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
# NOTE(review): the next line overwrites the previous result with label 1;
# confirm whether both label sets were meant to be combined instead.
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
showImage(drawLines(image, clean_cardinal_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.1)
a_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
b_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
a_lines.sort(0)
b_lines.sort(0)
# Intersect every line of one orientation with every line of the other.
line_pairs = list(itertools.product(a_lines, b_lines))
intersections = [seg_intersect(*polar2seg(*a), *polar2seg(*b))for (a, b) in line_pairs]
intersection_splotches_r = [n_closest(image[:,:,0], inter.astype(np.uint8), d=2) for inter in intersections]
([np.mean(splotch) for splotch in intersection_splotches_r])
showImage(n_closest(image, intersections[20].astype(np.uint8), d=1))
showImage(drawLines(image, clustered_lines))
showImage(drawPoints(image, intersections))
print(lines)
print('done')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
17268,
... | 2.345395 | 1,216 |
import random
import numpy as np
import pybullet as p
import itertools
from rrc_simulation import visual_objects
from scipy.spatial.transform import Rotation as R
def apply_rotation_z(org_pos, theta):
    '''
    Rotate a 3D point by *theta* radians about the z-axis.

    Only the xy-plane components change; z passes through untouched.
    Returns the rotated point as an (x, y, z) tuple.
    '''
    px, py, pz = org_pos
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return px * cos_t - py * sin_t, px * sin_t + py * cos_t, pz
def sample_from_normal_cube(cube_halfwidth, face=None, shrink_region=1.0, avoid_top=False,
                            sample_from_all_faces=False):
    '''
    Sample one point on the surface of an axis-aligned cube centered at the
    origin with no rotation.

    Faces are indexed 0-3 (sides), -1 (top) and -2 (bottom). The bottom face
    is excluded unless `sample_from_all_faces` is set (the cube is assumed to
    rest on it); `avoid_top` additionally excludes the top face. Side-face
    points are drawn on the front xz-face and rotated about z into place.

    Raises:
        KeyError: if an explicitly requested `face` is not currently allowed.
    '''
    if avoid_top:
        faces = [0, 1, 2, 3]
    elif sample_from_all_faces:
        faces = [-2, -1, 0, 1, 2, 3]
    else:
        faces = [-1, 0, 1, 2, 3]

    if face is None:
        face = random.choice(faces)
    if face not in faces:
        raise KeyError(f'face {face} is not in the list of allowed faces: {faces}')

    span = cube_halfwidth * shrink_region
    if face == -1:
        # Top face: free x/y, z fixed at +halfwidth.
        x, y = np.random.uniform(low=-span, high=span, size=2)
        return x, y, cube_halfwidth
    if face == -2:
        # Bottom face (only reachable when sample_from_all_faces is enabled).
        x, y = np.random.uniform(low=-span, high=span, size=2)
        return x, y, -cube_halfwidth
    # Side face: sample on the front face (y = -halfwidth), rotate by 90deg * face.
    front_x, front_z = np.random.uniform(low=-span, high=span, size=2)
    return apply_rotation_z((front_x, -cube_halfwidth, front_z), face * np.pi / 2)
def sample_heuristic_points(cube_halfwidth=0.0325, shrink_region=1.0):
    '''
    Sample three points on the normal cube heuristically.

    One point is the center of a randomly chosen side face; the other two lie
    on the parallel face on the opposite side and are point-symmetric w.r.t.
    that face's center, at least 10% of the halfwidth away from it.

    Args:
        cube_halfwidth: halfwidth of the cube (float)
        shrink_region: shrink the support-point sample region (float)

    Returns:
        list of three numpy float arrays, one (x, y, z) position each
    '''
    min_dist = cube_halfwidth * 0.1
    # center of the front face (y = -halfwidth)
    x_, z_ = 0, 0
    y_ = -cube_halfwidth
    center_point = (x_, y_, z_)
    # two points that are point symmetric w.r.t. the center of the face;
    # rejection-sample until they are at least min_dist from the center
    x_, z_ = 0, 0
    while np.sqrt(x_ ** 2 + z_ ** 2) < min_dist:
        x_, z_ = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                   high=cube_halfwidth * shrink_region, size=2)
    y_ = -cube_halfwidth
    x__, z__ = -x_, -z_  # point symmetric w.r.t. the center point
    y__ = y_
    support_point1 = (x_, y_, z_)
    support_point2 = (x__, y__, z__)
    # sample two faces that are in parallel
    faces = [0, 1, 2, 3]
    face = random.choice(faces)
    # BUGFIX: the original `face + 2 % 4` parses as `face + 2` due to operator
    # precedence; the intended parallel-face index is (face + 2) % 4. The
    # resulting rotation angle only differs by a multiple of 2*pi, so sampled
    # points are numerically unchanged, but the index is now correct.
    parallel_face = (face + 2) % 4
    # apply rotation to the points according to the chosen face.
    # BUGFIX: `dtype=np.float` -- the np.float alias was removed in
    # NumPy 1.24; the builtin float is the documented replacement.
    sample_points = []
    rot_theta = face * np.pi / 2
    sample_points.append(np.asarray(apply_rotation_z(center_point, rot_theta),
                                    dtype=float))
    for point in [support_point1, support_point2]:
        rot_theta = parallel_face * np.pi / 2
        sample_points.append(np.asarray(apply_rotation_z(point, rot_theta),
                                        dtype=float))
    return sample_points
def sample_cube_surface_points(cube_halfwidth=0.0325,
                               shrink_region=0.8,
                               num_samples=3,
                               heuristic='pinch'):
    '''
    Sample points on the surfaces of the cube except the one at the bottom.

    NOTE: This function only works when the bottom face is fully touching
    the table. The `heuristic` argument selects the sampling strategy
    ('pinch', 'center_of_three', 'center_of_two', or None for uniform
    sampling); the heuristic strategies always produce exactly 3 points.

    Args:
        cube_halfwidth: halfwidth of the cube (float)
        shrink_region: shrink the sample region on each plane by the
            specified coefficient (float)
        num_samples: number of points to sample (int)
        heuristic: sampling strategy name or None

    Returns:
        numpy array of sampled positions
    '''
    if heuristic == 'pinch':
        assert num_samples == 3, 'heuristic sampling only supports 3 samples'
        points = sample_heuristic_points(cube_halfwidth=cube_halfwidth,
                                         shrink_region=shrink_region)
    elif heuristic == 'center_of_three':
        assert num_samples == 3
        # NOTE(review): sample_center_of_three is not defined in this file
        # as shown -- confirm where it comes from.
        points = sample_center_of_three(cube_halfwidth=cube_halfwidth)
    elif heuristic == 'center_of_two':
        assert num_samples == 3  # don't use this flag
        points = sample_center_of_two(cube_halfwidth=cube_halfwidth)
    elif heuristic is None:
        points = [sample_from_normal_cube(cube_halfwidth,
                                          shrink_region=shrink_region)
                  for _ in range(num_samples)]
    else:
        raise KeyError('Unrecognized heuristic value: {}. Use one of ["pinch", "center_of_three", None]'.format(heuristic))
    return np.array(points)
# NOTE(review): only the docstring is present here; method implementations
# are presumably defined elsewhere or were stripped from this copy.
class VisualMarkers:
    '''Visualize spheres on the specified points.'''
# NOTE(review): only the docstring is present here; method implementations
# are presumably defined elsewhere or were stripped from this copy.
class VisualCubeOrientation:
    '''Visualize cube orientation by three cylinders (one per axis).'''
class CylinderMarker:
    """Visualize a cylinder."""

    def __init__(
            self, radius, length, position, orientation, color=(0, 1, 0, 0.5)):
        """
        Create a cylinder marker for visualization

        Args:
            radius (float): radius of cylinder.
            length (float): length of cylinder.
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
            color: Color of the cube as a tuple (r, b, g, q)
        """
        # Visual-only shape: no collision shape is attached, so the marker
        # does not interact with the physics simulation.
        self.shape_id = p.createVisualShape(
            shapeType=p.GEOM_CYLINDER,
            radius=radius,
            length=length,
            rgbaColor=color
        )
        self.body_id = p.createMultiBody(
            baseVisualShapeIndex=self.shape_id,
            basePosition=position,
            baseOrientation=orientation
        )

    def set_state(self, position, orientation):
        """Set pose of the marker.

        Args:
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
        """
        p.resetBasePositionAndOrientation(
            self.body_id,
            position,
            orientation
        )
import copy
from rrc_simulation.gym_wrapper.envs.cube_env import ActionType
# NOTE(review): only the docstring is present here; __init__/__enter__/
# __exit__ are presumably defined elsewhere or were stripped from this copy.
class action_type_to:
    '''
    Context manager that temporarily sets the action type and action space.

    This applies to all wrappers and the original environment recursively ;)
    '''
def repeat(sequence, num_repeat=3):
    '''
    Repeat each element of *sequence* num_repeat times, preserving order:
    [1, 2, 3] with num_repeat=3 --> [1, 1, 1, 2, 2, 2, 3, 3, 3]
    '''
    repeated = []
    for element in sequence:
        repeated.extend([element] * num_repeat)
    return repeated
def ease_out(sequence, in_rep=1, out_rep=5):
    '''
    Create an "ease out" motion: elements in the first two thirds of the
    sequence are repeated in_rep times each, while the repetition count over
    the last third ramps linearly from in_rep up to out_rep.
    '''
    split = len(sequence) // 3
    head = sequence[:-split]
    tail = sequence[-split:]
    # Linearly interpolated (then truncated-to-int) counts across the tail.
    tail_counts = np.interp(list(range(len(tail))),
                            [0, len(tail) - 1],
                            [in_rep, out_rep]).astype(int).tolist()
    head_counts = (np.ones(len(head)) * in_rep).astype(int).tolist()
    counts = head_counts + tail_counts
    assert len(counts) == len(sequence)
    eased = []
    for element, count in zip(sequence, counts):
        eased.extend([element] * count)
    return eased
# NOTE(review): only the docstring is present here; __init__/__enter__/
# __exit__ are presumably defined elsewhere or were stripped from this copy.
class frameskip_to:
    '''
    Context manager that temporarily sets the environment frameskip.

    This applies to all wrappers and the original environment recursively ;)
    '''
# NOTE(review): only the docstring is present here; __enter__/__exit__ are
# presumably defined elsewhere or were stripped from this copy.
class keep_state:
    '''
    A Context Manager that preserves the state of the simulator
    '''
| [
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
15065,
1616,
355,
279,
198,
11748,
340,
861,
10141,
198,
6738,
374,
6015,
62,
14323,
1741,
1330,
5874,
62,
48205,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
35636,
... | 2.273541 | 3,908 |