content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from schematics import Model
from schematics.types import ModelType, ListType, StringType, IntType, DateTimeType, BooleanType, FloatType, DictType, UnionType, MultiType
| [
6738,
3897,
6759,
873,
1330,
9104,
198,
6738,
3897,
6759,
873,
13,
19199,
1330,
9104,
6030,
11,
7343,
6030,
11,
10903,
6030,
11,
2558,
6030,
11,
7536,
7575,
6030,
11,
41146,
6030,
11,
48436,
6030,
11,
360,
713,
6030,
11,
4479,
6030,
... | 3.305085 | 59 |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/6/15.
"""
from app.core.swagger_filed import StringPathFiled, StringQueryFiled, BodyField
__author__ = 'Allen7D'
type_id_in_path = StringPathFiled(
name='id', description='字典类型ID', enum=[1, 2, 3, 4, 5, 10, 100], default=1)
type_id_in_query = StringQueryFiled(
name='type_id', description='字典类型ID', enum=[1, 2, 3, 4, 5, 10, 100], default=1)
name = BodyField(name='name', type='string', description='字典名称', enum=['用户性别', '菜单状态'])
type = BodyField(name='type', type='string', description='字典类型', enum=['sys_user_sex', 'sys_show_hide'])
status = BodyField(name='status', type='boolean', description='状态(True正常, False停用)', enum=[True, False], default=True)
remark = BodyField(name='remark', type='string', description='备注', enum=[''])
| [
2,
4808,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
198,
37811,
198,
220,
15622,
416,
9659,
22,
35,
319,
12131,
14,
21,
14,
1314,
13,
198,
37811,
198,
6738,
598,
13,
7295,
13,
2032,
7928,
62,
69,
3902,
1330,
10903,
15235,
... | 2.267045 | 352 |
#!/usr/bin/python3
"""
Tune hyperparameters of a KeraSTS model on the given task, that is
train + evaluate the model with many different randomly samples config
settings.
Usage: tools/tuning.py MODEL TASK TRAINDATA VALDATA PARAM=VALUESET...
Example:
tools/tuning.py cnn anssel data/anssel/wang/train-all.csv data/anssel/wang/dev.csv \
"dropout=[1/2, 2/3, 3/4]" "inp_e_dropout=[1/2, 3/4, 4/5]" "l2reg=[1e-4, 1e-3, 1e-2]" \
"project=[True, True, False]" "cnnact=['tanh', 'relu']" \
"cdim={1: [0,0,1/2,1,2], 2: [0,0,1/2,1,2,0], 3: [0,0,1/2,1,2,0], 4: [0,0,1/2,1,2,0], 5: [0,0,1/2,1,2]},"
That is, the VALUESET is array of possible values for the given parameter;
in case the parameter takes a dict, it is a dict of key-valuesets.
TODO use spearmint or something for non-random sampling and estimation
of influence of different parameters on performance
"""
from __future__ import print_function
from __future__ import division
import importlib
import numpy as np
import sys
import time
import pysts.embedding as emb
from pysts.hyperparam import RandomSearch, hash_params
import models # importlib python3 compatibility requirement
import tasks
from train import config, train_and_eval
# Unused imports for evaluating commandline params
from keras.layers.recurrent import SimpleRNN, GRU, LSTM
from pysts.kerasts.objectives import ranknet, ranksvm, cicerons_1504
import pysts.kerasts.blocks as B
if __name__ == "__main__":
modelname, taskname, trainf, valf = sys.argv[1:5]
params = sys.argv[5:]
model_module = importlib.import_module('.'+modelname, 'models')
task_module = importlib.import_module('.'+taskname, 'tasks')
task = task_module.task()
# Preliminary config:
# (N.B. some conf values will be the sets, which is not something
# we can use directly, but we just assume whatever we use below
# directly wasn't specified as a tunable.)
conf, ps, h = config(model_module.config, task.config, params)
task.set_conf(conf)
# TODO configurable embedding class
if conf['embdim'] is not None:
print('GloVe')
task.emb = emb.GloVe(N=conf['embdim'])
print('Dataset')
if 'vocabf' in conf:
task.load_vocab(conf['vocabf'])
task.load_data(trainf, valf)
tuneargs = dict()
for p in params:
k, v = p.split('=')
v = eval(v)
if isinstance(v, list) or isinstance(v, dict):
tuneargs[k] = v
rs = RandomSearch(modelname+'_'+taskname+'_log.txt', **tuneargs)
for ps, h, pardict in rs():
# final config for this run
conf, ps, h = config(model_module.config, task.config, [])
for k, v in pardict.items():
conf[k] = v
ps, h = hash_params(conf)
task.set_conf(conf)
runid = '%s-%s-%x' % (taskname, modelname, h)
print()
print(' ...... %s .................... %s' % (runid, ps))
try:
model, res = train_and_eval(runid, model_module.prep_model, task, conf)
rs.report(ps, h, res[1])
except Exception as e:
print(e)
time.sleep(1)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
37811,
198,
51,
1726,
8718,
17143,
7307,
286,
257,
509,
8607,
2257,
50,
2746,
319,
262,
1813,
4876,
11,
326,
318,
198,
27432,
1343,
13446,
262,
2746,
351,
867,
1180,
15456,
8405,
4566,
... | 2.414219 | 1,294 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@Copyright © 2021 sanbo Inc. All rights reserved.
@Description:
@Version: 1.0
@Create: 2021-01-15 12:32:54
@author: sanbo
'''
from run_core import LanZouCloud
version = '1.2.0'
__all__ = ['utils', 't_variable', 'models', 'LanZouCloud', 'version', 'upload_2_github', 'file_2_base64']
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
31,
15269,
10673,
33448,
5336,
2127,
3457,
13,
1439,
2489,
10395,
13,
198,
31,
11828,
25,
220,
198... | 2.456522 | 138 |
import math
if __name__ == '__main__':
main()
| [
11748,
10688,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.454545 | 22 |
# -*- coding: utf-8 -*-
"""Module with main code for pyani application/package.
Python package version should match:
r"^__version__ = '(?P<version>[^']+)'$" for setup.py
"""
__version__ = "0.1.0-alpha"
name = "lpbio"
import os
import shlex
import shutil
from subprocess import check_output, CalledProcessError
class LPBioNotExecutableError(Exception):
"""Exception raised when expected executable is not executable"""
def is_exe(filename):
"""Returns True if path is to an executable file
Use shutil.which() instead?
"""
filename = shlex.quote(filename)
if os.path.isfile(filename) and os.access(filename, os.X_OK):
return True
else:
try:
exefile = shutil.which(filename)
except CalledProcessError:
raise LPBioNotExecutableError("{0} does not exist".format(filename))
return os.path.isfile(exefile) and os.access(exefile, os.X_OK)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
26796,
351,
1388,
2438,
329,
12972,
3216,
3586,
14,
26495,
13,
198,
198,
37906,
5301,
2196,
815,
2872,
25,
198,
198,
81,
1,
61,
834,
9641,
834,
796,
29513,
30,
... | 2.708824 | 340 |
# Vendor
import numpy as np
# Project
from util import to_one_hot
from gates.Gate import Gate
| [
2,
39896,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
4935,
198,
6738,
7736,
1330,
284,
62,
505,
62,
8940,
198,
6738,
17435,
13,
22628,
1330,
12816,
198
] | 3.275862 | 29 |
#!/usr/bin/python3
import sys
import csv
infile = sys.stdin
#next(infile)
#fuel column index 8
for line in infile:
line = line.strip()
l = line.split(',')
if(l[0]=='ball'):
#if(l[9]=='lbw' or l[9]=='bowled' or l[9]=='caught' or l[9]=='caught and bowled' or l[9]=='stumped' or l[9]=='hit wicket' or l[9]=='obstructing the field'):
runs =l[4]+','+l[6]+','+str(l[7])+','+str(l[8])
print(runs," 1")
'''else:
not_out=l[4]+','+l[6]+','+"0"
print(not_out," 1")'''
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
25064,
198,
11748,
269,
21370,
198,
259,
7753,
796,
25064,
13,
19282,
259,
198,
2,
19545,
7,
259,
7753,
8,
198,
198,
2,
25802,
5721,
6376,
807,
198,
1640,
1627,
287,
1167,
576,
... | 1.825342 | 292 |
from django.utils.text import slugify
def total_orgs_participated_year(data):
"""
Returns the total number of organisations participated
in the specified year. The year data is passed as parameter
to the function.
"""
return len(data.keys())
def total_projects_done_year(data):
"""
Returns the total number of projects completed in
a year. The data of the year is passed as parameter
to the function.
"""
no_of_projects = 0
for org in data.keys():
no_of_projects += len(data[org])
return no_of_projects
def get_org_name_from_slug(slug, DATA):
"""
Returns the name of the org from its slug
"""
for year in DATA.keys():
for org_name in DATA[year].keys():
if slug == slugify(org_name):
return org_name
def get_years_of_participation(org_name, DATA):
"""
Returns a list of the years the organisations
has taken part in the GSoC
"""
years_of_participation = []
for year in DATA.keys():
for org in DATA[year].keys():
if org == org_name:
years_of_participation.append(year)
return years_of_participation
def get_no_of_projects_each_year(org_name, DATA):
"""
Returns a dictionary of the year and number of projects
of a organisation and also returns the total number of
projects
"""
context = {}
total_projects = 0
for year in DATA.keys():
if DATA[year].get(org_name):
context[year] = len(DATA[year][org_name])
total_projects += len(DATA[year][org_name])
return context, total_projects | [
6738,
42625,
14208,
13,
26791,
13,
5239,
1330,
31065,
1958,
628,
198,
4299,
2472,
62,
2398,
82,
62,
3911,
40988,
62,
1941,
7,
7890,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
16409,
262,
2472,
1271,
286,
16435,
14888,
198,
... | 2.601911 | 628 |
import json
from fastapi import APIRouter
from ..worker import celery
router = APIRouter()
@router.get("/check-task/{id}")
| [
11748,
33918,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
198,
6738,
11485,
28816,
1330,
18725,
1924,
628,
198,
472,
353,
796,
3486,
4663,
39605,
3419,
628,
198,
31,
472,
353,
13,
1136,
7203,
14,
9122,
12,
35943,
14,
90,
312,
92,... | 2.886364 | 44 |
# AUTOGENERATED! DO NOT EDIT! File to edit: 10_1_Python_Phone_and_email_scrapper.ipynb (unless otherwise specified).
__all__ = ['phoneRegEx', 'emailRegEx', 'text', 'listOfAllPhoneNumbers', 'allPhoneNumbers', 'allEmailAddresses',
'results']
# Cell
#! python3
import re, pyperclip
#Create a Regex object for phone numbers
phoneRegEx = re.compile(r'''
# 415-555-0000, 555-0000, (415) 555-0000, 555-0000 ext 12345, ext. 1235, x12345
(# adding everything to be part of 1 large group to bypass the "findall" to tuple dynamic
((\d\d\d)|\((\d\d\d)\))? # area code (optional)
( |-) # first separator
(\d\d\d) # first 3 digits
- # second separator
(\d\d\d\d) # last 4 digits
(\s(ext\.?|x)\s)? # extension word part (optional)
(\d{2,5})? # extension number part (optional)
)# end of adding everything to be part of 1 large group to bypass the "findall" to tuple dynamic
''', re.VERBOSE)
#TO DO: Create a Regex object for phone email addresses
emailRegEx = re.compile(r'''
# some.+_thing@some.+_thingcom
[a-zA-Z0-9.+_]+ #name part
@ # @ symbol
[a-zA-Z0-9.+_]+\.[com|org|edu]+ # domain part
''', re.VERBOSE)
#extract the email /phonr off the clipboard
text = pyperclip.paste()
#: Extract email / phone from this text
listOfAllPhoneNumbers = []
allPhoneNumbers = phoneRegEx.findall(text)
for phoneNumber in allPhoneNumbers:
listOfAllPhoneNumbers.append(phoneNumber[0])
allEmailAddresses = emailRegEx.findall(text)
# DEBUG: print(listOfAllPhoneNumbers)
# DEBUG: print(allEmailAddresses)
#Copy extracted list back to the clipboard
results = '\n'.join(listOfAllPhoneNumbers) + '\n' + '\n'.join(allEmailAddresses)
# DEBUG: print(results)
pyperclip.copy(results) | [
2,
47044,
7730,
1677,
1137,
11617,
0,
8410,
5626,
48483,
0,
9220,
284,
4370,
25,
838,
62,
16,
62,
37906,
62,
6132,
62,
392,
62,
12888,
62,
1416,
430,
2848,
13,
541,
2047,
65,
357,
25252,
4306,
7368,
737,
198,
198,
834,
439,
834,
... | 2.423231 | 749 |
import requests
from bs4 import BeautifulSoup
import webbrowser
from tkinter import *
from PIL import ImageTk,Image
import re
root=Tk()
root.geometry('700x700')
root.config(bg="light green")
root.title('site scrapped')
job=StringVar()
city=StringVar()
img = ImageTk.PhotoImage(Image.open("shine.png"))
img_label=Label(image=img,padx=150 , pady=20)
img_label.pack()
L1 = Label(root, text="Enter Job/Skill",font=('Helvetica bold', 13),borderwidth=4)
L1.pack()
E1 = Entry(root, bd=5,textvariable=job,borderwidth=4)
E1.pack()
L2 = Label(root, text="Enter City",font=('Helvetica bold', 13),borderwidth=4)
L2.pack()
E2 = Entry(root, bd=5,textvariable=city)
E2.pack()
b1=Button(root,text="search",command=click,borderwidth=4)
b1.pack()
root.mainloop() | [
11748,
7007,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
3992,
40259,
198,
6738,
256,
74,
3849,
1330,
1635,
198,
6738,
350,
4146,
1330,
7412,
51,
74,
11,
5159,
198,
11748,
302,
198,
15763,
28,
51,
74,
3419,
198,
1576... | 2.628975 | 283 |
'Calculando a Iluminação'
print("ESTE PROGRAMA VISA CALCULAR A ILUMINAÇÃO NECESSÁRIA PARA SEU COMODO")
import math
comprimento = float(input("Digite o valor do comprimento = "))
altura = float(input("Digite o valor para a altura = "))
largura = float(input("Digite o valor da largura = "))
k=round(float((comprimento*largura)/((comprimento+largura)*altura)),2)
area=comprimento*largura
y=k
if k >4.5:
print("A letra da tabela 10 é = A ")
elif y > 3.5 and y <= 4.5:
y =y-0.1
k==y
print("A letra da tabela 10 para K =",k,"é : B")
elif y >2.75 and y <= 3.5:
y=y-0.1
k==y
print("A letra da tabela 10 para K =",k,"é : C")
elif y >2.25 and y <= 2.75:
y=y-0.1
k==y
print(" A letra da tabela 10 para K =",k,"é : D")
elif y >1.75 and y <= 2.25:
y=y-0.1
k==y
print(" A letra da tabela 10 para K =",k,"é : E")
elif y >1.35 and y <= 1.75:
y=y-0.1
k==y
print(" A letra da tabela 10 para k =",k,"é : F")
elif y >1.12 and y <= 1.35:
y=y-0.1
k==y
print(" A letra da tabela 10 para K =",k,"é : G")
elif y > 0.90 and y <= 1.12:
y=y-0.1
k==y
print(" A letra da tabela 10 para K =",k,"é : H")
elif y >= 0.70 and y <= 0.90:
print(" A letra da tabela 10 para K =",k,"é : I")
elif y <= 0.70:
y=y-0.1
k==y
print(" A letra da tabela 10 para K=",k,"é = J")
else:
print("Não existe na tabela 10")
idade=int(input("Digite a idade que frequentará o ambiente = "))
velocidadeDePrecisao=(input("\nDigite a Velocidade de precisão, se = sem importância, Importante, Critica = "))
VdP=velocidadeDePrecisao
percentual=int(input("Digite o percentual de refletância do ambiente sem o simbolo % = "))
luminancia=int(input("Na tabela 4 ou 6, verifique a quantidade de lux necessária = "))
Utilização=float(input("Digite o coeficiente de utilização = "))
depreciacao=float(input("Digite o fator de depreciação= "))
fatorManutenção=float(input("Digite o fator de manutenção = "))
lampada=float(input("Digite a potência em Watts da lâmpada que será utilizada = "))
lux=float(input("Digite a quantidade de lux da lãmpada = "))
if idade <40:
SomaIdade=-1
elif idade >= 40 and idade <= 55:
SomaIdade=0
else:
SomaIdade=1
if len(VdP) == 15:
SomaIdade =SomaIdade + -1
elif len(VdP) == 10:
SomaIdade =SomaIdade + 0
else:
SomaIdade =SomaIdade + 1
if percentual > 70:
SomaIdade =SomaIdade + -1
if percentual >=30 and percentual <=70:
SomaIdade =SomaIdade + 0
else:
SomaIdade =SomaIdade + 1
phi=round(float((luminancia*area)/(Utilização*depreciacao)),2)
theta=int(lampada*lux)
quantidadeLampadas=int(phi/theta)
fluxoL=round(float((luminancia*area)/(Utilização*depreciacao*fatorManutenção)),2)
print("A quantidade de lãmpadas necessárias pelo calculo de fato médio é = ",quantidadeLampadas,".")
print("A quantidade de lãmpadas necessárias pelo calculo de fluxo luminoso é = ",quantidadeLampadas,".")
| [
6,
9771,
3129,
25440,
257,
13778,
388,
1437,
16175,
28749,
6,
198,
4798,
7203,
6465,
36,
46805,
32,
569,
22312,
33290,
34,
37232,
317,
14639,
5883,
28893,
127,
229,
5746,
46,
41804,
7597,
127,
223,
49,
3539,
350,
24401,
7946,
52,
9440... | 2.199545 | 1,318 |
from django.db.models import Q
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from allauth.account.adapter import DefaultAccountAdapter
from allauth.socialaccount.adapter import DefaultSocialAccountAdapter
from gem.models import Invite
class StaffUserAdapter(StaffUserMixin, DefaultAccountAdapter):
""" give users an is_staff default of true """
class StaffUserSocialAdapter(StaffUserMixin, DefaultSocialAccountAdapter):
""" give users an is_staff default of true """
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
2448,
3411,
11,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
11299,
19199,
13,
27530,
1330,
14041,
6030,
198,... | 3.826389 | 144 |
#!/usr/bin/python3
from bs4 import BeautifulSoup
import requests
import pandas as pd
import sys
import re
banner_text = '''
██████╗ ██████╗ ██╗ ██╗██╗██████╗ ██╗ █████╗ ██╗ ██╗██╗ ██╗███████╗ ███████╗████████╗ █████╗ ████████╗██╗ ██████╗███████╗
██╔════╝██╔═══██╗██║ ██║██║██╔══██╗ ███║██╔══██╗ ██║ ██║██║ ██║██╔════╝ ██╔════╝╚══██╔══╝██╔══██╗╚══██╔══╝██║██╔════╝██╔════╝
██║ ██║ ██║██║ ██║██║██║ ██║█████╗╚██║╚██████║ ██║ ██║██║ ██║█████╗ ███████╗ ██║ ███████║ ██║ ██║██║ ███████╗
██║ ██║ ██║╚██╗ ██╔╝██║██║ ██║╚════╝ ██║ ╚═══██║ ██║ ██║╚██╗ ██╔╝██╔══╝ ╚════██║ ██║ ██╔══██║ ██║ ██║██║ ╚════██║
╚██████╗╚██████╔╝ ╚████╔╝ ██║██████╔╝ ██║ █████╔╝ ███████╗██║ ╚████╔╝ ███████╗ ███████║ ██║ ██║ ██║ ██║ ██║╚██████╗███████║
╚═════╝ ╚═════╝ ╚═══╝ ╚═╝╚═════╝ ╚═╝ ╚════╝ ╚══════╝╚═╝ ╚═══╝ ╚══════╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚══════╝
Script Written By r00tk1ll3r
DISCLAIMER:
ALL OF THESE STATICS FROM https://www.corona.help
AND THANKS TO Alex Dumitru WHO BUILT ABOVE MENTIONED WEB SITE
To See Useage just execute python3 COVID19.py
'''
print(banner_text)
coronaPage = requests.get("https://corona.help")
pageContent = BeautifulSoup(coronaPage.content,'html.parser')
if(len(sys.argv) == 2):
j = 0
statics = pageContent.find_all(class_="col-xl-2 col-md-4 col-sm-6")
if("totalInfected" in str(sys.argv)):
for i in statics:
totalInfected = i.find(class_="mb-0 line-ellipsis").get_text()
if(totalInfected == "Total people infected"):
value = statics[j].find(class_="text-bold-700").get_text()
print(totalInfected + " " +value)
j=j+1
elif("totalDeath" in str(sys.argv)):
for i in statics:
totalDeath = i.find(class_="mb-0 line-ellipsis").get_text()
if(totalDeath == "Total deaths"):
value = statics[j].find(class_="text-bold-700").get_text()
print(totalDeath + " " + value)
j=j+1
elif("totalRecovered" in str(sys.argv)):
for i in statics:
totalRecovered = i.find(class_="mb-0 line-ellipsis").get_text()
if(totalRecovered == "Total people recovered"):
value = statics[j].find(class_="text-bold-700").get_text()
print(totalRecovered + " " + value)
j=j+1
elif("todayInfected" in str(sys.argv)):
for i in statics:
todayInfected = i.find(class_="mb-0 line-ellipsis").get_text()
if(todayInfected == "People infected today"):
value = statics[j].find(class_="text-bold-700").get_text()
print(todayInfected+ " " + value)
j=j+1
elif("todayDeath" in str(sys.argv)):
for i in statics:
todayDeath = i.find(class_="mb-0 line-ellipsis").get_text()
if(todayDeath== "Deaths today"):
value = statics[j].find(class_="text-bold-700").get_text()
print(todayDeath + " " + value)
j=j+1
elif("todayRecovered" in str(sys.argv)):
for i in statics:
todayRecovered = i.find(class_="mb-0 line-ellipsis").get_text()
if(todayRecovered == "People recovered today"):
value = statics[j].find(class_="text-bold-700").get_text()
print(todayRecovered + " " + value)
j=j+1
elif(len(sys.argv) == 3):
k=0
if("COUNTRY" in str(sys.argv)):
countryStatics = pageContent.find(class_="table table-hover-animation mb-0")
tr = countryStatics.find_all("tr")
for j in tr:
div = j.find("div")
if div is not None:
if(re.sub('W+',' ', div.get_text() ).lower()==str(sys.argv[2])):
country = "| " +div.get_text() + " | "
infected = tr[k].find(class_="text-warning").get_text() + " | "
deaths = tr[k].find(class_="text-danger").get_text() + " | "
recovered = tr[k].find(class_="text-success").get_text() + " | "
active = tr[k].find_all("td")
active = active[4].get_text() + " | "
static = pd.DataFrame({
"| Country | ":[country],
"Infected | ":[infected],
"Deaths | ":[deaths],
"Recovered | ":[recovered],
"Active | ":[active]
})
print(static)
k=k+1
else:
useage = '''
USEAGE
------
TO GET TOTAL NUMBER OF PEOPLE WHO INFECTED BY COVID-19 : python3 COVID19.py totalInfected
TO GET TOTAL NUMBER DEATH BY COVID-19 : python3 COVID19.py totalDeath
TO GET TOTAL NUMBER OF PEOPLE WHO RECOVERED FROM COVID-19 : python3 COVID19.py totalRecovered
TO GET TOTAL NUMBER OF PEOPLE WHO INFECTED TODAY : python3 COVID19.py todayInfected
TO GET TOTAL NUMBER OF DEATH TODAY : python3 COVID19.py todayDeath
TO GET TOTAL NUMBER OF RECOVERED PEOPLE IN TODAY : python3 COVID19.py todayRecovered
TO GET SPECIFIC COUNTRY'S STATICS : python3 COVID19.py COUNTRY COUNTRY-NAME
IF YOU WANT TO GET CHINA'S STATICS : python3 COVID19.py COUNTRY "mainland china"
'''
print(useage) | [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
25064,
198,
11748,
302,
198,
198,
3820,
1008,
62,
5239,
796,
705,
706... | 1.759514 | 3,127 |
#!/usr/bin/env python3
import tensorflow as tf
from models.model import LogueModel
flags = tf.app.flags
flags.DEFINE_float('lr', 0.01, 'Learning rate.')
flags.DEFINE_integer('embed_size', 384, 'Word embedding size.')
flags.DEFINE_bool('no_josa', True, 'Filter Josa in korean sentence?')
flags.DEFINE_string('log_dir', 'run/logs/', 'Logging directory.')
flags.DEFINE_string('save_dir', 'run/checkpoints/', 'Model saving directory.')
flags.DEFINE_integer('save_interval', 15 * 60, 'Model save interval. (sec)')
flags.DEFINE_integer('summary_interval', 60, 'Summary saving interval. (sec)')
FLAGS = flags.FLAGS
if __name__ == '__main__':
tf.app.run() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
4981,
13,
19849,
1330,
406,
5119,
17633,
198,
198,
33152,
796,
48700,
13,
1324,
13,
33152,
198,
198,
33152,
13,
7206,
29940,
6... | 2.824034 | 233 |
import shutil, os, Evtx, mmap, contextlib, sys, xml, random_functions, colorama, Forensics_tool_redesigned_using_oops
from Evtx.Evtx import FileHeader
from Evtx.Views import evtx_file_xml_view
from xml.dom import minidom
#module2 - event manager module
| [
11748,
4423,
346,
11,
28686,
11,
4319,
17602,
11,
8085,
499,
11,
4732,
8019,
11,
25064,
11,
35555,
11,
4738,
62,
12543,
2733,
11,
3124,
1689,
11,
4558,
49242,
62,
25981,
62,
445,
274,
3916,
62,
3500,
62,
44860,
201,
198,
6738,
4319,... | 2.910112 | 89 |
""" Bluesteel Project tests """
from django.test import TestCase
from django.utils import timezone
from app.logic.bluesteel.models.BluesteelLayoutModel import BluesteelLayoutEntry
from app.logic.bluesteel.models.BluesteelProjectModel import BluesteelProjectEntry
from app.logic.gitrepo.models.GitProjectModel import GitProjectEntry
from app.logic.gitrepo.models.GitCommitModel import GitCommitEntry
from app.logic.gitrepo.models.GitUserModel import GitUserEntry
from app.logic.commandrepo.models.CommandGroupModel import CommandGroupEntry
from app.logic.commandrepo.models.CommandSetModel import CommandSetEntry
from app.logic.commandrepo.models.CommandModel import CommandEntry
| [
37811,
12391,
29872,
417,
4935,
5254,
37227,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
598,
13,
6404,
291,
13,
65,
2290,
29872,
417,
13,
27530,
13,
38676,... | 3.405 | 200 |
# dictionaries (aka Java Maps)
d = {'k':'v'}
print(d['k'])
d = dict(key = 'v')
print(d['key'])
# check contains key
print('key' in d)
print('v' not in d)
# any immutable type of key (no arrays)
d = {
(1, 2, 3): "tuple",
"abc": "string",
999: "num"
}
# update/new dict value
d['k'] = 'v'
# remove key
del d['k']
# k/v arr
for key in d.keys():
print(key)
for value in d.values():
print(value)
# items as tuple
for tu in d.items():
print(tu[0], tu[1])
# get key or default w/o error
print(d.get('null')) | [
2,
48589,
3166,
357,
8130,
7349,
20347,
8,
198,
198,
67,
796,
1391,
6,
74,
10354,
6,
85,
6,
92,
198,
4798,
7,
67,
17816,
74,
6,
12962,
198,
67,
796,
8633,
7,
2539,
796,
705,
85,
11537,
198,
4798,
7,
67,
17816,
2539,
6,
12962,
... | 2.156863 | 255 |
df['A']
# A
# ---------
# -0.613035
# -1.265520
# 0.763851
# -1.248425
# 2.105805
# 1.763502
# -0.781973
# 1.400853
# -0.746025
# -1.120648
#
# [100 rows x 1 column] | [
7568,
17816,
32,
20520,
198,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
317,
198,
2,
45337,
198,
2,
532,
15,
13,
5333,
1270,
2327,
198,
2,
532,
16,
13,
2075,
2816,
1238,
198,
2,
220,
657,
13,
4304,
2548,
4349,
198,
2,
532... | 1.657407 | 108 |
from PyQt5.QtWidgets import QWidget, QMessageBox
from GUI.Ui_PataList import Ui_PataList
# from parameter import Parameter
from PyQt5.QtSerialPort import QSerialPort
from src.uart import Uart
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
38300,
11,
1195,
12837,
14253,
198,
6738,
25757,
13,
52,
72,
62,
47,
1045,
8053,
1330,
471,
72,
62,
47,
1045,
8053,
198,
2,
422,
11507,
1330,
25139,
2357,
198,
6738,
... | 2.924242 | 66 |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud DNS API."""
from google.cloud.client import JSONClient
from google.cloud.dns.connection import Connection
from google.cloud.dns.zone import ManagedZone
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: string
:param project: the project which the client acts on behalf of. Will be
passed when creating a zone. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def quotas(self):
"""Return DNS quotas for the project associated with this client.
See:
https://cloud.google.com/dns/api/v1/projects/get
:rtype: mapping
:returns: keys for the mapping correspond to those of the ``quota``
sub-mapping of the project resource.
"""
path = '/projects/%s' % (self.project,)
resp = self.connection.api_request(method='GET', path=path)
return dict([(key, int(value))
for key, value in resp['quota'].items() if key != 'kind'])
def list_zones(self, max_results=None, page_token=None):
"""List zones for the project associated with this client.
See:
https://cloud.google.com/dns/api/v1/managedZones/list
:type max_results: int
:param max_results: maximum number of zones to return, If not
passed, defaults to a value set by the API.
:type page_token: string
:param page_token: opaque marker for the next "page" of zones. If
not passed, the API will return the first page of
zones.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.dns.zone.ManagedZone`, plus a
"next page token" string: if the token is not None,
indicates that more zones can be retrieved with another
call (pass that value as ``page_token``).
"""
params = {}
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
path = '/projects/%s/managedZones' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
zones = [ManagedZone.from_api_repr(resource, self)
for resource in resp['managedZones']]
return zones, resp.get('nextPageToken')
def zone(self, name, dns_name=None, description=None):
"""Construct a zone bound to this client.
:type name: string
:param name: Name of the zone.
:type dns_name: string or :class:`NoneType`
:param dns_name: DNS name of the zone. If not passed, then calls
to :meth:`zone.create` will fail.
:type description: string or :class:`NoneType`
:param description: the description for the zone. If not passed,
defaults to the value of 'dns_name'.
:rtype: :class:`google.cloud.dns.zone.ManagedZone`
:returns: a new ``ManagedZone`` instance.
"""
return ManagedZone(name, dns_name, client=self,
description=description)
| [
2,
15069,
1853,
3012,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.471459 | 1,892 |
"""test_ridgepaths.py: tests for ridge_paths function."""
import itertools
import unittest
import torch
from scipy.spatial.distance import cosine
from gel.ridgepaths import ridge_paths
class TestRidgePathsEmptySupport(unittest.TestCase):
"""Test ridge_paths with empty support."""
class TestRidgePathsBase:
"""Base class for ridge_paths tests."""
lambdas = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
def test_against_naive(self):
"""Compare with directly obtained solution."""
# pylint: disable=no-member
summaries = ridge_paths(
self.X, self.y, self.support, self.lambdas, lambda _, b: b
)
# Compare each b with the naive solution.
I = torch.eye(self.X.shape[0], device=self.device, dtype=self.dtype)
Q = self.X @ self.X.t()
r = self.X @ self.y
for l, b in summaries.items():
b_naive = torch.inverse(Q + l * I) @ r
self.assertAlmostEqual(
cosine(b.cpu().numpy(), b_naive.cpu().numpy()), 0, places=2
)
for _device_name, _dtype, _m, _p in itertools.product(
["cpu", "cuda"], [torch.float32, torch.float64], [1, 10], [1, 5, 10, 20]
):
create_ridgepaths_test(_device_name, _dtype, _m, _p)
| [
37811,
9288,
62,
12818,
6978,
82,
13,
9078,
25,
5254,
329,
32525,
62,
6978,
82,
2163,
526,
15931,
198,
198,
11748,
340,
861,
10141,
198,
11748,
555,
715,
395,
198,
198,
11748,
28034,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
302... | 2.229682 | 566 |
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import os
try:
import pygrib
except ImportError:
warnings.warn("pygrib has not been imported")
from pygeobase.io_base import ImageBase, MultiTemporalImageBase
from pygeobase.object_base import Image
import pygeogrids
from pygeogrids import netcdf
from pynetcf.time_series import GriddedNcOrthoMultiTs
from datetime import timedelta
from library.era5.grid import ERA5025Cellgrid
from netCDF4 import Dataset
import matplotlib.pylab as plt
class ECMWF_ERA5_025Img(ImageBase):
"""
Class for reading one GLDAS Noah v2.1 nc file in 0.25° grid.
Parameters
----------
filename: string
filename of the GLDAS nc file
mode: string, optional
mode of opening the file, only 'r' is implemented at the moment
parameter : string or list, optional
one or list of parameters to read, see GLDAS v2.1 documentation for more information
Default : 'SoilMoi0_10cm_inst'
array_1D: boolean, optional
if set then the data is read into 1D arrays. Needed for some legacy code.
"""
class ECMWF_ERA5_025Ds(MultiTemporalImageBase):
    """
    Class for reading ECMWF ERA5 images in nc format (0.25° grid).

    NOTE(review): the original docstring described "GLDAS v2.1" images and a
    GLDAS default parameter; that text appears copy-pasted — confirm.

    Parameters
    ----------
    data_path : string
        path to the nc files
    parameter : string or list, optional
        one or list of parameters to read
        Default : 'SoilMoi0_10cm_inst'
    array_1D: boolean, optional
        if set then the data is read into 1D arrays. Needed for some legacy code.
    """

    def tstamps_for_daterange(self, start_date, end_date):
        """
        return timestamps for daterange,

        One timestamp per hour (00..23) for every day between start_date and
        end_date (both inclusive).

        Parameters
        ----------
        start_date: datetime
            start of date range
        end_date: datetime
            end of date range

        Returns
        -------
        timestamps : list
            list of datetime objects of each available image between
            start_date and end_date
        """
        # One offset per hour of the day; replaces 24 hand-written entries.
        img_offsets = np.array([timedelta(hours=h) for h in range(24)])

        timestamps = []
        diff = end_date - start_date
        for i in range(diff.days + 1):
            daily_dates = start_date + timedelta(days=i) + img_offsets
            timestamps.extend(daily_dates.tolist())
        return timestamps
| [
171,
119,
123,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
14601,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
12972,
70,
822,
198,
16341,
17267... | 1.922664 | 1,862 |
"""
This module defines the database classes.
"""
import json
import zlib
from typing import Any
import gridfs
from bson import ObjectId
from maggma.stores.aws import S3Store
from monty.dev import deprecated
from monty.json import MontyEncoder
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
)
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.io.vasp import Chgcar
from pymongo import ASCENDING, DESCENDING
from atomate.utils.database import CalcDb
from atomate.utils.utils import get_logger
__author__ = "Kiran Mathew"
__credits__ = "Anubhav Jain"
__email__ = "kmathew@lbl.gov"

logger = get_logger(__name__)

# If we use Maggma stores we will have to initialize a maggma store for each
# object type listed here: the large objects split out of task documents.
OBJ_NAMES = (
    "dos",
    "bandstructure",
    "chgcar",
    "locpot",
    "aeccar0",
    "aeccar1",
    "aeccar2",
    "elfcar",
)
class VaspCalcDb(CalcDb):
    """
    Class to help manage database insertions of Vasp drones
    """

    def build_indexes(self, indexes=None, background=True):
        """
        Build the indexes.

        Args:
            indexes (list): list of single field indexes to be built.
            background (bool): Run in the background or not.

        TODO: make sure that the index building is sensible and check for
            existing indexes.
        """
        _indices = (
            indexes
            if indexes
            else [
                "formula_pretty",
                "formula_anonymous",
                "output.energy",
                "output.energy_per_atom",
                "dir_name",
            ]
        )
        self.collection.create_index("task_id", unique=True, background=background)
        # build single field indexes
        for i in _indices:
            self.collection.create_index(i, background=background)
        # build compound indexes
        for formula in ("formula_pretty", "formula_anonymous"):
            self.collection.create_index(
                [
                    (formula, ASCENDING),
                    ("output.energy", DESCENDING),
                    ("completed_at", DESCENDING),
                ],
                background=background,
            )
            self.collection.create_index(
                [
                    (formula, ASCENDING),
                    ("output.energy_per_atom", DESCENDING),
                    ("completed_at", DESCENDING),
                ],
                background=background,
            )
        # TODO consider sensible index building for the maggma stores

    def insert_task(self, task_doc, use_gridfs=False):
        """
        Inserts a task document (e.g., as returned by Drone.assimilate()) into the database.
        Handles putting DOS, band structure and charge density into GridFS as needed.
        During testing, a percentage of runs on some clusters had corrupted AECCAR files
        when even if everything else about the calculation looked OK.
        So we do a quick check here and only record the AECCARs if they are valid

        Args:
            task_doc (dict): the task document
            use_gridfs (bool): store the data matching OBJ_NAMES to gridfs.
                if maggma_store_type is set (ex. "s3") this flag will be ignored

        Returns:
            (int) - task_id of inserted document
        """
        big_data_to_store = {}

        def extract_from_calcs_reversed(obj_key):
            """
            Grab the data from calcs_reversed.0.obj_key and store on gridfs directly or some Maggma store

            Args:
                obj_key: Key of the data in calcs_reversed.0 to store
            """
            calcs_r_data = task_doc["calcs_reversed"][0][obj_key]

            # remove the big object from all calcs_reversed
            # this can catch situations were the drone added the data to more than one calc.
            for i_calcs in range(len(task_doc["calcs_reversed"])):
                del task_doc["calcs_reversed"][i_calcs][obj_key]
            return calcs_r_data

        # drop the data from the task_document and keep them in a separate dictionary (big_data_to_store)
        if (
            self._maggma_store_type is not None or use_gridfs
        ) and "calcs_reversed" in task_doc:
            for data_key in OBJ_NAMES:
                if data_key in task_doc["calcs_reversed"][0]:
                    big_data_to_store[data_key] = extract_from_calcs_reversed(data_key)

        # insert the task document
        t_id = self.insert(task_doc)

        if "calcs_reversed" in task_doc:
            # upload the data to a particular location and store the reference to that location in the task database
            for data_key, data_val in big_data_to_store.items():
                fs_di_, compression_type_ = self.insert_object(
                    use_gridfs=use_gridfs,
                    d=data_val,
                    collection=f"{data_key}_fs",
                    task_id=t_id,
                )
                self.collection.update_one(
                    {"task_id": t_id},
                    {
                        "$set": {
                            f"calcs_reversed.0.{data_key}_compression": compression_type_
                        }
                    },
                )
                self.collection.update_one(
                    {"task_id": t_id},
                    {"$set": {f"calcs_reversed.0.{data_key}_fs_id": fs_di_}},
                )
        return t_id

    def retrieve_task(self, task_id):
        """
        Retrieves a task document and unpacks the band structure and DOS as dict

        Args:
            task_id: (int) task_id to retrieve

        Returns:
            (dict) complete task document with BS + DOS included
        """
        task_doc = self.collection.find_one({"task_id": task_id})
        calc = task_doc["calcs_reversed"][0]
        if "bandstructure_fs_id" in calc:
            bs = self.get_band_structure(task_id)
            calc["bandstructure"] = bs.as_dict()
        if "dos_fs_id" in calc:
            dos = self.get_dos(task_id)
            calc["dos"] = dos.as_dict()
        if "chgcar_fs_id" in calc:
            chgcar = self.get_chgcar(task_id)
            calc["chgcar"] = chgcar
        if "aeccar0_fs_id" in calc:
            aeccar = self.get_aeccar(task_id)
            calc["aeccar0"] = aeccar["aeccar0"]
            calc["aeccar2"] = aeccar["aeccar2"]
        return task_doc

    def insert_object(self, use_gridfs, *args, **kwargs):
        """Insert the object into big object storage, try maggma_store if
        it is available, if not try storing directly to gridfs.

        Args:
            use_gridfs (bool): Whether to store on gridfs if maggma storage is not available

        Returns:
            fs_id: The id of the stored object
            compression_type: The compress method of the stored object
        """
        # NOTE(review): implicitly returns None when neither backend is
        # configured; the only caller (insert_task) guards against this.
        if self._maggma_store_type is not None:
            return self.insert_maggma_store(*args, **kwargs)
        elif use_gridfs:
            return self.insert_gridfs(*args, **kwargs)

    def insert_gridfs(self, d, collection="fs", compress=True, oid=None, task_id=None):
        """
        Insert the given document into GridFS.

        Args:
            d (dict): the document
            collection (string): the GridFS collection name
            compress (bool): Whether to compress the data or not
            oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
            task_id(int or str): the task_id to store into the gridfs metadata

        Returns:
            file id, the type of compression used.
        """
        oid = oid or ObjectId()
        compression_type = None

        # always perform the string conversion when inserting directly to gridfs
        d = json.dumps(d, cls=MontyEncoder)
        if compress:
            # NOTE(review): `compress` is a bool, so zlib receives level
            # True (== 1); confirm this low compression level is intended.
            d = zlib.compress(d.encode(), compress)
            compression_type = "zlib"

        fs = gridfs.GridFS(self.db, collection)
        m_data = {"compression": compression_type}
        if task_id:
            m_data["task_id"] = task_id
        # Putting task id in the metadata subdocument as per mongo specs:
        # https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst#terms
        fs_id = fs.put(d, _id=oid, metadata=m_data)

        return fs_id, compression_type

    def insert_maggma_store(
        self, d: Any, collection: str, oid: ObjectId = None, task_id: Any = None
    ):
        """
        Insert the given document into a Maggma store, after checking that the
        store is reachable.

        Args:
            d: the document to be stored
            collection (string): the name prefix for the maggma store
            oid (ObjectId()): the _id of the file; if specified, it must not already exist in GridFS
            task_id(int or str): the task_id to store into the gridfs metadata

        Returns:
            file id, the type of compression used.
        """
        oid = oid or str(ObjectId())
        compression_type = None

        doc = {
            "fs_id": oid,
            "maggma_store_type": self.get_store(collection).__class__.__name__,
            "compression": compression_type,
            "data": d,
        }

        search_keys = [
            "fs_id",
        ]
        if task_id is not None:
            search_keys.append("task_id")
            doc["task_id"] = str(task_id)
        elif isinstance(d, dict) and "task_id" in d:
            search_keys.append("task_id")
            doc["task_id"] = str(d["task_id"])

        # make sure the store is available
        with self.get_store(collection) as store:
            ping_ = store.index._collection.database.command("ping")
            if ping_.get("ok", 0) != 1.0:
                raise ConnectionError(
                    f"Not connected to the index store of {self.__name__}.maggma_store[{collection}]"
                )
            if isinstance(store, S3Store):
                # TODO find some way to ping the aws service
                # ping_ = self._maggma_stores[collection].s3_bucket._name
                pass
            if store.compress:
                compression_type = "zlib"
                doc["compression"] = "zlib"
            store.update([doc], search_keys)

        return oid, compression_type

    def get_data_from_maggma_or_gridfs(self, task_id, key):
        """
        look for a task, then the object of type key associated with that task

        Returns:
            The data stored on object storage, typically a dictionary
        """
        m_task = self.collection.find_one({"task_id": task_id}, {"calcs_reversed": 1})
        fs_id = m_task["calcs_reversed"][0][f"{key}_fs_id"]
        obj_dict = None
        if self._maggma_store_type is not None:
            with self.get_store(f"{key}_fs") as store:
                obj_dict = store.query_one({"fs_id": fs_id})["data"]

        # if the object cannot be found then try using the grid_fs method
        if obj_dict is not None:
            return obj_dict
        else:
            fs = gridfs.GridFS(self.db, f"{key}_fs")
            bs_json = zlib.decompress(fs.get(fs_id).read())
            obj_dict = json.loads(bs_json.decode())
            return obj_dict

    def get_band_structure(self, task_id):
        """
        Read the BS data into a PMG BandStructure or BandStructureSymmLine object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            BandStructure or BandStructureSymmLine
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="bandstructure")
        if obj_dict["@class"] == "BandStructure":
            return BandStructure.from_dict(obj_dict)
        elif obj_dict["@class"] == "BandStructureSymmLine":
            return BandStructureSymmLine.from_dict(obj_dict)
        else:
            raise ValueError(
                "Unknown class for band structure! {}".format(obj_dict["@class"])
            )

    def get_dos(self, task_id):
        """
        Read the DOS data into a PMG DOS object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            CompleteDos object
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="dos")
        return CompleteDos.from_dict(obj_dict)

    # NOTE(review): this deprecation message refers to get_chgcar itself —
    # likely a copy-paste remnant; confirm which method is actually deprecated.
    @deprecated("No longer supported, use get_chgcar instead")
    def get_chgcar(self, task_id):
        """
        Read the CHGCAR data into a PMG Chgcar object

        Args:
            task_id(int or str): the task_id containing the data

        Returns:
            chgcar: Chgcar object
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="chgcar")
        return Chgcar.from_dict(obj_dict)

    def get_aeccar(self, task_id, check_valid=True):
        """
        Read the AECCAR0 + AECCAR2 grid_fs data into a Chgcar object

        Args:
            task_id(int or str): the task_id containing the gridfs metadata
            check_valid (bool): make sure that the aeccar is positive definite

        Raises:
            ValueError: if check_valid is set and the summed charge density
                has negative entries (corrupted AECCAR).

        Returns:
            {"aeccar0" : Chgcar, "aeccar2" : Chgcar}: dict of Chgcar objects
        """
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar0")
        aeccar0 = Chgcar.from_dict(obj_dict)
        obj_dict = self.get_data_from_maggma_or_gridfs(task_id, key="aeccar2")
        aeccar2 = Chgcar.from_dict(obj_dict)
        if check_valid and (aeccar0.data["total"] + aeccar2.data["total"]).min() < 0:
            # Bug fix: the original constructed this ValueError without
            # raising it, so the validity check silently did nothing.
            raise ValueError(
                f"The AECCAR seems to be corrupted for task_id = {task_id}"
            )
        return {"aeccar0": aeccar0, "aeccar2": aeccar2}
# TODO: @albalu, @matk86, @computron - add BoltztrapCalcDB management here -computron, matk86
def put_file_in_gridfs(
    file_path, db, collection_name=None, compress=False, compression_type=None
):
    """
    Helper function to store a file in gridfs.

    Args:
        file_path (str): path to the file that should be saved.
        db (CalcDb): the interface with the database.
        collection_name (str): optionally modify the name of the collection
            with respect to the one included in the db.
        compress (bool): if True the file will be compressed with zlib.
        compression_type (str): if the file is already compressed, the
            compression type to record in the metadata.

    Returns:
        ObjectId: the mongodb id of the file that has been saved.
    """
    with open(file_path, "rb") as fh:
        payload = fh.read()

    if compress:
        payload = zlib.compress(payload, compress)
        compression_type = "zlib"

    target_collection = db.collection if collection_name is None else collection_name
    bucket = gridfs.GridFS(db.db, target_collection)
    return bucket.put(payload, metadata={"compression": compression_type})
| [
37811,
198,
1212,
8265,
15738,
262,
6831,
6097,
13,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
1976,
8019,
198,
6738,
19720,
1330,
4377,
198,
198,
11748,
10706,
9501,
198,
6738,
275,
1559,
1330,
9515,
7390,
198,
6738,
2153,
70,
261... | 2.127497 | 7,059 |
from rest_framework import filters
from lego.apps.permissions.utils import get_permission_handler
class LegoPermissionFilter(filters.BaseFilterBackend):
    """
    Use permissions to filter API responses.

    DRF filter backend; the queryset filtering itself is presumably
    implemented via the permission handler utilities imported above.
    """
| [
6738,
1334,
62,
30604,
1330,
16628,
198,
198,
6738,
1232,
78,
13,
18211,
13,
525,
8481,
13,
26791,
1330,
651,
62,
525,
3411,
62,
30281,
628,
198,
4871,
33198,
5990,
3411,
22417,
7,
10379,
1010,
13,
14881,
22417,
7282,
437,
2599,
198,
... | 3.444444 | 63 |
"""Functions to get dates for various time ranges in RCA."""
import calendar
from datetime import date, timedelta
from typing import Tuple
from dateutil.relativedelta import relativedelta
def get_dates_for_last_30_days(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the last 30 days.

    The first tuple contains t-61, t-31.
    The second tuple contains t-30, t.
    """
    window = timedelta(days=30)
    rca_start_date = end_date - window
    base_end_date = rca_start_date - timedelta(days=1)
    return (base_end_date - window, base_end_date), (rca_start_date, end_date)
def get_dates_for_last_7_days(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the last 7 days.

    The first tuple contains t-15, t-8.
    The second tuple contains t-7, t.
    """
    window = timedelta(days=7)
    rca_start_date = end_date - window
    base_end_date = rca_start_date - timedelta(days=1)
    return (base_end_date - window, base_end_date), (rca_start_date, end_date)
def get_dates_for_previous_day(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the previous day.

    The first tuple contains t-1, t-1.
    The second tuple contains t, t.
    """
    previous_day = end_date - timedelta(days=1)
    return (previous_day, previous_day), (end_date, end_date)
def get_dates_for_month_on_month(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the month on month.

    The first tuple contains start of prev month, end of prev month.
    The second tuple contains start of current month, t.
    """
    rca_start_date = end_date.replace(day=1)
    # Last day of the previous month, then snap back to its first day.
    base_end_date = rca_start_date - timedelta(days=1)
    base_start_date = base_end_date.replace(day=1)
    return (base_start_date, base_end_date), (rca_start_date, end_date)
def get_dates_for_month_to_date(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the month to date.

    The first tuple contains start of prev month, date in prev month where
    date = t (clamped to the last day of the previous month when t's
    day-of-month does not exist there, e.g. Mar 31 -> Feb 28/29).
    The second tuple contains start of current month, t.
    """
    rca_start_date = end_date.replace(day=1)
    # First day of the previous month, computed without dateutil.
    base_start_date = (rca_start_date - timedelta(days=1)).replace(day=1)
    # Clamp the day to the length of the previous month; this replaces the
    # original try/except branching over Feb/leap-year/30-day months with a
    # single equivalent expression.
    days_in_prev_month = calendar.monthrange(
        base_start_date.year, base_start_date.month
    )[1]
    base_end_date = base_start_date.replace(
        day=min(end_date.day, days_in_prev_month)
    )
    return (base_start_date, base_end_date), (rca_start_date, end_date)
def get_dates_for_week_on_week(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the week on week.

    The first tuple contains start of prev week, end of prev week.
    The second tuple contains start of current week, t.
    """
    # Monday of the current week (weekday() is 0 for Monday).
    rca_start_date = end_date - timedelta(days=end_date.weekday())
    base_start_date = rca_start_date - timedelta(days=7)
    base_end_date = rca_start_date - timedelta(days=1)
    return (base_start_date, base_end_date), (rca_start_date, end_date)
def get_dates_for_week_to_date(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the week to date.

    The first tuple contains start of prev week, date in prev week where date = t.
    The second tuple contains start of current week, t.
    """
    weekday = end_date.weekday()
    # Monday of the current week, then the same weekday one week earlier.
    rca_start_date = end_date - timedelta(days=weekday)
    base_start_date = rca_start_date - timedelta(days=7)
    base_end_date = base_start_date + timedelta(days=weekday)
    return (base_start_date, base_end_date), (rca_start_date, end_date)
def get_dates_for_quarter_on_quarter(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the quarter on quarter.

    The first tuple contains start of prev quarter, end of prev quarter.
    The second tuple contains start of current quarter, t.
    """
    # Quarter start months are 1, 4, 7, 10.
    quarter_start_month = 3 * ((end_date.month - 1) // 3) + 1
    rca_start_date = end_date.replace(month=quarter_start_month, day=1)
    base_end_date = rca_start_date - timedelta(days=1)
    base_start_date = base_end_date.replace(
        month=3 * ((base_end_date.month - 1) // 3) + 1, day=1
    )
    return (base_start_date, base_end_date), (rca_start_date, end_date)
def get_dates_for_quarter_to_date(
    end_date: date,
) -> Tuple[Tuple[date, date], Tuple[date, date]]:
    """Returns dates for running RCA on the quarter to date.

    The first tuple contains start of prev quarter, the date in the prev
    quarter at the same day-offset as t (clamped to the end of the prev
    quarter when the current quarter is longer).
    The second tuple contains start of current quarter, t.
    """
    # Quarter start months are 1, 4, 7, 10.
    quarter_start_month = 3 * ((end_date.month - 1) // 3) + 1
    rca_start_date = end_date.replace(month=quarter_start_month, day=1)
    prev_quarter_end = rca_start_date - timedelta(days=1)
    base_start_date = prev_quarter_end.replace(
        month=3 * ((prev_quarter_end.month - 1) // 3) + 1, day=1
    )
    # Same day-offset into the previous quarter, clamped to its last day.
    # Replaces the original `(month + 3) % 12` year/month arithmetic, which
    # only worked because quarter starts never hit month 9.
    base_end_date = min(
        base_start_date + (end_date - rca_start_date), prev_quarter_end
    )
    return (base_start_date, base_end_date), (rca_start_date, end_date)
| [
37811,
24629,
2733,
284,
651,
9667,
329,
2972,
640,
16069,
287,
371,
8141,
526,
15931,
198,
198,
11748,
11845,
198,
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
3128,
22602,
13,
241... | 2.505682 | 2,464 |
from snappi_ixnetwork.exceptions import SnappiIxnException
def test_Bad_Request_server_side(api, b2b_raw_config, utils):
    """
    Configure a raw ethernet flow whose src counter start is (deliberately)
    a list rather than a string, then verify that set_config is rejected
    with a SnappiIxnException carrying a 400/500 status and list payloads.
    """
    flow = b2b_raw_config.flows[0]
    count = 10
    src = ["00:0C:29:E3:53:EA"]
    dst = "00:0C:29:E3:53:F4"
    step = "00:00:00:00:01:00"
    eth_step = 2

    flow.packet.ethernet()
    eth = flow.packet[-1]

    src_counter = eth.src.increment
    src_counter.start = src
    src_counter.step = step
    src_counter.count = count

    dst_counter = eth.dst.decrement
    dst_counter.start = dst
    dst_counter.step = step
    dst_counter.count = count

    type_counter = eth.ether_type.increment
    type_counter.step = eth_step
    type_counter.count = count

    try:
        api.set_config(b2b_raw_config)
        assert False
    except SnappiIxnException as err:
        print(err)
        assert err.status_code in [400, 500]
        assert err.args[0] in [400, 500]
        assert isinstance(err.message, list)
        assert isinstance(err.args[1], list)
| [
6738,
3013,
1324,
72,
62,
844,
27349,
13,
1069,
11755,
1330,
5489,
1324,
72,
40,
87,
77,
16922,
628,
198,
4299,
1332,
62,
22069,
62,
18453,
62,
15388,
62,
1589,
7,
15042,
11,
275,
17,
65,
62,
1831,
62,
11250,
11,
3384,
4487,
2599,... | 2.286561 | 506 |
import math, copy
import numpy as np
| [
11748,
10688,
11,
4866,
198,
11748,
299,
32152,
355,
45941,
198
] | 3.363636 | 11 |
# We have an array A with n numbers. Propose an O(n) algorithm that decides
# whether there exists a number x (the so-called leader of A) that occurs in
# A on more than half of the positions.
# NOTE(review): leader() is defined elsewhere in this file.
A = [12, 3, 5, 7, 7, 12, 7, 4, 10, 2, 7, 7, 7, 7, 7]
print(leader(A))
| [
2,
337,
14814,
46078,
128,
227,
7400,
677,
128,
247,
317,
1976,
299,
3476,
14969,
6277,
13,
27631,
89,
128,
247,
1976,
499,
1773,
261,
8455,
38325,
435,
70,
652,
17209,
267,
1976,
41615,
78,
129,
120,
29941,
129,
249,
979,
440,
7,
... | 1.793103 | 145 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unparse meta AST node into a dict"""
# pylint: disable=invalid-name
from synr import Transformer
class MetaUnparser(Transformer):
    """Python AST visitor that unparses a meta AST node into a dict."""
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 3.916667 | 252 |
import setuptools
# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setuptools.setup(
    name="svart",
    version="1.0.0",
    author="Siddharth Dushantha",
    author_email="siddharth.dushantha@gmail.com",
    description="Change between dark/light mode depending on the ambient light intensity",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/sdushantha/svart",
    packages=setuptools.find_packages(),
    # Installs the `svart` console command, dispatching to svart/svart.py:main().
    entry_points={"console_scripts": ["svart = svart.svart:main"]},
    # NOTE(review): verify "alsmodule-pkg" is the intended dependency name.
    install_requires=["alsmodule-pkg"],
)
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
7203,
15675,
11682,
13,
9132,
1600,
366,
81,
1600,
21004,
2625,
40477,
12,
23,
4943,
355,
277,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
13,
961,
3419,
198,
198,
2617,
37623,
1... | 2.765487 | 226 |
from flask import Flask, request, url_for, redirect
import sqlite3
# Path of the SQLite database file used by this app.
dbFile = 'task.db'
# Module-level DB connection handle; presumably opened lazily elsewhere.
conn = None

if __name__ == '__main__':
    # Demo/smoke-test sequence: clear the task table, then insert and print
    # a few tasks. query_db/print_tasks/add_task are defined elsewhere in
    # this file.
    query_db('delete from task')
    print_tasks()
    add_task('CMPUT410')
    add_task('abs')
    add_task('C10')
    print_tasks()
6738,
42903,
1330,
46947,
11,
2581,
11,
19016,
62,
1640,
11,
18941,
198,
11748,
44161,
578,
18,
198,
198,
9945,
8979,
796,
705,
35943,
13,
9945,
6,
198,
37043,
796,
6045,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
... | 2.439252 | 107 |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import glob
import extinction
from astropy.cosmology import Planck13 as cosmo
# The limiting magnitude of your survey.
MAG_LIM = 33.0
# Photometric zero point used when converting fluxes to magnitudes.
ZPT = 30.0
# Effective wavelengths (presumably Angstrom) of the bands, in `bands` order.
wvs = np.asarray([3600, 4760, 6215, 7545, 8700, 10150])
bands = 'ugrizY'
def shift_lc(df):
    """Compute the phase of each observation relative to the trigger time.

    Parameters
    ----------
    df : Pandas DataFrame
        Event dataframe with 'MJD' and 'MJD_TRIGGER' columns.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with the added phase column 'T'.
    """
    phase = df['MJD'] - df['MJD_TRIGGER']
    df['T'] = phase
    return df
def cut_lc(df, min=-30, max=150):
    """Truncate each event's photometry to a phase window.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events; each row
        holds per-event arrays in 'T', 'Flux', 'Flux_Err', 'Filter', 'MJD'.
    min : float
        The minimum phase (relative to trigger) at which to truncate photometry.
    max : float
        The maximum phase (relative to trigger) at which to truncate photometry.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with truncated per-row arrays.
    """
    for idx, row in df.iterrows():
        phase = row['T']
        # Keep strictly inside the (min, max) window.
        keep = (phase > min) & (phase < max)
        df.at[idx, 'T'] = phase[keep]
        df.at[idx, 'Filter'] = row['Filter'][keep]
        df.at[idx, 'MJD'] = row['MJD'][keep]
        df.at[idx, 'Flux'] = row['Flux'][keep]
        df.at[idx, 'Flux_Err'] = row['Flux_Err'][keep]
    return df
def correct_time_dilation(df):
    """Convert observed phases to rest-frame phases.

    Divides each event's phase array 'T' by (1 + z) to undo cosmological
    time dilation.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events; needs a
        per-row 'T' array and a 'ZCMB' redshift column.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with rest-frame times written back into 'T'.
    """
    for idx, row in df.iterrows():
        # Bug fix: write back via df.at — the original assigned into the
        # `row` copy returned by iterrows(), leaving the dataframe unchanged.
        df.at[idx, 'T'] = row['T'] / (1. + row.ZCMB)
    return df
def correct_extinction(df, wvs):
    """Correct photometry for Milky Way extinction.

    Requires an 'MWEBV' column in the dataframe; uses extinction.fm07 to get
    the per-band extinction in magnitudes.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events.
    wvs : array-like
        Effective wavelengths of the bands, in the same order as the
        module-level `bands` string.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with de-extincted magnitudes in 'Mag'.
    """
    for idx, row in df.iterrows():
        alams = extinction.fm07(wvs, row.MWEBV)
        tempMag = np.array(row.Mag)
        for i, alam in enumerate(alams):
            if bands[i] in row.Filter:
                # Bug fix: mask every observation taken in this band. The
                # original indexed `[0]`, comparing only the first filter
                # entry and producing a scalar bool index, which mis-applied
                # the correction.
                ii = np.array(row.Filter) == bands[i]
                tempMag[ii] -= alam
        df.at[idx, 'Mag'] = tempMag
    return df
def calc_abs_mags(df, err_fill=1.0):
    """Convert fluxes to K-corrected absolute magnitudes, filling gaps.

    Non-detections (NaN magnitudes) are replaced by the absolute limiting
    magnitude of the survey with a dummy uncertainty.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events.
    err_fill : float
        The dummy uncertainty to report for filled-in values.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with 'Mag', 'Mag_Err' and 'Abs_Lim_Mag' columns.
    """
    df['Mag'] = [[np.nan]] * len(df)
    df['Mag_Err'] = [[np.nan]] * len(df)
    df['Abs_Lim_Mag'] = np.nan
    for idx, row in df.iterrows():
        k_correction = 2.5 * np.log10(1. + row.ZCMB)
        # Luminosity distance in Mpc.
        dist = cosmo.luminosity_distance([row.ZCMB]).value[0]
        dist_mod = 5. * np.log10(dist * 1e6 / 10.0)
        abs_mags = -2.5 * np.log10(row.Flux) + ZPT - dist_mod + k_correction
        # Sketchy way to calculate error - update later
        abs_mags_plus_err = (
            -2.5 * np.log10(row.Flux + row.Flux_Err) + ZPT - dist_mod + k_correction
        )
        abs_mags_err = np.abs(abs_mags_plus_err - abs_mags)
        abs_lim_mag = MAG_LIM - dist_mod + k_correction
        # NaN magnitudes (non-detections / negative fluxes) get the limit.
        abs_mags_err[abs_mags != abs_mags] = err_fill
        abs_mags[abs_mags != abs_mags] = abs_lim_mag
        df.at[idx, 'Mag'] = abs_mags
        df.at[idx, 'Mag_Err'] = abs_mags_err
        df.at[idx, 'Abs_Lim_Mag'] = abs_lim_mag
    return df
#def getGPLCs(df):
def stackInputs(df, params):
    """Stack each event's light curve into a 2-D numpy matrix.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events.
    params : dict
        Run parameters. Uses 'GP' (bool), 'bands' (GP branch) and
        'band_stack' (non-GP branch).

    Returns
    -------
    dict
        Maps each event's CID to its stacked numpy matrix.
    """
    LCs = {}
    if params['GP']:
        bands = params['bands']  # use all bands if we have gp-interpolation for them!
        for idx, row in df.iterrows():
            Time = row['T']
            Mag = row.Mag
            Mag_Err = row.Mag_Err
            Filt = row.Filter
            for i in np.arange(len(bands)):
                band = bands[i]
                bandTimes = Time[Filt == band]
                bandMags = Mag[Filt == band]
                bandErrs = Mag_Err[Filt == band]
                if i == 0:
                    # NOTE(review): the first band contributes only its time
                    # axis; its mags/errs are never stacked — confirm intended.
                    matrix = [bandTimes]
                else:
                    matrix.append([bandMags, bandErrs])
            matrix = np.vstack(matrix)
            LCs[row.CID] = matrix
    else:
        bands = params['band_stack']
        # get max length of a matrix
        maxLen = np.nanmax([len(x) for x in df['MJD'].values])
        for idx, row in df.iterrows():
            Time = row['T']
            Mag = row.Mag
            Mag_Err = row.Mag_Err
            Filt = row.Filter
            for band in bands:
                # Fallback value if this band has no observations.
                # NOTE(review): shape (maxLen, 3) differs from the (3, N)
                # vstack result below — confirm downstream expectations.
                matrix = np.zeros((maxLen, 3))
                if np.nansum(Filt == band) == 0:
                    continue
                bandTimes = Time[Filt == band]
                bandMags = Mag[Filt == band]
                bandErrs = Mag_Err[Filt == band]
                padLen = maxLen - len(bandMags)
                abs_mag_lim = df.at[idx, 'Abs_Lim_Mag'].astype(np.float64)
                padR = int(padLen / 2)
                padF = padR
                if padLen % 2 == 1:
                    # pad more on the forward end than the back end
                    padF += 1
                padArr_R = [abs_mag_lim] * padR
                padErr_R = [1.0] * padR
                padArr_F = [abs_mag_lim] * padF
                padErr_F = [1.0] * padF
                # NOTE(review): pad_cadence is not defined in this function;
                # presumably a module-level constant — confirm.
                timePad_R = -np.arange(0, padR) * pad_cadence - pad_cadence + np.nanmin(bandTimes)
                # Bug fix: the original called np.flip without using its
                # return value, leaving the rear padding times descending.
                timePad_R = np.flip(timePad_R)
                timePad_F = np.arange(0, padF) * pad_cadence + pad_cadence + np.nanmax(bandTimes)
                # combine
                stackTimes = np.concatenate([timePad_R, bandTimes, timePad_F])
                stackMags = np.concatenate([padArr_R, bandMags, padArr_F])
                stackErrs = np.concatenate([padErr_R, bandErrs, padErr_F])
                matrix = np.vstack([stackTimes, stackMags, stackErrs])
            LCs[row.CID] = matrix
    return LCs
#def getGPLCs(df):
def stackGPInputs(df, bands='ugrizY'):
    """Stack GP-interpolated light curves into per-event matrices.

    The GP model evaluates every band on a common time grid, so each event's
    matrix has one shared time row followed by a (flux, flux_err) row pair
    per band: shape (1 + 2*len(bands), n_times).

    Parameters
    ----------
    df : Pandas DataFrame
        Dataframe with per-row arrays 'GP_T', 'GP_Flux', 'GP_Flux_Err',
        'GP_Filter' and a 'CID' identifier column.
    bands : str
        Band labels, one character per band.

    Returns
    -------
    dict
        Maps each event's CID to its stacked numpy matrix.
    """
    LCs = {}
    for idx, row in df.iterrows():
        Filt = row['GP_Filter']
        # In the GP model all bands share one time grid, so any band's
        # times will do; use the first requested band.
        Time = row['GP_T'][Filt == bands[0]]
        rows = [Time]
        for band in bands:
            rows.append(row['GP_Flux'][Filt == band])
            rows.append(row['GP_Flux_Err'][Filt == band])
        # Bug fix: the original referenced an undefined `stackTimes` name
        # (NameError for bands == 'u') and vstacked onto a shape-mismatched
        # np.zeros seed, so it raised for any input. This implements the
        # stacking the surrounding code evidently intends.
        LCs[row.CID] = np.vstack(rows)
    return LCs
def encode_classes(df):
    """Encode the transient classes as integer IDs.

    Parameters
    ----------
    df : Pandas DataFrame
        The dataframe containing the photometry of all events; must have a
        'Type' column.

    Returns
    -------
    Pandas DataFrame
        The same dataframe with the encoded column 'Type_ID'.
    dict
        Maps each integer ID back to its class name.
    """
    df['Type_ID'] = df['Type'].astype('category').cat.codes
    # Recover the ID -> class-name mapping from the unique (Type, Type_ID)
    # pairs, ordered by ID.
    encoding_dict = (
        df[['Type', 'Type_ID']]
        .drop_duplicates()
        .sort_values('Type_ID')
        .reset_index(drop=True)['Type']
        .to_dict()
    )
    return df, encoding_dict
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
15095,
198,
11748,
21935,
198,
6738,
6468,
28338,
... | 2.065356 | 4,376 |
from django.contrib import admin
from .models import Weight
class WeightAdmin(admin.ModelAdmin):
    """ Admin for weight. """

    model = Weight
    # Columns shown in the admin changelist.
    list_display = ['id', 'datestamp', 'weight', 'author']


admin.site.register(Weight, WeightAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
14331,
628,
198,
4871,
14331,
46787,
7,
28482,
13,
17633,
46787,
2599,
198,
220,
220,
220,
37227,
32053,
329,
3463,
13,
37227,
198,
220,
220,
220,
2746,
796,
... | 3.246753 | 77 |
#!/usr/bin/env python3
import json
# Load the TMS station GeoJSON and keep only stations in the Uusimaa province.
with open("tms-stations.json", "r") as read_file:
    data = json.load(read_file)

# Replace the FeatureCollection's features with the Uusimaa subset.
data["features"] = [
    sta for sta in data["features"]
    if sta["properties"]["province"] == "Uusimaa"
]

with open('tms-stations-uusimaa.json', 'w') as data_file:
    # Bug fix: ``json.dump`` returns None; the original assigned that None
    # back into ``data``, silently clobbering the parsed document.
    json.dump(data, data_file)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
33918,
198,
198,
4480,
1280,
7203,
83,
907,
12,
301,
602,
13,
17752,
1600,
366,
81,
4943,
355,
1100,
62,
7753,
25,
198,
220,
220,
220,
1366,
796,
33918,
13,
2220,
7,
961... | 2.358025 | 162 |
"""Conftest for script tests."""
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
| [
37811,
3103,
701,
395,
329,
4226,
5254,
526,
15931,
198,
198,
6738,
5254,
13,
5589,
3906,
13,
17585,
4798,
13,
1102,
701,
395,
1330,
17071,
62,
17585,
4798,
62,
12924,
5039,
220,
1303,
645,
20402,
25,
376,
21844,
198
] | 3.076923 | 39 |
##
# File: StashableBase.py
#
# Base class template that implements a common pattern for to backup and restore
# cache directories to stash storage.
#
# Updates:
# 19-Jul-2021 jdw add git push support
##
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
import logging
import os
import time
from rcsb.utils.io.StashUtil import StashUtil
logger = logging.getLogger(__name__)
class StashableBase(object):
    """Base class implementing backup and restore methods for cache directories to/from stash storage."""

    def __init__(self, cachePath, dirNameL):
        """Methods implementing backup and restore operations to and from stash storage. Remote stash
        storage is defined using the standard configuration options. Primary and fallback stash servers
        are supported. Git repositories are also supported as stash targets.

        Args:
            cachePath (str): path to directory containing cached directories
            dirNameL (list): list of target directory names in the cache directory for backup and restore operations
        """
        self.__cachePath = cachePath
        self.__dirNameL = dirNameL
        # The stash bundle lives under <cachePath>/stash and is named after the first target directory.
        self.__stU = StashUtil(os.path.join(self.__cachePath, "stash"), self.__dirNameL[0])
        #

    def restore(self, cfgOb, configName, remotePrefix=None, useStash=True, useGit=False):
        """Restore the target cache directory from stash storage.

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.
            useStash (bool, optional): use "stash" storage services. Defaults to True.
            useGit (bool, optional): use a git repository service. Defaults to False.

        Returns:
            bool: True for success or False otherwise
        """
        ok = False
        # Try stash storage first; git is only consulted when the stash restore
        # failed (or stash was disabled).  The original also tested ``not ok``
        # here, which is always true at this point.
        if useStash:
            ok = self.__restoreFromStash(cfgOb, configName, remotePrefix=remotePrefix)
        #
        if useGit and not ok:
            ok = self.__restoreFromGit(cfgOb, configName, remotePrefix=remotePrefix)
        return ok

    def __restoreFromStash(self, cfgOb, configName, remotePrefix=None):
        """Restore the target cache directory from stash storage.

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            bool: True for success or False otherwise
        """
        ok = False
        try:
            startTime = time.time()
            url = cfgOb.get("STASH_SERVER_URL", sectionName=configName)
            userName = cfgOb.get("_STASH_AUTH_USERNAME", sectionName=configName)
            password = cfgOb.get("_STASH_AUTH_PASSWORD", sectionName=configName)
            basePath = cfgOb.get("_STASH_SERVER_BASE_PATH", sectionName=configName)
            ok = self.__fromStash(url, basePath, userName=userName, password=password, remoteStashPrefix=remotePrefix)
            logger.info("Restored %r data file from stash (%r)", self.__dirNameL, ok)
            if not ok:
                # Retry once against the fallback stash server.
                urlFallBack = cfgOb.get("STASH_SERVER_FALLBACK_URL", sectionName=configName)
                ok = self.__fromStash(urlFallBack, basePath, userName=userName, password=password, remoteStashPrefix=remotePrefix)
                logger.info("Restored %r data file from fallback stash (%r)", self.__dirNameL, ok)
            #
            logger.info("Completed stash restore (%r) at %s (%.4f seconds)", ok, time.strftime("%Y %m %d %H:%M:%S", time.localtime()), time.time() - startTime)
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        #
        return ok

    def __restoreFromGit(self, cfgOb, configName, remotePrefix=None):
        """Restore from a partitioned git stash repository via https fetches.

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            bool: True for success or False otherwise
        """
        ok = False
        try:
            startTime = time.time()
            # accessToken = cfgOb.get("_STASH_GIT_ACCESS_TOKEN", sectionName=configName)
            gitRawHost = cfgOb.get("_STASH_GIT_RAW_SERVER_HOST", sectionName=configName)
            gitRepositoryPath = cfgOb.get("STASH_GIT_REPOSITORY_PATH", sectionName=configName)
            gitBranch = cfgOb.get("STASH_GIT_REPOSITORY_BRANCH", sectionName=configName)
            # maxMegaBytes = cfgOb.get("STASH_GIT_REPOSITORY_MAX_SIZE_MB", sectionName=configName)
            #
            ok = self.__stU.fetchPartitionedBundle(self.__cachePath, gitRepositoryPath, gitRawHost=gitRawHost, gitBranch=gitBranch, remoteStashPrefix=remotePrefix)
            logger.info(
                "Completed git restore for %r (%r) data at %s (%.4f seconds)",
                self.__dirNameL,
                ok,
                time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                time.time() - startTime,
            )
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        return ok

    def backup(self, cfgOb, configName, remotePrefix=None, useStash=True, useGit=False):
        """Backup the target cache directory to remote stash and/or git storage.

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.
            useStash (bool, optional): use "stash" storage services. Defaults to True.
            useGit (bool, optional): use a git repository service. Defaults to False.

        Returns:
            bool: True for success or False otherwise
        """
        # Build the local bundle once; it is shared by both backup targets.
        ok = self.__stU.makeBundle(self.__cachePath, self.__dirNameL)
        ok1 = True
        if ok and useStash:
            ok1 = self.__backupToStash(cfgOb, configName, remotePrefix=remotePrefix)
        ok2 = True
        if ok and useGit:
            ok2 = self.__backupToGit(cfgOb, configName, remotePrefix=remotePrefix)
        return ok and ok1 and ok2

    def __backupToStash(self, cfgOb, configName, remotePrefix=None):
        """Backup the target cache directory to stash storage (primary and fallback servers).

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            bool: True for success or False otherwise
        """
        ok1 = ok2 = False
        try:
            startTime = time.time()
            userName = cfgOb.get("_STASH_AUTH_USERNAME", sectionName=configName)
            password = cfgOb.get("_STASH_AUTH_PASSWORD", sectionName=configName)
            basePath = cfgOb.get("_STASH_SERVER_BASE_PATH", sectionName=configName)
            url = cfgOb.get("STASH_SERVER_URL", sectionName=configName)
            urlFallBack = cfgOb.get("STASH_SERVER_FALLBACK_URL", sectionName=configName)
            # Both the primary and the fallback server must succeed for the
            # backup to be considered complete.
            ok1 = self.__toStash(url, basePath, userName=userName, password=password, remoteStashPrefix=remotePrefix)
            ok2 = self.__toStash(urlFallBack, basePath, userName=userName, password=password, remoteStashPrefix=remotePrefix)
            logger.info(
                "Completed backup for %r data (%r/%r) at %s (%.4f seconds)",
                self.__dirNameL,
                ok1,
                ok2,
                time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                time.time() - startTime,
            )
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        # Use logical 'and' rather than the original bitwise '&' on booleans.
        return ok1 and ok2

    def __backupToGit(self, cfgOb, configName, remotePrefix=None):
        """Backup to the git stash repository.

        Args:
            cfgOb (obj): configuration object (ConfigUtil())
            configName (str): configuration section name
            remotePrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            bool: True for success or False otherwise
        """
        ok = False
        try:
            startTime = time.time()
            accessToken = cfgOb.get("_STASH_GIT_ACCESS_TOKEN", sectionName=configName)
            gitHost = cfgOb.get("_STASH_GIT_SERVER_HOST", sectionName=configName)
            gitRepositoryPath = cfgOb.get("STASH_GIT_REPOSITORY_PATH", sectionName=configName)
            gitBranch = cfgOb.get("STASH_GIT_REPOSITORY_BRANCH", sectionName=configName)
            maxMegaBytes = cfgOb.get("STASH_GIT_REPOSITORY_MAX_SIZE_MB", sectionName=configName)
            #
            ok = self.__stU.pushBundle(gitRepositoryPath, accessToken, gitHost=gitHost, gitBranch=gitBranch, remoteStashPrefix=remotePrefix, maxSizeMB=maxMegaBytes)
            logger.info(
                "Completed git backup for %r (%r) data at %s (%.4f seconds)",
                self.__dirNameL,
                ok,
                time.strftime("%Y %m %d %H:%M:%S", time.localtime()),
                time.time() - startTime,
            )
        except Exception as e:
            logger.exception("Failing with %s", str(e))
        return ok

    def __toStash(self, url, stashRemoteDirPath, userName=None, password=None, remoteStashPrefix=None):
        """Copy tar and gzipped bundled cache data to remote server/location.

        Args:
            url (str): server URL (e.g. sftp://hostname.domain) None for local host
            stashRemoteDirPath (str): path to target directory on remote server
            userName (str, optional): server username. Defaults to None.
            password (str, optional): server password. Defaults to None.
            remoteStashPrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            (bool): True for success or False otherwise
        """
        ok = False
        try:
            if not url:
                # stashing locally then do this under the cache path
                stashRemoteDirPath = os.path.join(self.__cachePath, stashRemoteDirPath)
            ok = self.__stU.storeBundle(url, stashRemoteDirPath, remoteStashPrefix=remoteStashPrefix, userName=userName, password=password)
        except Exception as e:
            logger.error("Failing with url %r stashDirPath %r: %s", url, stashRemoteDirPath, str(e))
        return ok

    def __fromStash(self, url, stashRemoteDirPath, userName=None, password=None, remoteStashPrefix=None):
        """Restore local cache from a tar and gzipped bundle fetched from a remote server/location.

        Args:
            url (str): server URL (e.g. sftp://hostname.domain) None for local host
            stashRemoteDirPath (str): path to target directory on remote server
            userName (str, optional): server username. Defaults to None.
            password (str, optional): server password. Defaults to None.
            remoteStashPrefix (str, optional): channel prefix. Defaults to None.

        Returns:
            (bool): True for success or False otherwise
        """
        ok = False
        try:
            if not url:
                # A falsy URL means local stash storage under the cache path.
                stashRemoteDirPath = os.path.join(self.__cachePath, stashRemoteDirPath)
            ok = self.__stU.fetchBundle(self.__cachePath, url, stashRemoteDirPath, remoteStashPrefix=remoteStashPrefix, userName=userName, password=password)
        except Exception as e:
            logger.error("Failing with url %r stashDirPath %r: %s", url, stashRemoteDirPath, str(e))
        return ok
| [
2235,
198,
2,
9220,
25,
520,
1077,
540,
14881,
13,
9078,
198,
2,
198,
2,
7308,
1398,
11055,
326,
23986,
257,
2219,
3912,
329,
284,
11559,
290,
11169,
198,
2,
12940,
29196,
284,
38305,
6143,
13,
198,
2,
198,
2,
28090,
25,
198,
2,
... | 2.317825 | 5,094 |
"""
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
from collections import defaultdict
from typing import Dict, Tuple
import torch
import torch.nn as nn
from torch.nn import functional as F
# -----------------------------------------------------------------------------
import os
if int(os.environ.get('USE_LIGHTNING', 0)):
import pytorch_lightning as pl
else:
import mingpt.fake_lightning as pl
# -----------------------------------------------------------------------------
from mingpt.constants import NEPTUNE_RUN
from loguru import logger as log
class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.

    "Causal" refers to the mask: each position may only attend to itself and
    earlier positions, as required for autoregressive language modeling.
    """
class Block(nn.Module):
    """ an unassuming Transformer block: per the module docstring, a sequential
    combination of a self-attention block and a 1-hidden-layer MLP block, both
    feeding into a residual pathway """
class GPT(pl.LightningModule):
    """ the full GPT language model, with a context size of block_size """

    def init_blocks(self, attn_pdrop, block_size, embedding_dim, n_head, n_layer, resid_pdrop):
        """
        Here we allow different in/out neuron counts in each transformer layer aka block. Transformers usually
        keep the same number of neurons at every layer, but a la Perceiver IO, these types of things can be changed
        like any MLP. This was originally motivated here by OOM where reducing the output of the first layer
        reduces the amount of memory used in all subsequent layers that use the reduced layer width.

        Returns a tuple of (nn.Sequential of Blocks, per-layer scale factors, per-layer output widths).
        """
        blocks = []
        # First layer shrinks its output to 1/4 width; the rest keep their width.
        approx_scale = [0.25] + [1] * (n_layer - 1)  # TODO: Allow passing this in
        assert len(approx_scale) == n_layer
        out_dims = []
        for l_i in range(n_layer):
            # NOTE(review): ``make_divisible_by_heads`` is defined elsewhere in
            # this class; presumably it rounds a width to a multiple of n_head
            # so the per-head split is exact -- confirm against its definition.
            embedding_dim = self.make_divisible_by_heads(embedding_dim, n_head)
            out_embedding_dim = self.make_divisible_by_heads(int(approx_scale[l_i] * embedding_dim), n_head)
            blocks.append(Block(embedding_dim, block_size, n_head, attn_pdrop, resid_pdrop, out_embedding_dim))
            # The output width of this block becomes the input width of the next.
            embedding_dim = out_embedding_dim
            out_dims.append(out_embedding_dim)
        return nn.Sequential(*blocks), approx_scale, out_dims

    def _init_weights(self, module):
        """
        Vanilla model initialization:
        - all MatMul weights \in N(0, 0.02) and biases to zero
        - all LayerNorm post-normalization scaling set to identity, so weight=1, bias=0
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def configure_optimizers(self):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """

        # separate out all parameters to those that will and won't experience regularizing weight decay
        decay = set()
        no_decay = set()
        whitelist_weight_modules = (torch.nn.Linear, )
        blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        for mn, m in self.named_modules():
            for pn, p in m.named_parameters():
                fpn = '%s.%s' % (mn, pn) if mn else pn # full param name

                if pn.endswith('bias'):
                    # all biases will not be decayed
                    no_decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                    # weights of whitelist modules will be weight decayed
                    decay.add(fpn)
                elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                    # weights of blacklist modules will NOT be weight decayed
                    no_decay.add(fpn)

        # special case the position embedding parameter in the root GPT module as not decayed
        no_decay.add('pos_emb')

        # validate that we considered every parameter
        param_dict = {pn: p for pn, p in self.named_parameters()}
        inter_params = decay & no_decay
        union_params = decay | no_decay
        assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
            % (str(param_dict.keys() - union_params), )

        # create the pytorch optimizer object
        # NOTE(review): self.weight_decay, self.learning_rate and self.betas are
        # presumably set in __init__ (not visible here) -- confirm.
        optim_groups = [
            {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": self.weight_decay},
            {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        ]
        optimizer = torch.optim.AdamW(optim_groups, lr=self.learning_rate, betas=self.betas)
        self.optimizer = optimizer
        return optimizer
| [
37811,
198,
38,
11571,
2746,
25,
198,
12,
262,
4238,
10717,
10874,
286,
257,
6087,
286,
11241,
21004,
290,
257,
45203,
21004,
198,
12,
262,
6174,
286,
340,
318,
257,
8187,
8379,
286,
3602,
16354,
7021,
198,
220,
220,
220,
532,
1123,
... | 2.594886 | 2,229 |
import random
import json
# IDEAS:
# - attribute to prioritize new person-group pairings instead of new person-person pairings
# - check that each 'personId' of 'apart' constraints contains at least one element
# Note: It is important to resolve the ambiguity when 2 different persons share the same ID in history and person_ids.
# For example, if the ID is the surname, append an extra letter (e.g. a last-name initial) to make each ID unique.
| [
11748,
4738,
198,
11748,
33918,
198,
198,
2,
33497,
1921,
25,
198,
2,
532,
11688,
284,
32980,
649,
1048,
12,
8094,
5166,
654,
2427,
286,
649,
1048,
12,
6259,
5166,
654,
198,
2,
532,
2198,
326,
1123,
705,
6259,
7390,
6,
286,
705,
4... | 4.048077 | 104 |
import os
import re
import sys
from fabric.api import run
from fabric.api import task
from fabric.api import sudo
from fabric.api import put
from fabric.api import env
from fabric.api import settings
from fabric.api import hide
from fabric.contrib import files
from fabric.utils import abort
from cloudy.sys.etc import sys_etc_git_commit
def sys_swap_configure(size='512'):
    """ Creates and installs a swap file, given file size in MB - Ex (cmd:[Size-MB]) """
    swap_file = '/swap/{}MiB.swap'.format(size)
    sudo('mkdir -p /swap')
    if not files.exists(swap_file):
        # Allocate, protect, format and enable the swap file, then persist it in fstab.
        sudo('fallocate -l {}m {}'.format(size, swap_file))
        sudo('chmod 600 {}'.format(swap_file))
        sudo('mkswap {}'.format(swap_file))
        sudo('swapon {}'.format(swap_file))
        sudo('echo "{} swap swap defaults 0 0" | sudo tee -a /etc/fstab'.format(swap_file))
        sys_etc_git_commit('Added swap file ({})'.format(swap_file))
    else:
        # Bug fix: ``print >> sys.stderr`` is Python-2-only syntax and is a
        # SyntaxError on Python 3; sys.stderr.write works on both.
        sys.stderr.write('Swap file ({}) Exists\n'.format(swap_file))
| [
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
198,
6738,
9664,
13,
15042,
1330,
1057,
198,
6738,
9664,
13,
15042,
1330,
4876,
198,
6738,
9664,
13,
15042,
1330,
21061,
198,
6738,
9664,
13,
15042,
1330,
1234,
198,
6738,
9664,
13,... | 2.622739 | 387 |
# -*- coding: utf-8 -*-
# time: 2019/4/22 下午4:33
from typing import Union
from flask import g
from flask import jsonify
from app.libs.status_code import status_codes
def error_response(code: int, msg: str = None):
    """
    Build the JSON response returned when a request fails.

    :param code: application-level status code
    :param msg: optional message; when omitted it is looked up from
        ``status_codes`` using the request language stored on ``flask.g``
    :return: a Flask JSON response with ``code`` and ``msg`` fields
    """
    if msg is None:
        language = g.get('lan', "zh")
        msg = status_codes[code][1][language]
    return jsonify(dict(code=code, msg=msg))
if __name__ == "__main__":
    # Bug fix: the original called the undefined name ``json_response``;
    # the function defined in this module is ``error_response``.
    error_response(0)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
640,
25,
13130,
14,
19,
14,
1828,
220,
10310,
233,
39355,
230,
19,
25,
2091,
198,
6738,
19720,
1330,
4479,
198,
198,
6738,
42903,
1330,
308,
198,
6738,
42903,
1330,... | 2.196507 | 229 |
import pandas as pd
import numpy as np
from scipy.stats import mode, gaussian_kde
from scipy.optimize import minimize, shgo
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import KFold,StratifiedKFold
from sklearn.metrics import f1_score,accuracy_score,precision_score,recall_score,fbeta_score
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context("paper", font_scale=2)
from sklearn.model_selection import GridSearchCV
def cross_valid(model, x, folds, metric, verbose=True):
    """
    K-fold cross validation for general regressors.

    model:   Sklearn model or customized model with fit and predict methods;
    x:       Data as a numpy matrix containing ***the last column as target***;
    folds:   Number of folds;
    metric:  One of 'mae', 'mse', 'rrmse'; anything else falls back to 'rmse';
    verbose: Flag to print a report over the iterations;
    returns: List with the scores over the folds.
    """
    score = []
    # Bug fix: passing random_state together with shuffle=False raises a
    # ValueError in recent scikit-learn releases (it is only meaningful when
    # shuffling), so it is omitted here.
    kf = KFold(folds, shuffle=False)
    i = 0
    for train_index, test_index in kf.split(x):
        xtrain = x[train_index, :]
        xtest = x[test_index, :]
        # Last column is the target; everything before it are the features.
        model.fit(xtrain[:, :-1], xtrain[:, -1])
        ypred = model.predict(xtest[:, :-1])
        ytrue = xtest[:, -1]
        if metric == 'mae':
            score.append(mae(ytrue, ypred))
        elif metric == 'mse':
            score.append(mse(ytrue, ypred))
        elif metric == 'rrmse':
            # NOTE(review): ``rrmse`` is not defined in this module's visible
            # imports -- presumably provided elsewhere in the project.
            score.append(rrmse(ytrue, ypred))
        else:
            # NOTE(review): ``rmse`` is likewise assumed to be defined elsewhere.
            score.append(rmse(xtest[:, -1], ypred))
        if verbose:
            print('-' * 30)
            print(f'\nFold {i + 1} out of {folds}')
            print(f'{metric}: {score[i]}')
        i += 1
    if verbose:
        print(f'\n Overall Score:')
        print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')
    return score
def cross_valid_key(model,x,key,preds,target,metric,verbose=True):
    """
    Cross validation for general regressors, splitting by a key column.

    model: Sklearn model or customized model with fit and predict methods;
    x : DataFrame containing features, the key column and the target column;
    key: Column name containing keys for splitting the folds;
    preds: list of predictor columns (NOTE(review): currently unused -- the
        loop body references a module-level ``feat`` instead; confirm intent);
    target: name of the target column;
    metric : 'mae', 'mse', 'rrmse'; anything else falls back to 'rmse';
    verbose: Flag to print report over iterations;
    returns: List with scores over the folds
    """
    score=[]
    keys = x[key].unique().tolist()
    # NOTE(review): the iterable [1,2,3,4,5] only fixes the number of
    # repetitions (5); ``item`` is used solely in the printed report.
    for idx, item in enumerate([1,2,3,4,5]):
        # NOTE(review): ``split_camp`` and ``feat`` are not defined in this
        # module's visible scope -- presumably provided elsewhere in the file.
        xtrain,xtest = split_camp(x,keys,0.2)
        model.fit(xtrain[feat],xtrain[target])
        ypred = model.predict(xtest[feat])
        ytrue= xtest[target].values
        if metric == 'mae':
            score.append(mae(ytrue,ypred))
        elif metric == 'mse':
            score.append(mse(ytrue,ypred))
        elif metric == 'rrmse':
            score.append(rrmse(ytrue,ypred))
        else:
            score.append(rmse(xtest[target].tolist(),ypred))
        if verbose:
            print('-'*30)
            print(f'\nFold {idx} out of 5')
            print(f'Key {item}')
            print(f'{metric}: {score[idx]}')
    if verbose:
        print(f'\n Overall Score:')
        print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')
    return score
# Adpated from https://machinelearningmastery.com/convert-time-series-supervised-learning-problem-python/
def dataTimeSeries(timesteps, df, predictors, target, dropnan, out=2, dropVars=True):
    """
    Transforms a dataframe into a timeseries for supervised learning.

    timesteps: Number of delays (i.e: timesteps=2 -> (t), (t-1), (t-2));
    df: Dataframe;
    predictors: List of columns in dataframe used as features for the ML algorithm;
    target: Target of the supervised learning;
    dropnan: Flag to drop the NaN values after transforming;
    out: Number of steps to forecast (i.e: out=2 -> (t), (t+1));
    dropVars: Leave only the target of the last timestep on the resulting dataframe.
    """
    series = series_to_supervised(df[predictors + [target]].copy(), timesteps, out, dropnan=dropnan)
    if not dropnan:
        # Bug fix: ``pd.np`` was removed in pandas 2.0 -- use numpy directly.
        series.replace(np.nan, 0, inplace=True)
    # Dropping other variables so only the last timestep's target remains:
    if dropVars:
        # Column indices of the predictor columns in the final timestep.
        index = list(np.arange(series.shape[1] - 2,
                               series.shape[1] - len(predictors) - 2,
                               -1))
        labels = [item for idx, item in enumerate(series.columns)
                  if idx in index]
        series.drop(labels, axis=1, inplace=True)
    return series
class Cross_valid_clf():
    """
    This class does cross validation for general classifiers.

    model: Sklearn model or customized model with fit and predict methods;
    X: array with values for features
    y: array with values for target
    folds: Number of folds;
    metrics : accuracy, f1score, precision, recall, fbeta score;
    stratified: Use stratified Kfold to keep the ratio of classes in all folds;
    beta: Beta parameter for fbeta score metric;
    verbose: Flag to print report over iterations;
    returns: List with scores over the folders
    """
    # NOTE(review): no method bodies are present in this view; the comments
    # below appear to be placeholders for the planned per-metric API.
    #score method

    #f1score method

    #precision score

    #Recall score

    #fbeta score
class Cross_valid_reg():
    """
    This class does cross validation for general regressors.

    model: Sklearn model or customized model with fit and predict methods;
    x : features;
    y: target
    folds: Number of folds;
    metrics : RMSE = root mean squared error; MAE = mean absolute error
    stratified: Use stratified Kfold to keep the ratio of classes in all folds;
    verbose: Flag to print report over iterations;
    returns: List with scores over the folders
    """
    # NOTE(review): no method bodies are present in this view; the comments
    # below appear to be placeholders for the planned per-metric API.
    #score method

    #mae

    #precision score
def feature_importance_plot(algorithm, X_train, y_train, of_type):
    """Plot feature importances (or coefficients) for a classifier or regressor.

    Parameters
    ----------------
    algorithm: estimator whose relevant features are to be ranked
    X_train: axis x of the train dataframe
    y_train: axis y of the target dataframe
    of_type: 'coef' or 'feat', depending on the algorithm.

    Return
    -----------------
    Plot with feature importances
    """
    # Anything other than the two supported modes is silently ignored,
    # matching the behaviour of the original two-branch implementation.
    if of_type not in ("coef", "feat"):
        return
    algorithm.fit(X_train, y_train)
    if of_type == "coef":
        importances = algorithm.coef_.ravel()
    else:
        importances = algorithm.feature_importances_
    coef = pd.DataFrame(importances)
    coef[of_type] = X_train.columns
    plt.figure(figsize=(14, 4))
    ax = sns.barplot(coef[of_type], coef[0], palette="jet_r",
                     linewidth=2, edgecolor="k" * coef[of_type].nunique())
    ax.axhline(0, color="k", linewidth=2)
    plt.ylabel("coefficients")
    plt.xlabel("features")
    plt.xticks(rotation='vertical')
    plt.title('FEATURE IMPORTANCES')
| [
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
629,
541,
88,
13,
34242,
1330,
4235,
11,
31986,
31562,
62,
74,
2934,
201,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
17775,
11,
427,
2188... | 2.153241 | 3,687 |
import time
import cv2
# Open the default camera (device index 0) and measure capture throughput.
cap = cv2.VideoCapture(0)

# Request 640x480 frames at 40 FPS from the capture device.
for prop, value in ((cv2.CAP_PROP_FRAME_WIDTH, 640),
                    (cv2.CAP_PROP_FRAME_HEIGHT, 480),
                    (cv2.CAP_PROP_FPS, 40)):
    cap.set(prop, value)

start = time.time()
frames = 400
# Grab frames back-to-back; the elapsed wall-clock time gives the real FPS.
for _ in range(frames):
    ret, img = cap.read()

print("Time for {0} frames: {1} seconds".format(frames, time.time() - start))
| [
11748,
640,
198,
11748,
269,
85,
17,
198,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
198,
11128,
13,
2617,
7,
33967,
17,
13,
33177,
62,
4805,
3185,
62,
10913,
10067,
62,
54,
2389,
4221,
11,
33759,
8,
198,
11128,
1... | 2.292857 | 140 |
from slot import WeaponBase
from slot.w import agito_buffs
# Element -> weapon mapping: Agito weapons for flame/shadow, High Dragon
# Trial (HDT2) "Absolute" weapons for the other elements.
# NOTE(review): the weapon class names below (Agito_Nothung, HDT2_*, ...) are
# not defined in this module's visible imports -- presumably brought into
# scope elsewhere (e.g. via the slot package); confirm.
flame = Agito_Nothung
water = HDT2_Absolute_Aqua
wind = HDT2_Absolute_Tempest
light = HDT2_Absolute_Lightning
shadow = Agito_Yitian_Jian
6738,
10852,
1330,
13072,
14881,
198,
6738,
10852,
13,
86,
1330,
556,
10094,
62,
65,
18058,
198,
198,
49621,
796,
2449,
10094,
62,
45,
849,
2150,
198,
7050,
796,
5572,
51,
17,
62,
24849,
3552,
62,
32,
39566,
198,
7972,
796,
5572,
51... | 2.648649 | 74 |
from __future__ import (
annotations,
)
from abc import (
ABC,
abstractmethod,
)
from typing import (
TYPE_CHECKING,
AsyncIterator,
Awaitable,
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
MinosSetup,
)
from ..queries import (
_Condition,
_Ordering,
)
from ..transactions import (
TRANSACTION_CONTEXT_VAR,
TransactionEntry,
)
if TYPE_CHECKING:
from ..entities import (
RootEntity,
)
class SnapshotRepository(ABC, MinosSetup):
"""Base Snapshot class.
The snapshot provides a direct accessor to the ``RootEntity`` instances stored as events by the event repository
class.
"""
async def get(self, name: str, uuid: UUID, transaction: Optional[TransactionEntry] = None, **kwargs) -> RootEntity:
"""Get a ``RootEntity`` instance from its identifier.
:param name: Class name of the ``RootEntity``.
:param uuid: Identifier of the ``RootEntity``.
:param transaction: The transaction within the operation is performed. If not any value is provided, then the
transaction is extracted from the context var. If not any transaction is being scoped then the query is
performed to the global snapshot.
:param kwargs: Additional named arguments.
:return: The ``RootEntity`` instance.
"""
if transaction is None:
transaction = TRANSACTION_CONTEXT_VAR.get()
await self.synchronize(**kwargs)
return await self._get(name=name, uuid=uuid, transaction=transaction, **kwargs)
@abstractmethod
async def find(
self,
name: str,
condition: _Condition,
ordering: Optional[_Ordering] = None,
limit: Optional[int] = None,
streaming_mode: bool = False,
transaction: Optional[TransactionEntry] = None,
**kwargs,
) -> AsyncIterator[RootEntity]:
"""Find a collection of ``RootEntity`` instances based on a ``Condition``.
:param name: Class name of the ``RootEntity``.
:param condition: The condition that must be satisfied by the ``RootEntity`` instances.
:param ordering: Optional argument to return the instance with specific ordering strategy. The default behaviour
is to retrieve them without any order pattern.
:param limit: Optional argument to return only a subset of instances. The default behaviour is to return all the
instances that meet the given condition.
:param streaming_mode: If ``True`` return the values in streaming directly from the database (keep an open
database connection), otherwise preloads the full set of values on memory and then retrieves them.
:param transaction: The transaction within the operation is performed. If not any value is provided, then the
transaction is extracted from the context var. If not any transaction is being scoped then the query is
performed to the global snapshot.
:param kwargs: Additional named arguments.
:return: An asynchronous iterator that containing the ``RootEntity`` instances.
"""
if transaction is None:
transaction = TRANSACTION_CONTEXT_VAR.get()
await self.synchronize(**kwargs)
iterable = self._find(
name=name,
condition=condition,
ordering=ordering,
limit=limit,
streaming_mode=streaming_mode,
transaction=transaction,
**kwargs,
)
async for instance in iterable:
yield instance
@abstractmethod
def synchronize(self, **kwargs) -> Awaitable[None]:
"""Synchronize the snapshot to the latest available version.
:param kwargs: Additional named arguments.
:return: This method does not return anything.
"""
return self._synchronize(**kwargs)
@abstractmethod
| [
6738,
11593,
37443,
834,
1330,
357,
198,
220,
220,
220,
37647,
11,
198,
8,
198,
198,
6738,
450,
66,
1330,
357,
198,
220,
220,
220,
9738,
11,
198,
220,
220,
220,
12531,
24396,
11,
198,
8,
198,
6738,
19720,
1330,
357,
198,
220,
220,... | 2.774737 | 1,425 |
#!/usr/bin/env python2
import ptvsd
# Allow other computers to attach to ptvsd at this IP address and port, using the secret
# ptvsd.enable_attach("my_secret", address = ('0.0.0.0', 3000))
# Pause the program until a remote debugger is attached
# ptvsd.wait_for_attach()
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('bmh')
import argparse
print("""
Note: This example assumes that `name i` corresponds to `label i`
in `labels.csv`.
""")

# workDir must contain labels.csv and reps.csv; --names supplies one display
# name per label, in label order.
parser = argparse.ArgumentParser()
parser.add_argument('workDir', type=str)
parser.add_argument('--names', type=str, nargs='+', required=True)
args = parser.parse_args()

# First column of labels.csv is the integer label; reps.csv holds one
# representation vector per row.
# NOTE(review): DataFrame.as_matrix() is Python-2-era pandas and was removed
# in modern releases -- fine for the python2 target of this script.
y = pd.read_csv("{}/labels.csv".format(args.workDir), header=None).as_matrix()[:, 0]
X = pd.read_csv("{}/reps.csv".format(args.workDir), header=None).as_matrix()

target_names = np.array(args.names)
colors = cm.Dark2(np.linspace(0, 1, len(target_names)))

# Reduce to at most 50 PCA components before t-SNE (skip when too few samples).
nc = None if len(X) < 50 else 50
X_pca = PCA(n_components=nc).fit_transform(X, X)
# Produce one t-SNE scatter plot per perplexity setting.
for p in [2,5,10,30,50,100]:
    tsne = TSNE(n_components=2, init='random', random_state=0, perplexity=p)
    X_r = tsne.fit_transform(X_pca)

    plt.figure()
    # Labels are assumed to be 1-based (see the note printed above).
    for c, i, target_name in zip(colors,
                                 list(range(1, len(target_names) + 1)),
                                 target_names):
        plt.scatter(X_r[y == i, 0], X_r[y == i, 1],
                    c=c, label=target_name)
    plt.legend()

    out = "{}/tsne_{}.pdf".format(args.workDir, p)
    plt.savefig(out)
    print("Saved to: {}".format(out))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
11748,
42975,
14259,
67,
198,
198,
2,
22507,
584,
9061,
284,
10199,
284,
42975,
14259,
67,
379,
428,
6101,
2209,
290,
2493,
11,
1262,
262,
3200,
198,
2,
42975,
14259,
67,
13,
216... | 2.316298 | 724 |
import numpy as np
# Einsen
# float
E = np.ones((2, 3))
print(E)
print("---------------------------")
# int
F = np.ones((3, 4), dtype=int)
print(F)
print("---------------------------")
# Nullen
# float
Z = np.zeros((2, 4))
print(Z)
print("---------------------------")
# int
Z = np.zeros((3, 4), dtype=int)
print(Z)
print("---------------------------")
x = np.array([2, 5, 18, 14, 4])
E = np.ones_like(x)
print(E)
print("---------------------------")
Z = np.zeros_like(x)
print(Z) | [
11748,
299,
32152,
355,
45941,
198,
198,
2,
412,
1040,
268,
198,
2,
12178,
198,
36,
796,
45941,
13,
1952,
19510,
17,
11,
513,
4008,
198,
4798,
7,
36,
8,
198,
198,
4798,
7203,
22369,
6329,
4943,
198,
198,
2,
493,
198,
37,
796,
45... | 2.544041 | 193 |
import os
import shutil
import sys
from matplotlib import pyplot as plt
from scipy import ndimage
from scipy.ndimage import morphology
import numpy as np
import astropy
from astropy import units as u
from astropy.io import fits
from astropy.table import Table
from astropy.coordinates import SkyCoord
from spherical_geometry.polygon import SphericalPolygon
from PIL import Image, ImageDraw
from stwcs.wcsutil import HSTWCS
from .. import wcs_functions
# Default grid definition file
_fpath = os.path.abspath(os.path.dirname(__file__))
PCELL_PATH = os.path.join(os.path.dirname(_fpath), 'pars')
PCELL_FILENAME = 'allsky_cells.fits'
PCELL_STRLEN = 4
# SkyCell format: "skycell_p0000_x000y000"
SKYCELL_NAME_FMT = f"skycell_p{{:{str(PCELL_STRLEN).zfill(2)}d}}_x{{:03d}}y{{:03d}}"
SKYCELL_NXY = 50
SKYCELL_OVERLAP = 256
NDIMAGE_STRUCT2 = ndimage.generate_binary_structure(2, 2)
def get_sky_cells(visit_input, input_path=None, scale=None, cell_size=None):
"""Return all sky cells that overlap the exposures in the input.
Parameters
-----------
visit_input : str or list
Input specifying the exposures from a single visit; either
a poller output file or a simple list of exposure filenames.
Exposures in an input list are assumed to be in the current
working directory when running the code, unless `input_path`
has been provided which points to the location of the exposures
to be processed.
input_path : str, optional
Location of input exposures, if provided. If not provided,
location will be assumed to be the current working directory.
scale : float, optional
User-defined value for the pixel scale, in arcseconds/pixel,
of the sky cells and projection cells. If `None`, default
value from grid definition file will be used.
cell_size : float, optional
User-specified size, in degrees, for each projection cell.
If `None`, default value from grid definition file will be
used.
Returns
--------
sky_cells : list of objects
List of `SkyCell` objects for all sky cells which overlap the
exposures provided in `visit_input`.
"""
# Interpret input
if isinstance(visit_input, list):
expnames = visit_input.copy()
else:
expnames = Table.read(visit_input, format='ascii.fast_no_header')[0]
# Check that exposures are located in current working directory
if not os.path.exists(expnames[0]):
if not input_path:
msg = "No exposures found in cwd(). Please specify path to files!"
raise (ValueError, msg)
bad_files = 0
for file in expnames:
fullfile = os.path.join(input_path, file)
if not os.path.exists(fullfile):
bad_files.append(fullfile)
print("Could not find {}".format(fullfile))
bad_files += 1
continue
shutil.copy(fullfile, file)
if bad_files:
msg = "Could not find {} specified input files".format(bad_files)
raise (ValueError, msg)
# Check that all exposures have up-to-date WCS solutions
# This will weed out exposures which were not processed by the pipeline
# such as those with EXPTIME==0
for filename in expnames:
with fits.open(filename) as fimg:
print("Checking {}".format(filename))
if 'wcsname' not in fimg[1].header:
expnames.remove(filename)
if len(expnames) == 0:
print("No valid exposures to define sky cells")
return None
# Initialize all sky tessellation object definitions
# This includes setting the pixel scale.
sky_grid = GridDefs(scale=scale, cell_size=cell_size)
# build reference wcs for combined footprint of all input exposures
meta_wcs = wcs_functions.make_mosaic_wcs(expnames, scale=sky_grid.scale)
# create footprint on the sky (as a tangent plane array) for all input exposures using meta_wcs
footprint = SkyFootprint(meta_wcs)
footprint.build(expnames)
# Use this footprint to identify overlapping sky cells
sky_cells = sky_grid.get_sky_cells(footprint)
return sky_cells
#
# Utility functions used in generating or supporting the grid definitions
#
def update_grid_defs(pc_size=5.0, output=None, grid_file=None):
"""Computes updated values for bands and projection cells.
Parameters
-----------
pc_size : float
Size of each side of the projection cell or width of each band on
the sky in degrees. If `None`, the default value will be read in
from the `PC_SIZE` keyword from the PRIMARY header of the default
grid definitions file.
output : str, optional
Name of output grid definition file. If `None`, it will write out
the updated table with the original filename in the current directory
overwriting any previous file.
"""
# read in default grid definition file
if not grid_file:
grid_file = os.path.join(PCELL_PATH, PCELL_FILENAME)
grid_defs = fits.open(grid_file)
grid = grid_defs[1].data
pc_scale = grid_defs[0].header['PC_SCALE']
if not pc_size:
pc_size = grid_defs[0].header['PC_SIZE']
pos_angle = [0.0 * u.deg, 90.0 * u.deg, 180.0 * u.deg, 270.0 * u.deg]
pc_edge = pc_size / 2.0 * u.deg
# Compute size on the sky of the first cell in each band
# Compute edges using:
for nband in range(len(grid)):
c1 = SkyCoord(ra=0. * u.deg, dec=grid[nband]['DEC'] * u.deg, frame='icrs')
pcell = ProjectionCell(index=0, band=grid[nband], scale=pc_scale)
# Compute offset to center of each edge, +/- RA and +/- Dec
c1_edges = [c1.directional_offset_by(p, pc_edge) for p in pos_angle]
# Convert offset to edge center into distance in pixels from center of cell
c1_pixels = [np.abs(pcell.wcs.world_to_pixel_values(e.ra, e.dec)) for e in c1_edges]
# Compute overall size of cell in pixels
naxis1, naxis2 = (np.array(c1_pixels).sum(axis=0) + 1).astype(np.int)
# apply new definition to cell WCS
pcell.wcs.wcs.crpix = [naxis1 / 2. + 0.5, naxis2 / 2. + 0.5]
pcell.wcs.naxis1 = naxis1
pcell.wcs.naxis2 = naxis2
# Determine extent of band
min_dec, max_dec = compute_band_height(pcell.wcs)
# Account for wrapping over each pole
if nband == 0:
min_dec = grid[nband]['DEC']
if nband == len(grid) - 1:
max_dec = grid[nband]['DEC']
# Update definition for this band in table with newly computed values
grid[nband]['DEC_MIN'] = min_dec
grid[nband]['DEC_MAX'] = max_dec
grid[nband]['XCELL'] = naxis1 # supposed to be sky cell size
grid[nband]['YCELL'] = naxis2 # supposed to be sky cell size
# write out updated grid definitions file
if not output:
output = PCELL_FILENAME
# write to path included in 'output', defaulting to current working dir
grid_defs.writeto(output, overwrite=True)
def compute_band_height(wcs):
"""Compute size in pixels of tangent plane"""
edges = []
edges += [[0, i] for i in range(wcs.naxis2)]
edges += [[wcs.naxis1, i] for i in range(wcs.naxis2)]
edges += [[i, 0] for i in range(wcs.naxis1)]
edges += [[i, wcs.naxis2] for i in range(wcs.naxis1)]
edge_sky = wcs.pixel_to_world_values(edges)
return min(edge_sky[:, 1]), max(edge_sky[:, 1])
#
#
# CKmeans implementation from github/llimllib/ckmeans
#
#
import numpy as np
##
## HELPER CODE FOR TESTS
##
# partition recipe modified from
# http://wordaligned.org/articles/partitioning-with-python
from itertools import chain, combinations
def sliceable(xs):
'''Return a sliceable version of the iterable xs.'''
try:
xs[:0]
return xs
except TypeError:
return tuple(xs)
# given a partition, return the sum of the squared distances of each part
# brute force the correct answer by testing every partition.
# if __name__ == "__main__":
| [
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
25064,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
1330,
299,
67,
9060,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
46320,
198,
11748... | 2.591185 | 3,131 |
#if
answer=input("Do you need express shipping?(yes/no) :")
if answer.lower()==("yes" or "y"):
print("That will be an extra $50")
print("Have a nice day")
| [
2,
361,
198,
41484,
28,
15414,
7203,
5211,
345,
761,
4911,
8440,
30,
7,
8505,
14,
3919,
8,
1058,
4943,
198,
361,
3280,
13,
21037,
3419,
855,
7203,
8505,
1,
393,
366,
88,
1,
2599,
198,
220,
220,
220,
3601,
7203,
2504,
481,
220,
3... | 2.728814 | 59 |
from constants import *
if __name__ == "__main__":
# host = input("Enter the host of server: ")
# port = input("Enter the port of server: ")
client = Client('localhost', 1234)
| [
6738,
38491,
1330,
1635,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1303,
2583,
796,
5128,
7203,
17469,
262,
2583,
286,
4382,
25,
366,
8,
198,
220,
220,
220,
1303,
2493,
796,
5128,
72... | 2.938462 | 65 |
# -*- coding: utf-8 -*-
# Copyright 2015 CMU
# Author: Yihan Wang <wangff9@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from monasca.microservice import thresholding_processor as processor
from monasca.openstack.common import log
from monasca import tests
import time
LOG = log.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1853,
16477,
52,
198,
2,
6434,
25,
575,
4449,
272,
15233,
1279,
47562,
487,
24,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
... | 3.560345 | 232 |
from flask import url_for, redirect, Blueprint, render_template, request, current_app
from .model_consignacion import Consignacion
from .form_Consignacion import ConsignacionForm
from flask_login import login_required, login_user, logout_user, current_user
from werkzeug.exceptions import NotFound
BP_NM = 'consignaciones'
consignaciones = Blueprint(BP_NM, __name__, template_folder='templates')
@consignaciones.route('/')
@login_required
@consignaciones.route('/new', methods=['GET', 'POST'], defaults={'usuario_id': None})
@login_required
'''
@consignaciones.route('/consignacion/<int:consignacion_id>')
@login_required
def Consignacion(consignacion_id):
Consignacion = Consignacion.get_by_id(Consignacion_id)
if consignacion is None:
raise NotFound(consignacion_id)
return render_template('consignacion.html', consignacion=consignacion, seccion='consignaciones')
@consignaciones.route('/<int:consignacion_id>/edit', methods=['GET', 'POST'])
@login_required
def consignacion_edit(consignacion_id=None):
consignacion = Consignacion.get_by_id(consignacion_id)
if consignacion is None:
raise NotFound(consignacion_id)
form = ConsignacionForm()
if form.validate_on_submit():
consignacion.nombre=form.nombre.data
consignacion.save()
return redirect(url_for('consignaciones.consignaciones_index'))
form.nombre.data=consignacion.nombre
return render_template('form_consignacion.html', form=form, seccion='consignaciones')
''' | [
6738,
42903,
1330,
19016,
62,
1640,
11,
18941,
11,
39932,
11,
8543,
62,
28243,
11,
2581,
11,
1459,
62,
1324,
198,
6738,
764,
19849,
62,
5936,
570,
49443,
1330,
3515,
570,
49443,
198,
6738,
764,
687,
62,
9444,
570,
49443,
1330,
3515,
... | 2.694494 | 563 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Parametric sphere
See http://matplotlib.org/examples/mplot3d/surface3d_demo2.html
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Build datas ###############
u = np.linspace(0, 2.*np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = 10 * np.outer(np.cos(u), np.sin(v))
y = 10 * np.outer(np.sin(u), np.sin(v))
z = 10 * np.outer(np.ones(np.size(u)), np.cos(v))
# Plot data #################
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(x, y, z, rstride=4, cstride=4)
ax.set_title("Parametric Sphere")
# Save file and plot ########
plt.savefig("parametric_sphere.png")
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
22973,
19482,
16558,
198,
198,
6214,
2638,
1378,
6759,
29487,
8019,
13,
2398,
14,
1069,
12629,
14,... | 2.312903 | 310 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from base64 import urlsafe_b64decode
from itertools import chain
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1.codec.native.decoder import decode as nat_decode
from cryptoconditions.condition import Condition
from cryptoconditions.crypto import base64_add_padding
from cryptoconditions.fulfillment import Fulfillment
from cryptoconditions.exceptions import MissingDataError
from cryptoconditions.types.base_sha256 import BaseSha256
from cryptoconditions.schemas.fingerprint import PrefixFingerprintContents
CONDITION = 'condition'
FULFILLMENT = 'fulfillment'
class PrefixSha256(BaseSha256):
""" """
TYPE_ID = 1
TYPE_NAME = 'prefix-sha-256'
TYPE_ASN1 = 'prefixSha256'
TYPE_ASN1_CONDITION = 'prefixSha256Condition'
TYPE_ASN1_FULFILLMENT = 'prefixSha256Fulfillment'
TYPE_CATEGORY = 'compound'
CONSTANT_BASE_COST = 16384
CONSTANT_COST_DIVISOR = 256
def __init__(self):
"""
PREFIX-SHA-256: Prefix condition using SHA-256.
A prefix condition will prepend a static prefix to the message
before passing the prefixed message on to a single subcondition.
You can use prefix conditions to effectively narrow the scope of
a public key or set of public keys. Simply take the condition
representing the public key and place it as a subcondition in a
prefix condition. Now any message passed to the subcondition
will be prepended with a prefix.
Prefix conditions are especially useful in conjunction with
threshold conditions. You could have a group of signers, each
using a different prefix to sign a common message.
PREFIX-SHA-256 is assigned the type ID 1. It relies on the
SHA-256 and PREFIX feature suites which corresponds to a feature
bitmask of 0x05.
"""
self._prefix = b''
self._subcondition = None
self._max_message_length = 16384
@property
def subcondition(self):
"""The (unfulfilled) subcondition.
Each prefix condition builds on an existing condition which is
provided via this method.
Args:
subcondition {Condition| str}: Condition object or URI
string representing the condition that will receive the
prefixed message.
"""
return self._subcondition
@subcondition.setter
def _set_subfulfillment(self, subfulfillment):
"""Set the (fulfilled) subcondition.
When constructing a prefix fulfillment, this method allows you to
pass in a fulfillment for the condition that will receive the
prefixed message.
Note that you only have to add either the subcondition or a
subfulfillment, but not both.
Args:
subfulfillment {Fulfillment|str}: Fulfillment object or URI
string representing the fulfillment to use as the
subcondition.
"""
if isinstance(subfulfillment, str):
subfulfillment = Fulfillment.from_uri(subfulfillment)
elif not isinstance(subfulfillment, Fulfillment):
raise Exception(
'Subfulfillments must be URIs or objects of type Fulfillment')
self._subcondition = subfulfillment
@property
def prefix(self):
"""Set the prefix.
The prefix will be prepended to the message during validation
before the message is passed on to the subcondition.
Args:
prefix (bytes): Prefix to apply to the message.
"""
return self._prefix
@prefix.setter
@property
@max_message_length.setter
def max_message_length(self, max_message_length):
"""The threshold.
Determines the threshold that is used to consider this
condition fulfilled. If the number of valid subfulfillments is
greater or equal to this number, the threshold condition is
considered to be fulfilled.
Args:
max_message_length (int): Threshold.
"""
if not isinstance(max_message_length, int) or max_message_length < 0:
raise TypeError(
'Max message length must be an integer greater than or '
'equal to zero, was: {}'.format(max_message_length)
)
self._max_message_length = max_message_length
@property
def subtypes(self):
"""Get types used in this condition.
This is a type of condition that contains a subcondition. A
complete set of subtypes must contain the set of types that must
be supported in order to validate this fulfillment. Therefore,
we need to join the type of this condition to the types used in
the subcondition.
Returns:
:obj:`set` of :obj:`str`: Complete type names for this
fulfillment.
Note:
Never include our own type as a subtype. The reason is that
we already know that the validating implementation knows how
to interpret this type, otherwise it wouldn't be able to
verify this fulfillment to begin with.
"""
return {t for t in chain(self.subcondition.subtypes,
(self.subcondition.type_name,))
if t != self.TYPE_NAME}
@property
def fingerprint_contents(self):
"""Produce the contents of the condition hash.
This function is called internally by the ``condition``
method/property.
Returns:
bytes: Encoded contents of fingerprint hash.
"""
if not self.subcondition:
raise MissingDataError('Requires subcondition')
try:
subcondition_asn1_dict = self.subcondition.condition.to_asn1_dict()
except AttributeError:
subcondition_asn1_dict = self.subcondition.to_asn1_dict()
return der_encode(nat_decode({
'prefix': self.prefix,
'maxMessageLength': self.max_message_length,
'subcondition': subcondition_asn1_dict,
}, asn1Spec=PrefixFingerprintContents()))
@property
def calculate_cost(self):
"""Calculate the cost of fulfilling this condition.
The cost of the prefix condition equals
(1 + l/256) * (16384 + s)
where l is the prefix length in bytes and s is the subcondition
cost.
Returns:
int: Expected maximum cost to fulfill this condition
"""
if self.prefix is None:
raise MissingDataError('Prefix must be specified')
if not self.subcondition:
raise MissingDataError('Subcondition must be specified')
try:
subcondition_cost = self.subcondition.cost
except AttributeError:
subcondition_cost = self.subcondition.condition.cost
cost = (len(self.prefix) +
self.max_message_length + subcondition_cost + 1024)
return cost
def validate(self, message):
"""Check whether this fulfillment meets all validation criteria.
This will validate the subfulfillment. The message will be
prepended with the prefix before being passed to the
subfulfillment's validation routine.
Args:
message (bytes): Message to validate against.
Returns:
bool: Whether this fulfillment is valid.
"""
if not isinstance(self.subcondition, Fulfillment):
raise Exception('Subcondition is not a fulfillment')
if not isinstance(message, bytes):
raise Exception(
'Message must be provided as a bytes, was: '.format(message))
return self.subcondition.validate(message=self.prefix + message)
| [
6738,
2779,
2414,
1330,
2956,
7278,
8635,
62,
65,
2414,
12501,
1098,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
12972,
292,
77,
16,
13,
19815,
721,
13,
1082,
13,
12685,
12342,
1330,
37773,
355,
4587,
62,
268,
8189,
198,
... | 2.579103 | 3,034 |
# Import required modules
import numpy as np
import cv2 as cv
import math
import argparse
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
description = "The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch")
parser.add_argument('--input',
help='Path to input image. Skip this argument to capture frames from a camera.')
parser.add_argument('--ocr', default="/home/moo/Desktop/ocr/clovaai/new_model_8_4/DenseNet.onnx",
help="Path to a binary .pb or .onnx file contains trained recognition network", )
parser.add_argument('--width', type=int, default=100,
help='Preprocess input image by resizing to a specific width.')
parser.add_argument('--height', type=int, default=32,
help='Preprocess input image by resizing to a specific height.')
args = parser.parse_args()
############ Utility functions ############
if __name__ == "__main__":
main()
| [
2,
17267,
2672,
13103,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
10688,
198,
11748,
1822,
29572,
198,
198,
7804,
4242,
3060,
4578,
30751,
329,
3141,
1627,
7159,
1303,
7804,
21017,
198,
48610,... | 2.954301 | 372 |
from django.urls import path
from . import views
urlpatterns = [
path("posts/", views.ListCreatePostAPIView.as_view(), name="get_posts"),
path(
"posts/<int:pk>/",
views.RetrieveUpdateDestroyPostAPIView.as_view(),
name="get_delete_update_post",
),
path(
"posts/<int:pk>/upvote",
views.UpvoteView.as_view(),
name="get_delete_update_post",
),
path(
"posts/<int:pk>/comments/",
views.ListCreateCommentAPIView.as_view(),
name="get_post_comment",
),
path(
"posts/<int:id>/comments/<int:pk>/",
views.RetrieveUpdateDestroyCommentAPIView.as_view(),
name="get_delete_update_post_comment",
),
path(
"posts/<int:id>/comments/<int:pk>/replies",
views.RetrieveUpdateDestroyCommentReplyAPIView.as_view(),
name="get_delete_update_post_comment",
),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
1330,
5009,
628,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
7203,
24875,
14,
1600,
5009,
13,
8053,
16447,
6307,
2969,
3824,
769,
13,
292,
62,
1177,
22784,
... | 2.122642 | 424 |
from setuptools import setup
from setuptools import find_packages
setup(name='vinci',
version='1.0.0~dev',
description='Deep Reinforcement Learning for Keras',
author='Matthias Plappert, Pierre Manceron',
url='https://github.com/phylliade/vinci',
license='MIT',
install_requires=['keras>=1.0.7'],
extras_require={
'gym': ['gym'],
},
packages=find_packages())
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
900,
37623,
10141,
1330,
1064,
62,
43789,
628,
198,
40406,
7,
3672,
11639,
7114,
979,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
16,
13,
15,
13,
15,
93,
7959,
3256,
198,
220,
220... | 2.38764 | 178 |
import pytest
from eai import eaichecker
| [
11748,
12972,
9288,
198,
198,
6738,
304,
1872,
1330,
304,
64,
14234,
15280,
628
] | 3.071429 | 14 |
# -*- coding: utf-8 -*-
import sqlite3 as lite
import sys
import datetime
import csv
now = datetime.datetime.now()
currdate = now.strftime("%Y%m%d")
orig_stdout = sys.stdout
dbcon = lite.connect(r"WORK_PATH\\Purge\\suivi_purge.db")
#Analysis of current day turnover
cur = dbcon.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cur.fetchall()
nomtable = tables[0][0]
nomtable = nomtable.decode("utf-8")
cur.execute("select count(*) from purge%s" % currdate)
rownumber = cur.fetchone()
rownum = rownumber[0]
cur.execute("select msisdn_9 from purge%s group by msisdn_9 having count(*)>1;" % currdate)
duplicate = cur.fetchall()
cur.execute("select count(*) from purge%s where Flag_priorite1 = 1 or Flag_priorite2 = 1 or Flag_priorite3 = 1 or Flag_priorite4 = 1 or Flag_priorite5 = 1 or Flag_priorite6 = 1;" % currdate)
flagnumber = cur.fetchone()
# Priorité 1
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = '' -- KYC
and
Statut_IN = 'Desactive'
and
stk_flag = 0
and
date(OM_date_registration) is null
and
main_account = '0.00'
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0);""" % currdate)
P1Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite1 = 1;" % currdate)
P1F = cur.fetchone()
P1diff = P1Q[0] - P1F[0]
# Priorité 2
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
date(OM_date_registration) is null
and
main_account = '0.00'
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) >365
and
(date(last_recharge) < date('now','-1 year','-1 day')
or date(last_recharge) is null)""" % currdate)
P2Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite2 = 1;" % currdate)
P2F = cur.fetchone()
P2diff = P2Q[0] - P2F[0]
# Priorité 3
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
OM_date_registration = '\N'
and
(round(main_account,2) <= 50.00
and round(main_account,2) > 0.00)
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) >365
and
(date(last_recharge) < date('now','-1 year','-2 day')
or date(last_recharge) is null)""" % currdate)
P3Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite3 = 1" % currdate)
P3F = cur.fetchone()
P3diff = P3Q[0] - P3F[0]
# Priorité 4
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
date(OM_date_registration) is null
and
main_account = '0.00'
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) <= 365
and
cast(duree_inactivite as int) > 180
and
(date(last_recharge) < date('now','-1 year','-2 day')
or date(last_recharge) is null)""" % currdate)
P4Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite4 = 1" % currdate)
P4F = cur.fetchone()
P4diff = P4Q[0] - P4F[0]
# Priorité 5
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
OM_date_registration = '\N'
and
(round(main_account,2) <= 50.00
and round(main_account,2) > 0.00)
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) <= 365
and
cast(duree_inactivite as int) > 180
and
(date(last_recharge) < date('now','-1 year','-2 day')
or date(last_recharge) is null)""" % currdate)
P5Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite5 = 1" % currdate)
P5F = cur.fetchone()
P5diff = P5Q[0] - P5F[0]
# Priorité 6
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
OM_date_registration = '\N'
and
(round(main_account,2) <= 2000.00
and round(main_account,2) > 50.00)
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) > 365
and
(date(last_recharge) < date('now','-1 year','-2 day')
or date(last_recharge) is null)""" % currdate)
P6Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite6 = 1" % currdate)
P6F = cur.fetchone()
P6diff = P6Q[0] - P6F[0]
# Priorité 7
cur.execute("""select count(*) from purge%s
where
plan_tarifaire in('5001','5006','5007')
and
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
date(OM_date_registration) is null
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)""" % currdate)
P7Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite7 = 1" % currdate)
P7F = cur.fetchone()
P7diff = P7Q[0] - P7F[0]
# Priorité 8
cur.execute("""select count(*) from purge%s
where
Statut_FT = 'INACTIF'
and
KYC = ''
and
Statut_IN = 'Inactif'
and
stk_flag = 0
and
date(OM_date_registration) is null
and
Channel_user_category = '\N'
and
(Channel_balance = '\N' or Channel_balance = 0)
and
cast(duree_inactivite as int) >= 365""" % currdate)
P8Q = cur.fetchone()
cur.execute("select count(*) from purge%s where Flag_priorite8 = 1" % currdate)
P8F = cur.fetchone()
P8diff = P8Q[0] - P8F[0]
dbcon.close()
filename_log = "log_priority%s.txt" % currdate
f = file(r"WORK_PATH\\Purge\\Result\\%s" % filename_log, 'w')
sys.stdout = f
print "------------------ Analysis of table %s ------------------" % nomtable
print "- The table %s has %s rows" % (nomtable,rownum)
print "- There are %s duplicate in table %s: " % (len(duplicate),nomtable)
for i in duplicate:
print i[0]
print "- %s MSISDN have a flag in the table %s" % (flagnumber[0],nomtable)
print ""
for x in range(1,9):
PXQ = "P" + str(x) + "Q"
PXF = "P" + str(x) + "F"
PXdiff = "P" + str(x) + "diff"
print "------------------ Priority %i ------------------" % x
if eval(PXdiff) == 0 :
print "Verification of Priority %i OK" % x
print "Number of MSISDN with a flag priority %i: %i" % (x,eval(PXF)[0])
else:
print "The difference is: %i for priority %i " % (eval(PXdiff),x)
print "Results of query %i" % eval(PXQ)[0]
print "Results of flag %i" % eval(PXF)[0]
print ""
sys.stdout = orig_stdout
f.close()
filename_csv = "result_purge_Query%s.csv" % currdate
f = open(r"WORK_PATH\\Purge\\Result\\%s" % filename_csv, 'wb')
try:
writer = csv.writer(f,delimiter=";")
writer.writerow( (u'Priority', 'Nombre MSISDN'))
for i in range(1,9):
PXQ = "P" + str(i) + "Q"
priority = "Priority %s" % i
writer.writerow((priority,eval(PXQ)[0]))
finally:
f.close()
filename_csv = "result_purge_Flag%s.csv" % currdate
f = open(r"WORK_PATH\\Purge\\Result\\%s" % filename_csv, 'wb')
try:
writer = csv.writer(f,delimiter=";")
writer.writerow((u'Priority', 'Number of MSISDN'))
for i in range(1,9):
PXF = "P" + str(i) + "F"
priority = "Priority %s" % i
writer.writerow( (priority,eval(PXF)[0]))
finally:
f.close() | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
44161,
578,
18,
355,
300,
578,
201,
198,
11748,
25064,
201,
198,
11748,
4818,
8079,
201,
198,
11748,
269,
21370,
201,
198,
201,
198,
2197,
796,
4... | 2.222943 | 3,391 |
import argparse
import torch
import random
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from PIL import Image
import numpy as np
import time
import matplotlib.pyplot as plt
from model import VDSR
from util import *
if __name__ == '__main__':
main() | [
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
4738,
198,
11748,
28034,
13,
1891,
2412,
13,
66,
463,
20471,
355,
269,
463,
20471,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
40085,
355,
6436,
198,
6738,
28034,... | 3.292035 | 113 |
from __future__ import absolute_import
import pynndescent
from ann_benchmarks.algorithms.base import BaseANN
import numpy as np
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
279,
2047,
358,
45470,
198,
6738,
1529,
62,
26968,
14306,
13,
282,
7727,
907,
13,
8692,
1330,
7308,
22846,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.513514 | 37 |
import numpy as np
import random
import shutil
from ibllib.ephys.np2_converter import NP2Converter
from ibllib.io import spikeglx
from ci.tests import base
class TestNeuropixel2ConverterNP24(base.IntegrationTest):
"""
Check NP2 converter with NP2.4 type probes
"""
def testDecimate(self):
"""
Check integrity of windowing and downsampling by comparing results when using different
window lengths for iterating through data
:return:
"""
FS = 30000
np_a = NP2Converter(self.file_path, post_check=False)
np_a.init_params(nwindow=0.5 * FS, extra='_0_5s_test', nshank=[0])
np_a.process()
np_b = NP2Converter(self.file_path, post_check=False)
np_b.init_params(nwindow=1 * FS, extra='_1s_test', nshank=[0])
np_b.process()
np_c = NP2Converter(self.file_path, post_check=False)
np_c.init_params(nwindow=3 * FS, extra='_2s_test', nshank=[0])
np_c.process()
sr = spikeglx.Reader(self.file_path)
self.sglx_instances.append(sr)
sr_a_ap = spikeglx.Reader(np_a.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_a_ap)
sr_b_ap = spikeglx.Reader(np_b.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_b_ap)
sr_c_ap = spikeglx.Reader(np_c.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_c_ap)
# Make sure all the aps are the same regardless of window size we used
assert np.array_equal(sr_a_ap[:, :], sr_b_ap[:, :])
assert np.array_equal(sr_a_ap[:, :], sr_c_ap[:, :])
assert np.array_equal(sr_b_ap[:, :], sr_c_ap[:, :])
# For AP also check that all values are the same as the original file
assert np.array_equal(sr_a_ap[:, :], sr[:, np_a.shank_info['shank0']['chns']])
assert np.array_equal(sr_b_ap[:, :], sr[:, np_b.shank_info['shank0']['chns']])
assert np.array_equal(sr_c_ap[:, :], sr[:, np_c.shank_info['shank0']['chns']])
sr_a_lf = spikeglx.Reader(np_a.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_a_lf)
sr_b_lf = spikeglx.Reader(np_b.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_b_lf)
sr_c_lf = spikeglx.Reader(np_c.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_c_lf)
# Make sure all the lfps are the same regardless of window size we used
assert np.array_equal(sr_a_lf[:, :], sr_b_lf[:, :])
assert np.array_equal(sr_a_lf[:, :], sr_c_lf[:, :])
assert np.array_equal(sr_b_lf[:, :], sr_c_lf[:, :])
def testProcessNP24(self):
"""
Check normal workflow of splittig data into individual shanks
:return:
"""
# Make sure normal workflow runs without problems
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process()
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# Test a random ap metadata file and make sure it all makes sense
shank_n = random.randint(0, 3)
sr_ap = spikeglx.Reader(np_conv.shank_info[f'shank{shank_n}']['ap_file'])
assert np.array_equal(sr_ap.meta['acqApLfSy'], [96, 0, 1])
assert np.array_equal(sr_ap.meta['snsApLfSy'], [96, 0, 1])
assert np.equal(sr_ap.meta['nSavedChans'], 97)
assert (sr_ap.meta['snsSaveChanSubset'] == '0:96')
assert np.equal(sr_ap.meta['NP2.4_shank'], shank_n)
assert (sr_ap.meta['original_meta'] == 'False')
sr_ap.close()
# Test a random lf metadata file and make sure it all makes sense
shank_n = random.randint(0, 3)
sr_lf = spikeglx.Reader(np_conv.shank_info[f'shank{shank_n}']['lf_file'])
assert np.array_equal(sr_lf.meta['acqApLfSy'], [0, 96, 1])
assert np.array_equal(sr_lf.meta['snsApLfSy'], [0, 96, 1])
assert np.equal(sr_lf.meta['nSavedChans'], 97)
assert (sr_lf.meta['snsSaveChanSubset'] == '0:96')
assert np.equal(sr_lf.meta['NP2.4_shank'], shank_n)
assert (sr_lf.meta['original_meta'] == 'False')
assert np.equal(sr_lf.meta['imSampRate'], 2500)
sr_lf.close()
# Rerun again and make sure that nothing happens because folders already exists
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process()
self.assertTrue(np_conv.already_exists)
self.assertFalse(status)
# But if we set the overwrite flag to True we force rerunning
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process(overwrite=True)
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# Change some of the data and make sure the checking function is working as expected
shank_n = random.randint(0, 3)
ap_file = np_conv.shank_info[f'shank{shank_n}']['ap_file']
with open(ap_file, "r+b") as f:
f.write((chr(10) + chr(20) + chr(30) + chr(40)).encode())
# Now that we have changed the file we expect an assertion error when we do the check
with self.assertRaises(AssertionError) as context:
np_conv.check_NP24()
self.assertTrue('data in original file and split files do no match'
in str(context.exception))
# Finally test that we cannot process a file that has already been split
np_conv = NP2Converter(ap_file)
status = np_conv.process()
self.assertTrue(np_conv.already_processed)
self.assertFalse(status)
class TestNeuropixel2ConverterNP21(base.IntegrationTest):
"""
Check NP2 converter with NP2.1 type probes
"""
def testProcessNP21(self):
"""
Check normal workflow of getting LFP data out and storing in main probe folder
:return:
"""
# make sure it runs smoothly
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# test the meta file
sr_ap = spikeglx.Reader(np_conv.shank_info[f'shank0']['lf_file'])
assert np.array_equal(sr_ap.meta['acqApLfSy'], [0, 384, 1])
assert np.array_equal(sr_ap.meta['snsApLfSy'], [0, 384, 1])
assert np.equal(sr_ap.meta['nSavedChans'], 385)
assert (sr_ap.meta['snsSaveChanSubset'] == '0:384')
assert np.equal(sr_ap.meta['NP2.1_shank'], 0)
assert (sr_ap.meta['original_meta'] == 'False')
sr_ap.close()
# now run again and make sure that it doesn't run
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertTrue(np_conv.already_exists)
self.assertFalse(status)
# Now try with the overwrite flag and make sure it runs
np_conv = NP2Converter(self.file_path)
status = np_conv.process(overwrite=True)
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
class TestNeuropixel2ConverterNP1(base.IntegrationTest):
"""
Check NP2 converter with NP1 type probes
"""
def testProcessNP1(self):
"""
Check normal workflow -> nothing should happen!
"""
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertFalse(status)
if __name__ == "__main__":
import unittest
unittest.main(exit=False)
| [
11748,
299,
32152,
355,
45941,
201,
198,
11748,
4738,
201,
198,
11748,
4423,
346,
201,
198,
201,
198,
6738,
24283,
297,
571,
13,
27446,
893,
13,
37659,
17,
62,
1102,
332,
353,
1330,
28498,
17,
3103,
332,
353,
201,
198,
6738,
24283,
... | 2.080364 | 3,733 |
import numpy as np
from random import sample
'''
split data into train (70%), test (15%) and valid(15%)
return tuple( (trainX, trainY), (testX,testY), (validX,validY) )
'''
'''
generate batches, by random sampling a bunch of items
yield (x_gen, y_gen)
'''
'''
a generic decode function
inputs : sequence, lookup
'''
| [
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
6291,
198,
198,
7061,
6,
198,
6626,
1366,
656,
4512,
357,
2154,
15920,
1332,
357,
1314,
4407,
290,
4938,
7,
1314,
4407,
198,
220,
220,
220,
1441,
46545,
7,
357,
27432,
55,
11,
45... | 2.809917 | 121 |
from django.urls import re_path
from oscar.core.application import OscarConfig
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
302,
62,
6978,
198,
6738,
267,
13034,
13,
7295,
13,
31438,
1330,
15694,
16934,
628
] | 3.636364 | 22 |
from django.dispatch import Signal
translation_imported = Signal()
| [
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
628,
198,
41519,
62,
320,
9213,
796,
26484,
3419,
198
] | 3.833333 | 18 |
from pyfix.FIX44 import msgtype, messages
__author__ = 'tom'
beginstring = 'FIX.4.4'
| [
6738,
12972,
13049,
13,
47084,
2598,
1330,
31456,
4906,
11,
6218,
198,
198,
834,
9800,
834,
796,
705,
39532,
6,
198,
198,
27471,
8841,
796,
705,
47084,
13,
19,
13,
19,
6,
628
] | 2.666667 | 33 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.types.struct import HStructField
from hwt.interfaces.structIntf import StructIntf
from hwt.interfaces.utils import propagateClkRstn
from hwt.synthesizer.unit import Unit
from hwtLib.abstract.busEndpoint import BusEndpoint
from hwtLib.amba.axi4Lite import Axi4Lite
from hwtLib.amba.axiLite_comp.endpoint import AxiLiteEndpoint
from hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTC
from hwtLib.amba.axi_comp.builder import AxiBuilder
class AxiLiteEpWithReg(Unit):
"""
:class:`hwt.synthesizer.unit.Unit` with AxiLiteEndpoint and AxiLiteReg together
"""
@staticmethod
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
# suite.addTest(Axi4_wDatapumpTC('test_singleLong'))
suite.addTest(unittest.makeSuite(AxiRegTC))
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
289,
46569,
13,
71,
25404,
13,
19199,
13,
7249,
1330,
367,
44909,
15878,
198,
6738,
289,
46569,
13,
38... | 2.517808 | 365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Functions: Idenfy tomcat password
# Code By BlAck.Eagle
import threading, time, random, sys, urllib2, httplib, base64
from copy import copy
import re
from collections import defaultdict, deque
if __name__ == '__main__':
if len(sys.argv) !=5:
print "\nUsage: ./TomcatBrute.py <urlList> <port> <userlist> <wordlist>\n"
print "ex: python TomcatBrute.py ip.txt 8080 users.txt wordlist.txt\n"
sys.exit(1)
try:
users = open(sys.argv[3], "r").readlines()
except(IOError):
print "Error: Check your userlist path\n"
sys.exit(1)
try:
words = open(sys.argv[4], "r").readlines()
except(IOError):
print "Error: Check your wordlist path\n"
sys.exit(1)
try:
port = sys.argv[2]
except(IOError):
print "Error: Check your port\n"
path = '/manager/html'
WEAK_USERNAME = [p.replace('\n','') for p in users]
WEAK_PASSWORD = [p.replace('\n','') for p in words]
#WEAK_USERNAME = ['tomcat','user']
#WEAK_PASSWORD = ['tomcat','user']
accounts =deque() #list数组
for username in WEAK_USERNAME:
for password in WEAK_PASSWORD:
accounts.append((username,password))
#print len(accounts)
#server = sys.argv[1]
host_open = open(sys.argv[1], 'r')
ip = [p.replace('\n','') for p in host_open]
for server in ip:
print "[+] Server:",server
print "[+] Port:",port
print "[+] Users Loaded:",len(WEAK_USERNAME)
print "[+] Words Loaded:",len(WEAK_PASSWORD)
print "[+] Started",timer(),"\n"
for I in range(len(accounts)):
work = Tomcatbrute(server,port,path,accounts[I][0],accounts[I][1])
work.setDaemon(1)
work.start()
time.sleep(0.1)
print "\n[-] Done -",timer(),"\n"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
220,
220,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
220,
220,
198,
2,
40480,
25,
5121,
268,
24928,
16667,
9246,
9206,
198,
2,
6127,
2750,
1086,
32,
694,
13,
36,
1... | 1.790143 | 1,258 |
import logging
import sys
from typing import Optional
from brutemethehorizon import helper
from brutemethehorizon.config import Colors, Config
logger = logging.getLogger('horizon')
| [
11748,
18931,
198,
11748,
25064,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
8938,
368,
10567,
17899,
8637,
1330,
31904,
198,
6738,
8938,
368,
10567,
17899,
8637,
13,
11250,
1330,
29792,
11,
17056,
198,
198,
6404,
1362,
796,
18931,
13,... | 3.77551 | 49 |
# Copyright 2017 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit import exception
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils import encoding
CONF = cloudbaseinit_conf.CONF
MODPATH = "cloudbaseinit.metadata.services.azureservice.AzureService"
| [
2,
15069,
2177,
10130,
8692,
23555,
21714,
75,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11... | 3.431973 | 294 |
import tensorflow as tf
import _init_paths
from models.object.audioset_feature_extractor import AudiosetFeatureExtractor
if __name__ == '__main__':
# Load text module graph
PATH_TO_TEST_AUDIO = 'test_DB/audios/BBT0624.wav'
PATH_TO_VGG_GRAPH = '../weights/audioset/vggish_fr.pb'
PATH_TO_PCA_PARAMS = '../weights/audioset/vggish_pca_params.npz'
# Scene start&end timestamp in milliseconds
sc_start = 0
sc_end = 2000
vgg_graph = tf.Graph()
with vgg_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_VGG_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
audioset_feature_extractor = AudiosetFeatureExtractor(vgg_graph, PATH_TO_PCA_PARAMS)
embeddings = audioset_feature_extractor.extract(PATH_TO_TEST_AUDIO, sc_start, sc_end)
print(embeddings[0])
print(embeddings.shape)
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
4808,
15003,
62,
6978,
82,
198,
6738,
4981,
13,
15252,
13,
3885,
4267,
316,
62,
30053,
62,
2302,
40450,
1330,
7591,
4267,
316,
38816,
11627,
40450,
198,
198,
361,
11593,
3672,
834,
6624,... | 2.285714 | 434 |
from cmd3.shell import command
| [
6738,
23991,
18,
13,
29149,
1330,
3141,
628
] | 4 | 8 |
""" """
from __future__ import unicode_literals, division, print_function, absolute_import
import argparse
import codecs
import sys
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from sqlacodegen.codegen import CodeGenerator
import sqlacodegen
| [
37811,
37227,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
7297,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
11748,
1822,
29572,
198,
11748,
40481,
82,
198,
11748,
25064,
198,
198,
6738,
44161,
282,
26599,
13,
... | 3.628205 | 78 |
#!/usr/bin/env python
import pytest
import soothingsounds as ss
nsec = 1
nbitfile = 16
nbitfloat = 32 # from generator.py
Noises = ['white', 'pink', 'blue', 'brown', 'violet']
if __name__ == '__main__':
pytest.main([__file__])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
12972,
9288,
198,
11748,
523,
849,
654,
3733,
355,
37786,
198,
198,
77,
2363,
796,
352,
198,
77,
2545,
7753,
796,
1467,
198,
77,
2545,
22468,
796,
3933,
220,
1303,
422,
17301,
... | 2.55914 | 93 |
import matplotlib as mpl
import matplotlib.pyplot as plt
from colorsys import hsv_to_rgb
| [
11748,
2603,
29487,
8019,
355,
285,
489,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
7577,
893,
1330,
289,
21370,
62,
1462,
62,
81,
22296,
628,
628,
198
] | 2.818182 | 33 |
import numpy as np
from typing import Tuple
from gym.envs.registration import register
from onpolicy.envs.highway.highway_env import utils
from onpolicy.envs.highway.highway_env.envs.common.abstract import AbstractEnv
from onpolicy.envs.highway.highway_env.envs.common.action import Action
from onpolicy.envs.highway.highway_env.road.road import Road, RoadNetwork
from onpolicy.envs.highway.highway_env.vehicle.controller import ControlledVehicle
import random
class HighwayEnv(AbstractEnv):
"""
A highway driving environment.
The vehicle is driving on a straight highway with several lanes, and is rewarded for reaching a high speed,
staying on the rightmost lanes and avoiding collisions.
"""
RIGHT_LANE_REWARD: float = 0.1
"""The reward received when driving on the right-most lanes, linearly mapped to zero for other lanes."""
HIGH_SPEED_REWARD: float = 0.9
"""The reward received when driving at full speed, linearly mapped to zero for lower speeds according to config["reward_speed_range"]."""
LANE_CHANGE_REWARD: float = 0
"""The reward received at each lane change action."""
def _create_road(self) -> None:
"""Create a road composed of straight adjacent lanes."""
self.road = Road(network=RoadNetwork.straight_road_network(self.config["lanes_count"]),
np_random=self.np_random, record_history=self.config["show_trajectories"])
def _create_vehicles(self) -> None:
"""Create some new random vehicles of a given type, and add them on the road."""
# the number of agent with initialized postions overlapping with each other
self.controlled_vehicles = []
number_overlap = 0
for i in range(self.config["controlled_vehicles"]):
#for i in range(self.config["n_defenders"]):
# vehicle = self.action_type.vehicle_class.create_random(self.road,
# speed=25,
# lane_id=self.config["initial_lane_id"],
# spacing=self.config["ego_spacing"],
# )
default_spacing = 12.5 # 0.5 * speed
longitude_position = 40+5*np.random.randint(1)
initial_lane_idx = random.choice( [4*i for i in range(self.config["lanes_count"])] )
# To separate cars in different places to avoid collision
for vehicle_ in self.controlled_vehicles:
if abs(longitude_position - vehicle_.position[0]) < 5 and initial_lane_idx == 4*vehicle_.lane_index[2]:
longitude_position = longitude_position - (number_overlap+1)*default_spacing
number_overlap = number_overlap + 1
vehicle = self.action_type.vehicle_class(road=self.road, position=[longitude_position, initial_lane_idx], heading=0, speed=25)
self.controlled_vehicles.append(vehicle)
self.road.vehicles.append(vehicle)
vehicles_type = utils.class_from_path(self.config["npc_vehicles_type"])
for _ in range(self.config["vehicles_count"]):
vehicle = vehicles_type.create_random(self.road, spacing=1 / self.config["vehicles_density"])
self.road.vehicles.append(vehicle)
#self.controlled_vehicles.append(vehicle)
# observation size depents on the firstly controlled_vehicles in the initilization process
# but after defined the observation type doesn't change.
def _reward(self, action: Action) :#-> float: now we return a list
"""
The reward is defined to foster driving at high speed, on the rightmost lanes, and to avoid collisions.
:param action: the last action performed
:return: the corresponding reward
"""
# -> float: now we change it to return a list!!!!!
rewards=[]
for i,vehicle in enumerate(self.controlled_vehicles):
neighbours = self.road.network.all_side_lanes(vehicle.lane_index)
lane = vehicle.target_lane_index[2] if isinstance(vehicle, ControlledVehicle) \
else vehicle.lane_index[2]
scaled_speed = utils.lmap(vehicle.speed, self.config["reward_speed_range"], [0, 1])
reward = \
self.config["collision_reward"] * vehicle.crashed \
+ self.RIGHT_LANE_REWARD * lane / max(len(neighbours) - 1, 1) \
+ self.HIGH_SPEED_REWARD * np.clip(scaled_speed, 0, 1)
#reward = utils.lmap(reward,
# [self.config["collision_reward"], self.HIGH_SPEED_REWARD + self.RIGHT_LANE_REWARD],
# [0, 1])
#reward = 0 if not vehicle.on_road else reward
reward = -1 if not vehicle.on_road else reward
if self.config['task_type']=='attack':
if i>=self.config['n_defenders'] and i <(self.config['n_defenders']+self.config['n_attackers']):
reward*=0
reward = -0.5 if not vehicle.on_road or vehicle.crashed else 0
rewards.append(reward)
return rewards
def _is_done(self) :#-> bool:
"""The episode is over if the ego vehicle crashed or the time is out."""
####env done!
dones = []
for vehicle in self.controlled_vehicles:
dones.append(vehicle.crashed or \
self.steps >= self.config["duration"] or \
(self.config["offroad_terminal"] and not vehicle.on_road))
defender_done = dones[:self.config["n_defenders"]]
attacker_done = dones[self.config["n_defenders"]:self.config["n_defenders"] + self.config["n_attackers"]]
if np.all(defender_done):
return True
elif len(attacker_done)>0 and np.all(attacker_done):
return True
else:
return False
def _cost(self, action: int) -> float:
"""The cost signal is the occurrence of collision."""
return float(self.vehicle.crashed)
def get_available_actions(self):
"""
Get the list of currently available actions.
Lane changes are not available on the boundary of the road, and speed changes are not available at
maximal or minimal speed.
:return: the list of available actions
"""
from onpolicy.envs.highway.highway_env.envs.common.action import DiscreteMetaAction,MultiAgentAction
if isinstance(self.action_type, DiscreteMetaAction):
actions = [self.action_type.actions_indexes['IDLE']]
for l_index in self.road.network.side_lanes(self.vehicle.lane_index):
if l_index[2] < self.vehicle.lane_index[2] \
and self.road.network.get_lane(l_index).is_reachable_from(self.vehicle.position) \
and self.action_type.lateral:
actions.append(self.action_type.actions_indexes['LANE_LEFT'])
if l_index[2] > self.vehicle.lane_index[2] \
and self.road.network.get_lane(l_index).is_reachable_from(self.vehicle.position) \
and self.action_type.lateral:
actions.append(self.action_type.actions_indexes['LANE_RIGHT'])
if self.vehicle.speed_index < self.vehicle.SPEED_COUNT - 1 and self.action_type.longitudinal:
actions.append(self.action_type.actions_indexes['FASTER'])
if self.vehicle.speed_index > 0 and self.action_type.longitudinal:
actions.append(self.action_type.actions_indexes['SLOWER'])
return actions
elif isinstance(self.action_type, MultiAgentAction):
multi_actions=[]
for vehicle,action_type in zip(self.controlled_vehicles,self.action_type.agents_action_types):
actions = [action_type.actions_indexes['IDLE']]
for l_index in self.road.network.side_lanes(vehicle.lane_index):
if l_index[2] < vehicle.lane_index[2] \
and self.road.network.get_lane(l_index).is_reachable_from(self.vehicle.position) \
and action_type.lateral:
actions.append(action_type.actions_indexes['LANE_LEFT'])
if l_index[2] > vehicle.lane_index[2] \
and self.road.network.get_lane(l_index).is_reachable_from(self.vehicle.position) \
and action_type.lateral:
actions.append(action_type.actions_indexes['LANE_RIGHT'])
if vehicle.speed_index < vehicle.SPEED_COUNT - 1 and action_type.longitudinal:
actions.append(action_type.actions_indexes['FASTER'])
if vehicle.speed_index > 0 and action_type.longitudinal:
actions.append(action_type.actions_indexes['SLOWER'])
multi_actions.append(actions)
return multi_actions
register(
id='highway-v0',
entry_point='onpolicy.envs.highway.highway_env.envs:HighwayEnv',
)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
19720,
1330,
309,
29291,
198,
6738,
11550,
13,
268,
14259,
13,
2301,
33397,
1330,
7881,
198,
198,
6738,
319,
30586,
13,
268,
14259,
13,
8929,
1014,
13,
8929,
1014,
62,
24330,
1330,
3384,
4487,
... | 2.217099 | 4,164 |
import os
import sys
import pysam
import time
def count_depth(chr_name, size, threshold, input):
"""
Count the depth of the read. For each genomic coordinate return the
number of reads
-----
Parameters :
chr : (str) name of the chromosome
threshold : (int) minimum value to count pileup
-----
Returns :
int : count of pileups above threshold
"""
bp = 0
bamfile = pysam.AlignmentFile(input, 'rb')
for pileupcolumn in bamfile.pileup(chr_name):
depth = pileupcolumn.nsegments
if depth >= threshold:
bp += 1
bamfile.close()
return bp
bam = snakemake.input["samples"]
threshold = snakemake.params["threshold"]
print(f'Starting depth estimate for bam: {bam} at threshold {threshold}')
sum = 0
list_chrs, list_sizes = get_chromosomes_names(bam)
print("Found {} chromosomes to count".format(len(list_chrs)))
for chr, size in zip(list_chrs, list_sizes):
sum += count_depth(chr, size, threshold, bam)
print(f'Finished with chr: {chr}. {size} {sum}')
print(f'Total bases: {sum}')
with open(snakemake.output["samdepth"], 'w') as final:
final.write(f'{sum}\n')
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
279,
893,
321,
198,
11748,
640,
628,
198,
4299,
954,
62,
18053,
7,
354,
81,
62,
3672,
11,
2546,
11,
11387,
11,
5128,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
2764,
262,
6795,
... | 2.542299 | 461 |
from main import BotClient
import os
# run bot
bot = BotClient()
bot.run(os.getenv("TOKEN"))
| [
6738,
1388,
1330,
18579,
11792,
198,
11748,
28686,
198,
198,
2,
1057,
10214,
198,
13645,
796,
18579,
11792,
3419,
198,
13645,
13,
5143,
7,
418,
13,
1136,
24330,
7203,
10468,
43959,
48774,
198
] | 2.848485 | 33 |
# **************************************************************************** #
# #
# ::: :::::::: #
# __init__.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: winshare <tanwenxuan@live.com> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/02/28 11:45:13 by winshare #+# #+# #
# Updated: 2020/02/28 11:50:11 by winshare ### ########.fr #
# #
# **************************************************************************** #
| [
2,
41906,
17174,
46068,
1303,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.445705 | 617 |
import pytest
@pytest.mark.xfail(reason="apostrophe not escaped")
| [
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
26152,
603,
7,
41181,
2625,
499,
455,
22599,
407,
13537,
4943,
628,
628
] | 2.958333 | 24 |
# WetterKassel module
from config import config
from weatherapi import weatherapi, weathericons
from twitterapi import twitterapi
from tweetbuild import tweetbuild
| [
2,
32930,
353,
42,
292,
741,
8265,
220,
198,
6738,
4566,
1330,
4566,
220,
198,
6738,
6193,
15042,
1330,
6193,
15042,
11,
6193,
34280,
198,
6738,
17044,
15042,
1330,
17044,
15042,
198,
6738,
6126,
11249,
1330,
6126,
11249,
198
] | 4.25641 | 39 |
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
OAUTH_TOKEN = ''
OAUTH_TOKEN_SECRET = ''
ACCOUNTS = ['', '']
| [
10943,
50,
5883,
1137,
62,
20373,
796,
10148,
198,
10943,
50,
5883,
1137,
62,
23683,
26087,
796,
10148,
198,
23621,
24318,
62,
10468,
43959,
796,
10148,
198,
23621,
24318,
62,
10468,
43959,
62,
23683,
26087,
796,
10148,
198,
26861,
19385,... | 2.12766 | 47 |
"""
*************************************************
* @Project: Self Balance
* @Platform: Raspberry PI 2 B+
* @Description: GY80 Header - Orientation sensor via I2C bus
* HMC5883L (3-Axis Digital Compass)
* ADXL345 (3-Axis Digital Accelerometer)
* L3G4200D (3-Axis Angular Rate Sensor / Gyro)
* BMP085 (Barometric Pressure / Temperature Sensor)
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
import math
#
#L3G42000D Gyro Registers
#
L3G42000D_ADDRESS = 0x69 #I2C address, 0b11010001
L3G42000D_CTRL_REG1 = 0x20 #Enable Power and X,Y,Z axes
L3G42000D_CTRL_REG2 = 0x21 #Full scale selection
L3G42000D_CTRL_REG3 = 0x22
L3G42000D_CTRL_REG4 = 0x23
L3G42000D_CTRL_REG5 = 0x24
L3G42000D_OUT_X_L = 0x28 #X-axis data 0
L3G42000D_OUT_X_H = 0x29 #X-axis data 1
L3G42000D_OUT_Y_L = 0x2A #Y-axis data 0
L3G42000D_OUT_Y_H = 0x2B #Y-axis data 1
L3G42000D_OUT_Z_L = 0x2C #Z-axis data 0
L3G42000D_OUT_Z_H = 0x2D #Z-axis data 1
GYRO_SENSITIVITY = 0.07 # 2000dps datasheet
#
#ADXL345 Accel Registers
#
ADXL345_ADDRESS = 0x53 #I2C address
ADXL345_REG_DEVID = 0x00 # Device ID
ADXL345_REG_DATAX0 = 0x32 # X-axis data 0 (6 bytes for X/Y/Z)
ADXL345_REG_POWER_CTL = 0x2D # Power-saving features control
ADXL345_DATARATE_0_10_HZ = 0x00
ADXL345_DATARATE_0_20_HZ = 0x01
ADXL345_DATARATE_0_39_HZ = 0x02
ADXL345_DATARATE_0_78_HZ = 0x03
ADXL345_DATARATE_1_56_HZ = 0x04
ADXL345_DATARATE_3_13_HZ = 0x05
ADXL345_DATARATE_6_25HZ = 0x06
ADXL345_DATARATE_12_5_HZ = 0x07
ADXL345_DATARATE_25_HZ = 0x08
ADXL345_DATARATE_50_HZ = 0x09
ADXL345_DATARATE_100_HZ = 0x0A # (default)
ADXL345_DATARATE_200_HZ = 0x0B
ADXL345_DATARATE_400_HZ = 0x0C
ADXL345_DATARATE_800_HZ = 0x0D
ADXL345_DATARATE_1600_HZ = 0x0E
ADXL345_DATARATE_3200_HZ = 0x0F
ADXL345_RANGE_2_G = 0x00 # +/- 2g (default)
ADXL345_RANGE_4_G = 0x01 # +/- 4g
ADXL345_RANGE_8_G = 0x02 # +/- 8g
ADXL345_RANGE_16_G = 0x03 # +/- 16g
ACC_SCALE_MULTIPLIER = 0.004 #scale 255=1g=9.81m/s2 1/255=0.0004
#
#BMP085 Barometer Registers
#
BMP085_ADDRESS = 0x77 #I2C address
BMP085_CAL_AC1 = 0xAA # R Calibration data (16 bits)
BMP085_CAL_AC2 = 0xAC # R Calibration data (16 bits)
BMP085_CAL_AC3 = 0xAE # R Calibration data (16 bits)
BMP085_CAL_AC4 = 0xB0 # R Calibration data (16 bits)
BMP085_CAL_AC5 = 0xB2 # R Calibration data (16 bits)
BMP085_CAL_AC6 = 0xB4 # R Calibration data (16 bits)
BMP085_CAL_B1 = 0xB6 # R Calibration data (16 bits)
BMP085_CAL_B2 = 0xB8 # R Calibration data (16 bits)
BMP085_CAL_MB = 0xBA # R Calibration data (16 bits)
BMP085_CAL_MC = 0xBC # R Calibration data (16 bits)
BMP085_CAL_MD = 0xBE # R Calibration data (16 bits)
BMP085_CONTROL = 0xF4
BMP085_TEMPDATA = 0xF6
BMP085_PRESSUREDATA = 0xF6
BMP085_READTEMPCMD = 0x2E
BMP085_READPRESSURECMD = 0x34
# Operating Modes
BMP085_ULTRALOWPOWER = 0
BMP085_STANDARD = 1
BMP085_HIGHRES = 2
BMP085_ULTRAHIGHRES = 3
#
#HMC5883L Compass Registers
#
HMC5883L_ADDRESS = 0x1E #I2C address
HMC5883L_ConfigurationRegisterA = 0x00
HMC5883L_ConfigurationRegisterB = 0x01
HMC5883L_ModeRegister = 0x02
HMC5883L_AxisXDataRegisterMSB = 0x03
HMC5883L_AxisXDataRegisterLSB = 0x04
HMC5883L_AxisZDataRegisterMSB = 0x05
HMC5883L_AxisZDataRegisterLSB = 0x06
HMC5883L_AxisYDataRegisterMSB = 0x07
HMC5883L_AxisYDataRegisterLSB = 0x08
HMC5883L_StatusRegister = 0x09
HMC5883L_IdentificationRegisterA = 0x10
HMC5883L_IdentificationRegisterB = 0x11
HMC5883L_IdentificationRegisterC = 0x12
#Operations Modes
HMC5883L_MeasurementContinuous = 0x00
HMC5883L_MeasurementSingleShot = 0x01
HMC5883L_MeasurementIdle = 0x03
#
#Others
#
RAD_TO_DEG = 180.0/math.pi
DEG_TO_RAD = math.pi/180.0
EARTH_GRAVITY_MS2 = 9.80665 # earth acceleration
CF = 0.95 # Complementary filter constant
X = 0
Y = 1
Z = 2
#magOffsetPath = "magnetometer_calibration_offsets.txt"
magOffsetPath = "modules/IMU/Mag/magnetometer_calibration_offsets.txt"
| [
37811,
198,
17174,
8412,
9,
198,
9,
2488,
16775,
25,
12189,
22924,
220,
220,
198,
9,
2488,
37148,
25,
24244,
30434,
362,
347,
10,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.827913 | 2,429 |
"""Age prediction using MRI, fMRI and MEG data."""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from joblib import Memory, Parallel, delayed
from camcan.utils import run_ridge
from threadpoolctl import threadpool_limits
from camcan.processing import map_tangent
##############################################################################
# Paths
DRAGO_PATH = '/storage/inria/agramfor/camcan_derivatives'
OLEH_PATH = '/storage/tompouce/okozynet/projects/camcan_analysis/data'
PANDAS_OUT_FILE = './data/age_prediction_exp_data_denis_{}-rep.h5'
STRUCTURAL_DATA = f'{OLEH_PATH}/structural/structural_data.h5'
CONNECT_DATA_CORR = f'{OLEH_PATH}/connectivity/connect_data_correlation.h5'
CONNECT_DATA_TAN = f'{OLEH_PATH}/connectivity/connect_data_tangent.h5'
MEG_EXTRA_DATA = './data/meg_extra_data.h5'
MEG_PEAKS = './data/evoked_peaks.csv'
MEG_PEAKS2 = './data/evoked_peaks_task_audvis.csv'
##############################################################################
# Control paramaters
# common subjects 574
N_REPEATS = 10
N_JOBS = 10
N_THREADS = 6
REDUCE_TO_COMMON_SUBJECTS = False
memory = Memory(location=DRAGO_PATH)
##############################################################################
# MEG features
#
# 1. Marginal Power
# 2. Cross-Power
# 3. Envelope Power
# 4. Envelope Cross-Power
# 5. Envelope Connectivity
# 6. Envelope Orthogonalized Connectivity
# 7. 1/f
# 8. Alpha peak
# 9. ERF delay
FREQ_BANDS = ('alpha',
'beta_high',
'beta_low',
'delta',
'gamma_high',
'gamma_lo',
'gamma_mid',
'low',
'theta')
meg_source_types = (
'mne_power_diag',
'mne_power_cross',
'mne_envelope_diag',
'mne_envelope_cross',
'mne_envelope_corr',
'mne_envelope_corr_orth'
)
def vec_to_sym(data, n_rows, skip_diag=True):
"""Put vector back in matrix form"""
if skip_diag:
k = 1
# This is usually true as we write explicitly
# the diag info in asecond step and we only
# store the upper triangle, hence all files
# have equal size.
else:
k = 0
C = np.zeros((n_rows, n_rows), dtype=np.float64)
C[np.triu_indices(n=n_rows, k=k)] = data
C += C.T
if not skip_diag:
C.flat[::n_rows + 1] = np.diag(C) / 2.
return C
@memory.cache
def read_meg_rest_data(kind, band, n_labels=448):
"""Read the resting state data (600 subjects)
Read connectivity outptus and do some additional
preprocessing.
Parameters
----------
kind : str
The type of MEG feature.
band : str
The frequency band.
n_label: int
The number of ROIs in source space.
"""
if kind == 'mne_power_diag':
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_source_power_diag-{band}.h5'),
key=kind)
elif kind == 'mne_power_cross':
# We need the diagonal powers to do tangent mapping.
# but then we will discard it.
diag = read_meg_rest_data(kind='mne_power_diag', band=band)
# undp log10
diag = diag.transform(lambda x: 10 ** x)
index = diag.index.copy()
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_source_power_cross-{band}.h5'),
key=kind)
covs = make_covs(diag, data, n_labels)
data = map_tangent(covs, diag=True)
data = pd.DataFrame(data=data, index=index)
if kind == 'mne_envelope_diag':
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_diag_{band}.h5'),
key=kind)
elif kind == 'mne_envelope_cross':
# We need the diagonal powers to do tangent mapping.
# but then we will discard it.
diag = read_meg_rest_data(kind='mne_envelope_diag', band=band)
# undp log10
diag = diag.transform(lambda x: 10 ** x)
index = diag.index.copy()
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_cross_{band}.h5'),
key=kind)
covs = make_covs(diag, data, n_labels)
data = map_tangent(covs, diag=True)
data = pd.DataFrame(data=data, index=index)
elif kind == 'mne_envelope_corr':
# The diagonal is simply one.
diag = 1.0
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_corr_{band}.h5'),
key=kind)
index = data.index.copy()
data = map_tangent(make_covs(diag, data, n_labels),
diag=True)
data = pd.DataFrame(data=data, index=index)
elif kind == 'mne_envelope_corr_orth':
data = pd.read_hdf(
op.join(DRAGO_PATH, f'mne_envelopes_corr_orth_{band}.h5'), key=kind)
# The result here is not an SPD matrix.
# We do do Fisher's Z-transform instead.
# https://en.wikipedia.org/wiki/Fisher_transformation
data = data.transform(np.arctanh)
return data
meg_power_alpha = read_meg_rest_data(
kind='mne_power_diag', band='alpha')
meg_power_subjects = set(meg_power_alpha.index)
# source level subjects all the same for resting state
##############################################################################
# MRI features
area_data = pd.read_hdf(STRUCTURAL_DATA, key='area')
thickness_data = pd.read_hdf(STRUCTURAL_DATA, key='thickness')
volume_data = pd.read_hdf(STRUCTURAL_DATA, key='volume')
# read connectivity data
connect_data_tangent_modl = pd.read_hdf(CONNECT_DATA_TAN, key='modl256')
fmri_subjects = set(connect_data_tangent_modl.index)
##############################################################################
# Bundle all data
# Add extra dfeatures
meg_extra = pd.read_hdf(MEG_EXTRA_DATA, key='MEG_rest_extra')
meg_peaks = pd.read_csv(MEG_PEAKS).set_index('subject')[['aud', 'vis']]
meg_peaks2 = pd.read_csv(MEG_PEAKS2).set_index('subject')
meg_common_subjects = (meg_power_subjects.intersection(meg_extra.index)
.intersection(meg_peaks.index)
.intersection(meg_peaks2.index))
meg_union_subjects = (meg_power_subjects.union(meg_extra.index)
.union(meg_peaks.index)
.union(meg_peaks2.index))
print(f"Got {len(meg_union_subjects)} (union) and "
f"{len(meg_common_subjects)} (intersection) MEG subject")
common_subjects = list(meg_common_subjects.intersection(area_data.index)
.intersection(thickness_data.index)
.intersection(volume_data.index)
.intersection(fmri_subjects))
common_subjects.sort()
union_subjects = list(meg_union_subjects.union(area_data.index)
.union(thickness_data.index)
.union(volume_data.index)
.union(fmri_subjects))
union_subjects.sort()
print(f"Got {len(union_subjects)} (union) and "
f"{len(common_subjects)} (intersection) subjects")
if REDUCE_TO_COMMON_SUBJECTS:
union_subjects = common_subjects[:]
print(f"Using {len(union_subjects)} subjects")
# read information about subjects
subjects_data = pd.read_csv('./data/participant_data.csv', index_col=0)
# for storing predictors data
subjects_template = pd.DataFrame(index=union_subjects,
dtype=float)
subjects_predictions = subjects_data.loc[subjects_template.index, ['age']]
print('Data was read successfully.')
data_ref = {
'MEG 1/f low': meg_extra[
[cc for cc in meg_extra.columns if '1f_low' in cc]],
'MEG 1/f gamma': meg_extra[
[cc for cc in meg_extra.columns if '1f_gamma' in cc]],
'MEG alpha_peak': meg_extra[['alpha_peak']],
'MEG aud': meg_peaks[['aud']],
'MEG vis': meg_peaks[['vis']],
'MEG audvis': meg_peaks2[['audvis']],
'Cortical Surface Area': area_data,
'Cortical Thickness': thickness_data,
'Subcortical Volumes': volume_data,
'Connectivity Matrix, MODL 256 tan': connect_data_tangent_modl,
}
for band in FREQ_BANDS:
for kind in meg_source_types:
data_ref[f"MEG {kind} {band}"] = dict(kind=kind, band=band)
for kind in ('mne_power_diag', 'mne_envelope_diag'):
this_data = list()
for band in FREQ_BANDS:
band_data = read_meg_rest_data(kind=kind, band=band)
band_data.columns = [cc + f'_{band}' for cc in band_data.columns]
this_data.append(band_data)
this_data = pd.concat(this_data, axis=1)
key = f'MEG {"power" if "power" in kind else "envelope"} diag'
data_ref[key] = this_data
##############################################################################
# Main analysis
out = Parallel(n_jobs=40)(delayed(run_10_folds)(data_ref, repeat)
for repeat in range(N_REPEATS))
out = zip(*out)
regression_mae = pd.concat(next(out), axis=0)
regression_r2 = pd.concat(next(out), axis=0)
subjects_predictions = pd.concat(next(out), axis=0)
learning_curves = next(out)
# # save results
PANDAS_OUT_FILE = PANDAS_OUT_FILE.format(N_REPEATS)
with open(f'./data/learning_curves_denis_{N_REPEATS}.pkl', 'wb') as handle:
pickle.dump(learning_curves, handle, protocol=pickle.HIGHEST_PROTOCOL)
if not REDUCE_TO_COMMON_SUBJECTS:
PANDAS_OUT_FILE = PANDAS_OUT_FILE.replace('exp_data', 'exp_data_na')
subjects_predictions.to_hdf(PANDAS_OUT_FILE, key='predictions', complevel=9)
regression_mae.to_hdf(PANDAS_OUT_FILE, key='regression', complevel=9)
regression_r2.to_hdf(PANDAS_OUT_FILE, key='r2', complevel=9)
| [
37811,
23396,
17724,
1262,
30278,
11,
277,
40952,
290,
337,
7156,
1366,
526,
15931,
198,
2,
6434,
25,
33089,
317,
13,
1985,
368,
1236,
1279,
6559,
271,
13,
1516,
368,
1236,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
13789,
25,
347,
1... | 2.183199 | 4,476 |
from __future__ import absolute_import
import logging
import six
import time
import threading
from appenlight_client.utils import import_from_module
from datetime import datetime, timedelta
from functools import wraps
from operator import itemgetter
default_timer = time.time
TIMING_REGISTERED = False
local_timing = threading.local()
appenlight_storage = AppenlightLocalStorage()
log = logging.getLogger(__name__)
def _e_trace(info_gatherer, min_duration, e_callable, *args, **kw):
""" Used to wrap dbapi2 driver methods """
start = default_timer()
result = e_callable(*args, **kw)
end = default_timer()
info = {'start': start,
'end': end,
'min_duration': min_duration}
info.update(info_gatherer(*args, **kw))
appenlight_storage = get_local_storage()
if len(appenlight_storage.slow_calls) < 1000:
appenlight_storage.slow_calls.append(info)
return result
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
18931,
198,
11748,
2237,
198,
11748,
640,
198,
11748,
4704,
278,
198,
198,
6738,
598,
268,
2971,
62,
16366,
13,
26791,
1330,
1330,
62,
6738,
62,
21412,
198,
6738,
4818,
8079,
... | 2.777448 | 337 |
import numpy as np
import torch
import torch.nn.functional as F
import scipy.sparse as sps
from scipy.linalg import eigh, inv, det
from numpy import zeros
import math
from .source import Source
from utils import roll
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
82,
198,
6738,
629,
541,
88,
13,
75,
1292,
70,
1330,
304,
394,
11,
800,
1... | 3.142857 | 70 |
# -*- coding: utf-8 -*-
import os.path
import unittest
import unittest.mock
from prajna.generators import Sloka, SlokaGenerator
import pelican.settings
from pelican.writers import Writer
from pyfakefs import fake_filesystem
from sure import expect
class SlokaGeneratorTests(unittest.TestCase):
"""
Tests for sloka generators.
- test_generate_context_self.settings
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
13,
6978,
198,
11748,
555,
715,
395,
198,
11748,
555,
715,
395,
13,
76,
735,
198,
198,
6738,
7201,
73,
2616,
13,
8612,
2024,
1330,
3454,
17411,
11,
3454,... | 2.932331 | 133 |
from django import forms
from django.forms import inlineformset_factory
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
26098,
687,
2617,
62,
69,
9548,
220,
220,
628,
198
] | 3.454545 | 22 |
from django.db import models
from django.core.cache import cache
from django.http import Http404
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
26429,
628,
198
] | 3.413793 | 29 |
import httplib
import time
import socket
import logging
import smtplib
import sys
sys.path.append("YoPy")
import yopy
import configReader
config = configReader.ConfigReader()
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def ping():
'''
if you want to simulate some errors you may try returning one of the following:
-> raise socket.timeout
-> return 404,"lol"
'''
conn = httplib.HTTPConnection(config.read_url(), timeout=config.read_connection_timeout())
conn.request("HEAD", "/")
ping = conn.getresponse()
return ping.status, ping.reason
main()
| [
11748,
1841,
489,
571,
198,
11748,
640,
198,
11748,
17802,
198,
11748,
18931,
198,
11748,
895,
83,
489,
571,
198,
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7203,
38101,
20519,
4943,
198,
11748,
331,
11081,
198,
11748,
4566,
33634,
... | 3.069652 | 201 |
import requests
url = 'https://finance.naver.com/item/board.nhn?code=000020&page=1'
req = requests.get(url)
html = req.text
print(html)
| [
11748,
7007,
220,
198,
198,
6371,
796,
705,
5450,
1378,
69,
14149,
13,
2616,
332,
13,
785,
14,
9186,
14,
3526,
13,
77,
21116,
30,
8189,
28,
2388,
1238,
5,
7700,
28,
16,
6,
198,
42180,
796,
7007,
13,
1136,
7,
6371,
8,
198,
6494,
... | 2.509091 | 55 |
from sklearn.linear_model import LogisticRegression
import argparse
import os
import numpy as np
from sklearn.metrics import mean_squared_error
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
from azureml.core.run import Run
from azureml.data.dataset_factory import TabularDatasetFactory
# TODO: Create TabularDataset using TabularDatasetFactory
# Data is located at:
# "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv"
### YOUR CODE HERE ###
run = Run.get_context()
if __name__ == '__main__':
main() | [
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
16485,
1144,
62,
18224,
198,
... | 3.036697 | 218 |
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from dateutil import parser
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
# See definition of duration field
# https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file
duration = RegexValidator(
r"[0-9]+(ms|[smhdwy])",
"Invalid or missing duration suffix. Example: 30s, 5m, 1h ([0-9]+(ms|[smhdwy])",
)
# Label Value Definition
# https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
metricname = RegexValidator(
r"[a-zA-Z_:][a-zA-Z0-9_:]*", "Only alphanumeric characters are allowed."
)
labelname = RegexValidator(
r"[a-zA-Z_][a-zA-Z0-9_]*", "Only alphanumeric characters are allowed."
)
# While Prometheus accepts label values of any unicode character, our values sometimes
# make it into URLs, so we want to make sure we do not have stray / characters
labelvalue = RegexValidator(
r"^[\w][- \w]+\Z", "Unicode letters, numbers, underscores, or hyphens or spaces"
)
| [
2,
15069,
357,
66,
8,
2177,
48920,
10501,
198,
2,
2312,
4237,
389,
2716,
739,
262,
2846,
286,
262,
17168,
5964,
25,
766,
38559,
24290,
198,
198,
6738,
3128,
22602,
1330,
30751,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
... | 3.010782 | 371 |
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "0ug+9=vnaebyh$s*i05w!m5@(0(w!p#13(pj(048+e+j4d*fq1"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get('DEBUG',default=0))
# ALLOWED_HOSTS
ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS').split(" ")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'userauth',
'ideas_core_api',
'knox'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Ideas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Ideas.wsgi.application'
# Database
DATABASES = {
'default': {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
"NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# USER MODEL
AUTH_USER_MODEL = 'userauth.Thinker'
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES':
('knox.auth.TokenAuthentication',)
}
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
2,
10934,
13532,
2641,
262,
1628,
588,
428,
25,
49688,
62,
34720,
1220,
705,
7266,
15908,
4458,
198,
33,
11159,
62,
34720,
796,
10644,
7,
834,
7753,
834,
737,
411,
6442,
2244... | 2.272861 | 1,356 |
"""
utility functions
"""
from itertools import chain
import automol
from ioformat import indent
def pt_format(header, hess, vlabel, vval,
slabel=None, sval=None, geo=None, grad=None):
""" write a point string
"""
# Intialize with header
pt_str = '*{}'.format(header)
pt_str += '\n\n'
# Write the energy and coordinate along reaction coordinate
pt_str += energy_format(vlabel, vval)
pt_str += '\n'
if sval is not None:
pt_str += energy_format(slabel, sval)
pt_str += '\n'
pt_str += '\n'
# Write the structurea information
if geo is not None:
pt_str += geometry_format(geo)
pt_str += '\n\n'
if grad is not None:
pt_str += gradient_format(grad)
pt_str += '\n\n'
pt_str += hessian_format(hess)
pt_str += '\n'
pt_str = indent(pt_str, 2)
return pt_str
def energy_format(label, ene):
""" write an energy
"""
assert label in ('smep', 'vmep', 'svalue', 'vvalue'), (
'Label {} != smep, vmep, svalue, or vvalue'.format(label)
)
ene_str = '{0:<8s}{1:<14.12f}'.format(label, ene)
return ene_str
def geometry_format(geo):
""" Write geom
"""
xyzs = tuple(xyz for _, xyz in geo)
geo_str = automol.util.mat.string(xyzs, val_format='{:>12.8f}')
return _end_format('geom', 'end', geo_str)
def gradient_format(grad):
""" format hessian
"""
grad_str = automol.util.mat.string(grad, val_format='{:>12.8f}')
return _end_format('grads', 'end', grad_str)
def hessian_format(hess):
""" format hessian
"""
hess = list(chain.from_iterable(hess))
hess_str = automol.util.vec.string(
hess, num_per_row=6, val_format='{0:>12.8f}')
return _end_format('hessian', 'end', hess_str)
def list_format(lst):
""" Unpack a list of values and write them to a string
"""
return '\n'.join(lst)
def _end_format(header, ender, dat_str):
""" Write a block with an end
"""
return (
header + '\n' +
dat_str + '\n' +
ender
)
| [
37811,
198,
315,
879,
5499,
198,
37811,
198,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
11748,
3557,
349,
198,
6738,
33245,
18982,
1330,
33793,
628,
198,
4299,
42975,
62,
18982,
7,
25677,
11,
339,
824,
11,
410,
18242,
11,
410,
2100,... | 2.230193 | 934 |
from .data_check import *
from .functions import *
from .information_theory import *
from .qpfs_body import qpfs_body
| [
6738,
764,
7890,
62,
9122,
1330,
1635,
198,
6738,
764,
12543,
2733,
1330,
1635,
198,
6738,
764,
17018,
62,
1169,
652,
1330,
1635,
198,
6738,
764,
80,
79,
9501,
62,
2618,
1330,
10662,
79,
9501,
62,
2618,
198
] | 3.105263 | 38 |
import unittest
import discretize
import numpy as np
from SimPEG import survey, utils, data
np.random.seed(100)
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
1221,
1186,
1096,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3184,
47,
7156,
1330,
5526,
11,
3384,
4487,
11,
1366,
198,
198,
37659,
13,
25120,
13,
28826,
7,
3064,
8,
628,
198,
198,
361,
11593,
3... | 2.672131 | 61 |
# Implements a min-heap. For max-heap, simply reverse all comparison orders.
#
# Note on alternate subroutine namings (used in some textbooks):
# - _bubble_up = siftdown
# - _bubble_down = siftup
# Example usage
heap = [3, 2, 1, 0]
heapify(heap)
print('Heap(0, 1, 2, 3):', heap)
heappush(heap, 4)
heappush(heap, 7)
heappush(heap, 6)
heappush(heap, 5)
print('Heap(0, 1, 2, 3, 4, 5, 6, 7):', heap)
sorted_list = [heappop(heap) for _ in range(8)]
print('Heap-sorted list:', sorted_list)
# Large test case, for randomized tests
import random
# Heapify 0 ~ 99
heap = list(range(100))
random.shuffle(heap)
heapify(heap)
# Push 100 ~ 199 in random order
new_elems = list(range(100, 200))
random.shuffle(new_elems)
for elem in new_elems:
heappush(heap, elem)
sorted_list = [heappop(heap) for _ in range(200)]
print(sorted_list == sorted(sorted_list))
| [
2,
1846,
1154,
902,
257,
949,
12,
258,
499,
13,
1114,
3509,
12,
258,
499,
11,
2391,
9575,
477,
7208,
6266,
13,
198,
2,
198,
2,
5740,
319,
13527,
850,
81,
28399,
299,
321,
654,
357,
1484,
287,
617,
31814,
2599,
198,
2,
220,
220,
... | 2.364384 | 365 |