content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
#!/usr/bin/env python3
"""
doc: https://raw.githubusercontent.com/nasa/SC/master/docs/users_guide/CFS%20SC%20User%20Guide%20Doc%20No%20582-2012-003%20Ver%201.1%202014_12-18.pdf
Table specifications: https://github.com/nasa/SC/tree/master/fsw/tables
Table image examples: https://github.com/solar-wine/flatsat_data_firmware/tree/master/upgrade/cFS/modules
"""
from scapy.all import *
from ccsds_base import CCSDSPacket
from packets_eyassat_if_cmd import *
class ATC(Packet):
"""
Absolute Time Command
"""
fields_desc = [
# unique number
LEShortField("ID", 0),
# seconds
IntField("TimeTag", 0),
ConditionalField(
FCSField("Padding", 0, fmt="B"),
lambda pkt: len(pkt.payload) % 2 != 0 # XXX check
),
]
bind_layers(ATC, CCSDSPacket)
class RTC(Packet):
"""
Relative Time Command
doc: sc_rts*.c (https://github.com/nasa/SC/tree/master/fsw/tables)
"""
fields_desc = [
# seconds
ShortField("TimeTag", 0),
]
bind_layers(RTC, CCSDSPacket)
if __name__ == '__main__':
RTCs = [
# EYASSAT_IF_ADCS_PWM_BASELINE_CmdPkt
RTC(TimeTag=0) / CCSDSPacket(apid=469, cmd_func_code=5) / EYASSAT_IF_ADCS_PWM_BASELINE_CmdPkt(PWM=0),
]
image = RTSFile(RTS=RTCs)
with open('stop_flywheel-rts.tbl', 'wb') as f:
f.write(bytes(image))
f.write(b'\x00' * 1000)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
15390,
25,
3740,
1378,
1831,
13,
12567,
43667,
13,
785,
14,
77,
15462,
14,
6173,
14,
9866,
14,
31628,
14,
18417,
62,
41311,
14,
34,
10652,
4,
1238,
6173,
4,
1238,
1... | 2.08651 | 682 |
import cv2
cap = cv2.VideoCapture(0) # 0 => pc kamerası, 1 => usb'ye bağlı kamera, 2 =>
# video'yu kaydetmek için
fourcc = cv2.VideoWriter_fourcc(*'XVID') # 4 byte'lık video codec kodu alarak int veri döndürür.
out = cv2.VideoWriter('img/output.avi', fourcc, 20.0, (640,480)) # video adı, codec code, fps, video size
while True:
ret, frame = cap.read() # kameradan o anki görüntü okunuyor. ret => kamera çalışıp çalışmadığını döndürür.
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) # frame genişliğini döndürür
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # frame yüksekliğini döndürür
out.write(frame)
cv2.imshow("camera",frame) # görüntü ekrana bastırılıyor.
if cv2.waitKey(30) & 0xFF == ord('q'): # 30 ms'de bir görüntü alınıyor ve q'ya basılırsa döngüden çıkılıyor.
break
cap.release() # kamera serbest bırakılıyor.
out.release() # kayıt çıktısı serbest bırakılıyor.
cv2.destroyAllWindows() | [
11748,
269,
85,
17,
198,
198,
11128,
796,
269,
85,
17,
13,
10798,
49630,
7,
15,
8,
1303,
657,
5218,
40653,
479,
2382,
292,
30102,
11,
352,
5218,
38551,
6,
5948,
26605,
33133,
75,
30102,
479,
18144,
11,
362,
5218,
220,
198,
198,
2,... | 2.071588 | 447 |
c = 0
while True:
n = int(input('Digite um número para ver sua tabuada: '))
c += 1
if n < 0:
print('Programa encerrado!')
break
else:
for m in range(1, 11):
print(f'{n} x {m} = {n * m}')
| [
66,
796,
657,
198,
4514,
6407,
25,
198,
220,
220,
220,
299,
796,
493,
7,
15414,
10786,
19511,
578,
23781,
299,
21356,
647,
78,
31215,
3326,
424,
64,
7400,
84,
4763,
25,
705,
4008,
198,
220,
220,
220,
269,
15853,
352,
198,
220,
220... | 1.832061 | 131 |
from django.template import Template, RequestContext
| [
6738,
42625,
14208,
13,
28243,
1330,
37350,
11,
19390,
21947,
628
] | 4.909091 | 11 |
__all__ = ["allchecks", "checkrsa", "checkpubkey", "checkprivkey", "checkcrt",
"checkcsr", "checksshpubkey", "detectandcheck"]
from .checks import (allchecks, checkrsa, checkpubkey, checkprivkey, checkcrt,
checkcsr, checksshpubkey, detectandcheck)
| [
834,
439,
834,
796,
14631,
439,
42116,
1600,
366,
9122,
3808,
64,
1600,
366,
9122,
12984,
2539,
1600,
366,
9122,
13776,
2539,
1600,
366,
9122,
6098,
83,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
9122,
6359,
8... | 2.413793 | 116 |
#!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
from json.decoder import JSONDecodeError
from seqgen import Sequences
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=('Create genetic sequences according to a '
'JSON specification file and write them to stdout.'))
parser.add_argument(
'--specification', metavar='FILENAME', default=sys.stdin, type=open,
help=('The name of the JSON sequence specification file. Standard input '
'will be read if no file name is given.'))
parser.add_argument(
'--defaultIdPrefix', metavar='PREFIX', default=Sequences.DEFAULT_ID_PREFIX,
help=('The default prefix that sequence ids should have (for those that '
'are not named individually in the specification file) in the '
'resulting FASTA. Numbers will be appended to this value.'))
parser.add_argument(
'--defaultLength', metavar='N', default=Sequences.DEFAULT_LENGTH, type=int,
help=('The default length that sequences should have (for those that do '
'not have their length given in the specification file) in the '
'resulting FASTA.'))
args = parser.parse_args()
try:
sequences = Sequences(args.specification,
defaultLength=args.defaultLength,
defaultIdPrefix=args.defaultIdPrefix)
except JSONDecodeError:
print('Could not parse your specification JSON. Stacktrace:',
file=sys.stderr)
raise
else:
for sequence in sequences:
print(sequence.toString('fasta'), end='')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
33918,
13,
12501,
12342,
1330,
19449,
10707,
1098,
12331,
198,
6738,
33756,
... | 2.78438 | 589 |
from Calculator.calculator import Calculator
from StatsCalculations.mean import mean
from StatsCalculations.median import median
from StatsCalculations.mode import mode
from StatsCalculations.variance import variance
from StatsCalculations.standardDeviation import standard_deviation
# Source for mean, median, and mode:
# https://www.geeksforgeeks.org/finding-mean-median-mode-in-python-without-libraries/
# Source for variance: https://www.geeksforgeeks.org/python-variance-of-list/
# Source for standard deviation: https://www.geeksforgeeks.org/python-standard-deviation-of-list/
| [
6738,
43597,
13,
9948,
3129,
1352,
1330,
43597,
198,
6738,
20595,
9771,
3129,
602,
13,
32604,
1330,
1612,
198,
6738,
20595,
9771,
3129,
602,
13,
1150,
666,
1330,
14288,
198,
6738,
20595,
9771,
3129,
602,
13,
14171,
1330,
4235,
198,
6738... | 3.520958 | 167 |
import nml
import locator
from ooo import main
| [
11748,
299,
4029,
198,
11748,
1179,
1352,
198,
6738,
267,
2238,
1330,
1388,
198
] | 3.357143 | 14 |
import os
import unittest
from ..test.factory import suite
from ..test.protein import Protein_Test
from ..test.general import General_Test
from ..calculators.gebf_dft import GEBF_DFT
from ..calculators.gebf_pm6 import GEBF_PM6
path = os.path.join(os.getcwd(), "data", "systems")
proteins_path = os.path.join(path, "proteins")
| [
11748,
28686,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
11485,
9288,
13,
69,
9548,
1330,
18389,
198,
6738,
11485,
9288,
13,
48693,
1330,
31702,
62,
14402,
198,
6738,
11485,
9288,
13,
24622,
1330,
3611,
62,
14402,
198,
6738,
11485,... | 2.788136 | 118 |
from setuptools import setup
setup(name='venv_easy',
version='0.1',
description='Easily automating virtual environment creation and implementation from within your Python3 Application.',
url='https://github.com/AndrewNeudegg/venv_easy',
author='Andrew Neudegg',
author_email='andrew.neudegg@gmail.com',
license='MIT',
packages=['venv_easy'],
zip_safe=False)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
3672,
11639,
574,
85,
62,
38171,
3256,
198,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
16,
3256,
198,
220,
220,
220,
220,
220,
6764,
11639,
36,
292,
813,
3557,
803,
7166,
... | 2.695364 | 151 |
import tensorflow as tf
import numpy as np
import os
batch_size = 10
img_path = "/gpfs/fs01/user/s076-844c78348e985f-04662317cedd/notebook/work/Dataset/flickr30k-images/"
try:
files = sorted(np.array(os.listdir("/gpfs/fs01/user/s076-844c78348e985f-04662317cedd/notebook/work/Dataset/flickr30k-images/")))
n_batch = len(files) / batch_size
except:
pass
with open('/gpfs/global_fs01/sym_shared/YPProdSpark/user/s076-844c78348e985f-04662317cedd/notebook/work/farman-image-caption/ConvNets/inception_v4.pb', 'rb') as f:
fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
tf.import_graph_def(graph_def)
graph = tf.get_default_graph()
input_layer = graph.get_tensor_by_name("import/InputImage:0")
output_layer = graph.get_tensor_by_name(
"import/InceptionV4/Logits/AvgPool_1a/AvgPool:0")
'''
OLD PRE-PROCESSING MODULES : SLOW
import cv2
from PIL import Image
def old_load_image(x, new_h=299, new_w=299):
image = Image.open(x)
h, w = image.size
if image.format != "PNG":
image = np.asarray(image)/255.0
else:
image = np.asarray(image)/255.0
image = image[:,:,:3]
##To crop or not?
if w == h:
resized = cv2.resize(image, (new_h,new_w))
elif h < w:
resized = cv2.resize(image, (int(w * float(new_h)/h), new_w))
crop_length = int((resized.shape[1] - new_h) / 2)
resized = resized[:,crop_length:resized.shape[1] - crop_length]
else:
resized = cv2.resize(image, (new_h, int(h * float(new_w) / w)))
crop_length = int((resized.shape[0] - new_w) / 2)
resized = resized[crop_length:resized.shape[0] - crop_length,:]
return cv2.resize(image, (new_h, new_w))
'''
if __name__ == "__main__":
print "#Images:", len(files)
print "Extracting Features"
io = build_prepro_graph()
forward_pass(io)
print "done"
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
198,
43501,
62,
7857,
796,
838,
198,
9600,
62,
6978,
796,
12813,
31197,
9501,
14,
9501,
486,
14,
7220,
14,
82,
2998,
21,
12,
23,
2598,
... | 2.170872 | 872 |
from flask import Flask,render_template
app = Flask(__name__)
@app.route("/")
@app.route("/pagina2")
if __name__ == "__main__":
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
13287,
62,
28243,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
31,
1324,
13,
38629,
7203,
14,
4943,
198,
198,
31,
1324,
13,
38629,
7203,
14,
79,
363,
1437,
17,
4943,
198,
220,
220,
220,... | 2.120482 | 83 |
import unittest
from predict_model import main
| [
11748,
555,
715,
395,
198,
198,
6738,
4331,
62,
19849,
1330,
1388,
628
] | 3.769231 | 13 |
"""
This file contains custom elements defined by Adriaan Rol and Felix Schmidt
The intention is that these get merged into SchemDraw.elements after cleaning
up so as to merge them into the master of CDelker
"""
import numpy as np
import SchemDraw.elements as e
# TODO: SQUID
# TODO: SQUID with flux bias line
# TODO: SQUID with gate voltage line
# TODO: BIAS_TEE
_gap = [np.nan, np.nan]
# Transmission line
# TODO: it would be nice if it was possible to draw the inner conductor in grey, but I don't see a way to set the edgecolor of paths/poly
_tl_r = .5
tllength = 6
x0 = 0.5+_tl_r
TL = {
'name': 'TL',
'paths': [[[0, 0], [x0, 0], _gap, [x0, _tl_r], [tllength-x0-_tl_r, _tl_r], _gap, [x0, -_tl_r], [tllength-x0-_tl_r, -_tl_r], _gap, [tllength-x0-0.25*_tl_r, 0], [tllength-0.5, 0]]],
'shapes': [
{'shape': 'arc',
'center': [x0, 0],
'theta1': 90,
'theta2': 270,
'width': 1.25*_tl_r,
'height': 2*_tl_r},
{'shape': 'arc',
'center': [x0, 0],
'theta1': -90,
'theta2': 90,
'width': 1.25*_tl_r,
'height': 2*_tl_r},
{'shape': 'arc',
'center': [tllength-x0-.5, 0],
'theta1': -90,
'theta2': 90,
'width': 1.25*_tl_r,
'height': 2*_tl_r}
],
'extend': False
}
# Josephson junction with gate electrode
jjgh = 0.25
jjgc = 0.4
JJG = {
'name': 'JJG',
'base': e.JJ,
'paths': [[[-jjgc, -2*jjgh], [jjgc, -2*jjgh]],
[[0, -2*jjgh], [0, -4*jjgh]]],
'lblloc': 'bot',
'anchors': {'gate': [0, jjgh*-4]}
}
# Low pass filter
LOW_PASS = {
'name': 'LOW_PASS',
'base': e.RBOX,
'paths': [[[0.15, 0.05],
[0.6, 0.05],
[0.8, -.15]]]
}
# PI filter
PI_FILTER = {
'name': 'PI_FILTER',
'base': e.RBOX,
'labels': [{'label': '$\pi$', 'pos': [.5, 0]}]
}
# Single port amplifier
AMP = {'name': 'AMP',
'paths': [[[0, 0],
[np.nan, np.nan],
[0.7, 0]]],
'anchors': {'center': [2, 0]},
'shapes': [{'shape': 'poly', 'xy': np.array([[0., 0.5],
[0.7, 0.],
[0., -0.5]]), 'fill': False}]}
dircoup_w = 2
dircoup_h = .5
h_offset = 0.01
dx = .07
dy = .07
# Directional coupler
DIR_COUP = {
'name': 'DIR_COUP',
'paths': [[[0, h_offset], [0, dircoup_h], [dircoup_w, dircoup_h], [dircoup_w, -dircoup_h],
[0, -dircoup_h], [0, h_offset], [dircoup_w, h_offset]
]],
'shapes': [{'shape': 'arc',
'center': [dircoup_w*.9, -dircoup_h],
'theta1':90, 'theta2':180,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'arc',
'center': [dircoup_w*.1, -dircoup_h],
'theta1':0, 'theta2':90,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'circle',
'center': [dircoup_w*.333, -dircoup_h],
'radius':dx,
'fill': True,
'fillcolor':'black'
},
{'shape': 'circle',
'center': [dircoup_w*.666, -dircoup_h],
'radius':dx,
'fill': True,
'fillcolor':'black'
},
{'shape': 'circle',
'center': [0, 0],
'radius':dx,
'fill': True,
'fillcolor':'black'
},
{'shape': 'circle',
'center': [dircoup_w, h_offset],
'radius':dx,
'fill': True,
'fillcolor':'black'
},
],
'anchors': {'port3': [dircoup_w*.333, -dircoup_h], 'port4': [dircoup_w*.666, -dircoup_h]}
}
IQMIXER = {
'name': 'IQMIXER',
'base': e.SOURCE,
'paths': [[[-.35+dx, -.35], [.35+dx, .35],
[np.nan, np.nan],
[.35+dx, -.35], [-.35+dx, .35],
[np.nan, np.nan],
[0.5, -1], [0.5, -.50],
[np.nan, np.nan],
[0.5, .5], [0.5, 1],
]]
}
# Isolator
h = .65
ISOLATOR = {
'name': 'ISOLATOR',
'base': e.SOURCE,
'shapes': [{'shape': 'arc', 'center': [.5, 0],
'width':h, 'height':h, 'theta1':130, 'theta2':320, 'arrow':'ccw'}], # 'arrow':'cw'}
}
# Circulator
CIRCULATOR = {
'name': 'CIRCULATOR',
'base': ISOLATOR,
'paths': [[[0.5, .5], [0.5, 1],
]],
'anchors': {'port3': [0.5, 1]}
}
# TODO: Circulator 4port
| [
37811,
198,
1212,
2393,
4909,
2183,
4847,
5447,
416,
1215,
7496,
272,
371,
349,
290,
29721,
24740,
198,
464,
6778,
318,
326,
777,
651,
23791,
656,
1446,
4411,
25302,
13,
68,
3639,
706,
12724,
198,
929,
523,
355,
284,
20121,
606,
656,
... | 1.669052 | 2,795 |
import logging
import time
from datetime import datetime, timedelta
from logging import LogRecord, StreamHandler
from typing import List
import httpx
from pytest_zebrunner.api.client import ZebrunnerAPI
from pytest_zebrunner.api.models import LogRecordModel
from pytest_zebrunner.context import zebrunner_context
class ZebrunnerHandler(StreamHandler):
"""
A class that inherit from StreamHandler useful for recording logs.
Attributes:
logs (List[LorRecordModel]): List of logs to be handled.
"""
logs: List[LogRecordModel] = []
def emit(self, record: LogRecord) -> None:
"""
Try to send logs to test_run_id if the last attempt was more than a second ago. If not, and test is active,
adds a new log to the list.
Args:
record (LogRecord): The log to be recorded.
"""
if datetime.utcnow() - self.last_push >= timedelta(seconds=1):
self.push_logs()
if zebrunner_context.test_is_active:
self.logs.append(
LogRecordModel(
test_id=str(zebrunner_context.test_id),
timestamp=str(round(time.time() * 1000)),
level=record.levelname,
message=str(record.msg),
)
)
def push_logs(self) -> None:
"""
Updates last_push datetime, resets logs list and send the to Zebrunner API
for reporting if test_run_id is active.
"""
try:
if zebrunner_context.test_run_id and zebrunner_context.settings.send_logs:
self.api.send_logs(zebrunner_context.test_run_id, self.logs)
except httpx.HTTPError as e:
logging.error("Failed to send logs to zebrunner", exc_info=e)
finally:
self.logs = []
self.last_push = datetime.utcnow()
| [
11748,
18931,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
18931,
1330,
5972,
23739,
11,
13860,
25060,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
2638,
87,
198,
198,
6738,
12972,
9288,
62,
2... | 2.238095 | 840 |
import os
import time
import warnings
from queue import Queue
from typing import Any, Dict, Optional, Tuple, Union
import gym
import numpy as np
import tensorflow as tf
from stable_baselines.bench import load_results
from stable_baselines.common.callbacks import BaseCallback, EventCallback
from stable_baselines.common.evaluation import evaluate_policy
from stable_baselines.common.vec_env import (DummyVecEnv, VecEnv, VecNormalize,
sync_envs_normalization)
from stable_baselines.results_plotter import X_EPISODES, X_TIMESTEPS
from envs.env_eval_callback import EnvEvalCallback
from evaluation import custom_evaluate_policy
from execution.execution_result import ExecutionResult
from log import Log
from log_utils import _ts2xy
class SaveVecNormalizeCallback(BaseCallback):
"""
Callback for saving a VecNormalize wrapper every ``save_freq`` steps
:param save_freq: (int)
:param save_path: (str) Path to the folder where ``VecNormalize`` will be saved, as ``vecnormalize.pkl``
:param name_prefix: (str) Common prefix to the saved ``VecNormalize``, if None (default)
only one file will be kept.
"""
class ProgressBarCallback(BaseCallback, EvalBaseCallback):
"""
:param pbar: (tqdm.pbar) Progress bar object
"""
class EvalCallback(EventCallback):
"""
Callback for evaluating an agent.
:param eval_env: (Union[gym.Env, VecEnv]) The environment used for initialization
:param callback_on_new_best: (Optional[BaseCallback]) Callback to trigger
when there is a new best model according to the `mean_reward`
:param n_eval_episodes: (int) The number of episodes to test the agent
:param eval_freq: (int) Evaluate the agent every eval_freq call of the callback.
:param log_path: (str) Path to a folder where the evaluations (`evaluations.npz`)
will be saved. It will be updated at each evaluation.
:param best_model_save_path: (str) Path to a folder where the best model
according to performance on the eval env will be saved.
:param deterministic: (bool) Whether the evaluation should
use a stochastic or deterministic actions.
:param render: (bool) Whether to render or not the environment during evaluation
:param verbose: (int)
"""
class LoggingTrainingMetricsCallback(BaseCallback):
"""
Callback for saving a model (the check is done every ``check_freq`` steps)
based on the training reward (in practice, we recommend using ``EvalCallback``).
:param log_every: (int)
:param log_dir: (str) Path to the folder where the model will be saved.
It must contains the file created by the ``Monitor`` wrapper.
:param verbose: (int)
"""
| [
11748,
28686,
198,
11748,
640,
198,
11748,
14601,
198,
6738,
16834,
1330,
4670,
518,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
11748,
11550,
198,
11748,
299,
32152,
355,
45941,
198,
11748... | 3.079775 | 890 |
# conversion constants to atomic units
hbar = 1.0
m_e = 1.0
a_0 = 1.0
e = 1.0
hartree = 1.0
Eh = hartree
nm = 1.8897261246257702e1
Å = 1.8897261246257702
eV = 0.03674932217565499
ps = 4.134137333518212e4
fs = 4.134137333518212
V = 0.03674932217565499
V_m = 1.9446903811488876e-12
T = 4.254382157326325e-06
m = 1.8897261246257702e10
C = 6.241509074460763e+18
s = 4.134137333518173e+16
Hz = 2.4188843265857225e-17
kg = 1.0977691057577634e30
J = 2.293712278396328e+17
A = 150.974884744557
# some physical constants, expressed in atomic units
k = 0.5 # hbar**2 / (2*m_e)
m_p = 1836.1526734400013
𝜇0 = 0.0006691762566207213
ε0 = 0.0795774715459477
c = 137.035999083818
α = 0.0072973525693
| [
2,
11315,
38491,
284,
17226,
4991,
198,
198,
71,
5657,
796,
352,
13,
15,
198,
76,
62,
68,
796,
352,
13,
15,
198,
64,
62,
15,
796,
352,
13,
15,
198,
68,
796,
352,
13,
15,
198,
18647,
631,
796,
352,
13,
15,
198,
43894,
796,
28... | 1.963068 | 352 |
import os
import json
| [
11748,
28686,
201,
198,
11748,
33918,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198
] | 2 | 16 |
from unittest import skip
from django.core.management import call_command
from django.test import TestCase
class ProductAvailabilityTest(TestCase):
"""Test bootstrap_devsite script (touching many codepaths)"""
@skip
def test_bootstrap_script(self):
"""If no orders have been made, the product is still available."""
call_command("bootstrap_devsite")
| [
6738,
555,
715,
395,
1330,
14267,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
1330,
869,
62,
21812,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
628,
198,
4871,
8721,
29841,
14402,
7,
14402,
20448,
2599,
198,
220,
220,
... | 3.157025 | 121 |
"""
jans.pycloudlib.pki
~~~~~~~~~~~~~~~~~~~
This module contains various Public Key Infrastucture (PKI) helpers.
"""
import os
from datetime import datetime
from datetime import timedelta
from ipaddress import IPv4Address
from cryptography.hazmat.backends import default_backend
from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
def generate_private_key(filename):
"""Generate private key.
:param filename: Path to generated private key.
"""
private_key = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend(),
)
alg = serialization.NoEncryption()
with open(filename, "wb") as f:
f.write(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=alg,
))
return private_key
def generate_public_key(filename, private_key, is_ca=False, add_san=False, add_key_usage=False, **kwargs):
"""Generate public key (cert).
:param filename: Path to generated public key.
:param private_key: An instance of PrivateKey object.
:param is_ca: Whether add constraint extension as CA.
:param add_san: Whether to add SubjectAlternativeName extension.
:param add_key_usage: Whether to add KeyUsage extension.
:param kwargs: Optional arguments.
Keyword arguments:
- ``email``: Email address for subject/issuer.
- ``hostname``: Hostname (common name) for subject/issuer.
- ``org_name``: Organization name for subject/issuer.
- ``country_code``: Country name in ISO format for subject/issuer.
- ``state``: State/province name for subject/issuer.
- ``city``: City/locality name for subject/issuer.
- ``extra_dns``: Additional DNS names (added if ``add_san`` argument is set to ``True``).
- ``extra_ips``: Additional IP addresses (added if ``add_san`` argument is set to ``True``).
"""
valid_from = datetime.utcnow()
valid_to = valid_from + timedelta(days=365)
# issuer equals subject because we use self-signed
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, kwargs.get("country_code")),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, kwargs.get("state")),
x509.NameAttribute(NameOID.LOCALITY_NAME, kwargs.get("city")),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, kwargs.get("org_name")),
x509.NameAttribute(NameOID.COMMON_NAME, kwargs.get("hostname")),
x509.NameAttribute(NameOID.EMAIL_ADDRESS, kwargs.get("email")),
])
builder = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(private_key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to)
.add_extension(
x509.BasicConstraints(ca=is_ca, path_length=None),
critical=is_ca,
)
)
if add_san:
# SANs
suffix, _ = os.path.splitext(os.path.basename(filename))
sans = [
x509.DNSName(kwargs.get("hostname")),
x509.DNSName(suffix),
]
# add Domains to SAN
extra_dns = kwargs.get("extra_dns") or []
for dn in extra_dns:
sans.append(x509.DNSName(dn))
# add IPs to SAN
extra_ips = kwargs.get("extra_ips") or []
for ip in extra_ips:
sans.append(x509.IPAddress(IPv4Address(ip)))
# make SANs unique
sans = list(set(sans))
builder = builder.add_extension(
x509.SubjectAlternativeName(sans),
critical=False,
)
if add_key_usage:
builder = builder.add_extension(
x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=True,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
critical=False,
)
public_key = builder.sign(
private_key, hashes.SHA256(), backend=default_backend(),
)
with open(filename, "wb") as f:
f.write(public_key.public_bytes(
encoding=serialization.Encoding.PEM,
))
return public_key
def generate_csr(filename, private_key, add_san=False, add_key_usage=False, **kwargs):
"""Generate a certificate signing request (CSR).
:param filename: Path to generate CSR.
:param private_key: An instance of PrivateKey object.
:param add_san: Whether to add SubjectAlternativeName extension.
:param add_key_usage: Whether to add KeyUsage extension.
:param kwargs: Optional arguments.
Keyword arguments:
- ``email``: Email address for subject/issuer.
- ``hostname``: Hostname (common name) for subject/issuer.
- ``org_name``: Organization name for subject/issuer.
- ``country_code``: Country name in ISO format for subject/issuer.
- ``state``: State/province name for subject/issuer.
- ``city``: City/locality name for subject/issuer.
- ``extra_dns``: Additional DNS names (added if ``add_san`` argument is set to ``True``).
- ``extra_ips``: Additional IP addresses (added if ``add_san`` argument is set to ``True``).
"""
subject = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, kwargs.get("country_code")),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, kwargs.get("state")),
x509.NameAttribute(NameOID.LOCALITY_NAME, kwargs.get("city")),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, kwargs.get("org_name")),
x509.NameAttribute(NameOID.COMMON_NAME, kwargs.get("hostname")),
x509.NameAttribute(NameOID.EMAIL_ADDRESS, kwargs.get("email")),
])
builder = (
x509.CertificateSigningRequestBuilder()
.subject_name(subject)
)
if add_san:
# SANs
suffix, _ = os.path.splitext(os.path.basename(filename))
sans = [
x509.DNSName(kwargs.get("hostname")),
x509.DNSName(suffix),
]
# add Domains to SAN
extra_dns = kwargs.get("extra_dns") or []
for dn in extra_dns:
sans.append(x509.DNSName(dn))
# add IPs to SAN
extra_ips = kwargs.get("extra_ips") or []
for ip in extra_ips:
sans.append(x509.IPAddress(IPv4Address(ip)))
# make SANs unique
sans = list(set(sans))
builder = builder.add_extension(
x509.SubjectAlternativeName(sans),
critical=False,
)
if add_key_usage:
builder = builder.add_extension(
x509.KeyUsage(
digital_signature=True,
content_commitment=True,
key_encipherment=True,
data_encipherment=False,
key_agreement=False,
key_cert_sign=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
critical=False,
)
csr = builder.sign(private_key, hashes.SHA256(), backend=default_backend())
with open(filename, "wb") as f:
f.write(csr.public_bytes(
serialization.Encoding.PEM
))
return csr
def sign_csr(filename, csr, ca_private_key, ca_public_key):
"""Sign a certificate signing request (CSR).
:param filename: Path to signed certificate.
:param csr: An instance of CertificateSigningRequest object.
:param ca_private_key: An instance of CA PrivateKey object.
:param ca_public_key: An instance of CA Certificate object.
"""
valid_from = datetime.utcnow()
valid_to = valid_from + timedelta(days=365)
builder = (
x509.CertificateBuilder()
.subject_name(csr.subject)
.issuer_name(ca_public_key.subject)
.public_key(csr.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(valid_from)
.not_valid_after(valid_to)
)
for ext in csr.extensions:
builder = builder.add_extension(ext.value, ext.critical)
public_key = builder.sign(
ca_private_key, hashes.SHA256(), backend=default_backend(),
)
with open(filename, "wb") as f:
f.write(public_key.public_bytes(
serialization.Encoding.PEM
))
return public_key
| [
37811,
198,
73,
504,
13,
9078,
17721,
8019,
13,
79,
4106,
198,
27156,
4907,
93,
198,
198,
1212,
8265,
4909,
2972,
5094,
7383,
4806,
5685,
4782,
495,
357,
47,
37845,
8,
49385,
13,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
4818,
... | 2.285789 | 3,849 |
from django.conf.urls.defaults import *
# Uncomment this for admin:
#from django.contrib import admin
urlpatterns = patterns('',
# Example:
# (r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment this for admin docs:
#(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment this for admin:
#('^admin/(.*)', admin.site.root),
)
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
13,
12286,
82,
1330,
1635,
198,
198,
2,
791,
23893,
428,
329,
13169,
25,
198,
2,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6371,
33279,
82,
796,
7572,
10786,
3256,
198,
22... | 2.588235 | 153 |
"""
configuration object that holds data about the language detection api
"""
config = {
"url": 'https://ws.detectlanguage.com/0.2/detect',
"headers": {
'User-Agent': 'Detect Language API Python Client 1.4.0',
'Authorization': 'Bearer {}',
}
}
| [
37811,
198,
11250,
3924,
2134,
326,
6622,
1366,
546,
262,
3303,
13326,
40391,
198,
37811,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
366,
6371,
1298,
705,
5450,
1378,
18504,
13,
15255,
478,
16129,
13,
785,
14,
15,
13,
17,
14,
1... | 2.650485 | 103 |
#
# This file is part of Python Module for Cube Builder AWS.
# Copyright (C) 2019-2021 INPE.
#
# Cube Builder AWS is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
import json
import os
import re
import shutil
from copy import deepcopy
from datetime import datetime
from operator import itemgetter
from pathlib import Path
import numpy
import rasterio
from bdc_catalog.models import Band, Collection, GridRefSys, Item, Tile
from bdc_catalog.models.base_sql import db
from geoalchemy2 import func
from rasterio.io import MemoryFile
from rasterio.transform import Affine
from rasterio.warp import Resampling, reproject
from .constants import (APPLICATION_ID, CLEAR_OBSERVATION_ATTRIBUTES,
CLEAR_OBSERVATION_NAME, COG_MIME_TYPE,
DATASOURCE_ATTRIBUTES, DATASOURCE_NAME, HARMONIZATION,
PROVENANCE_ATTRIBUTES, PROVENANCE_NAME, SRID_BDC_GRID,
TOTAL_OBSERVATION_ATTRIBUTES, TOTAL_OBSERVATION_NAME)
from .logger import logger
from .utils.processing import (QAConfidence, apply_landsat_harmonization,
create_asset_definition, create_cog_in_s3,
create_index, encode_key, format_version,
generateQLook, get_qa_mask, qa_statistics)
from .utils.scene_parser import SceneParser
from .utils.timeline import Timeline
###############################
# HARMONIZATION
###############################
###############################
# SEARCH
###############################
###############################
# BLEND
###############################
###############################
# POS BLEND
###############################
###############################
# PUBLISH
############################### | [
2,
198,
2,
770,
2393,
318,
636,
286,
11361,
19937,
329,
23315,
35869,
30865,
13,
198,
2,
15069,
357,
34,
8,
13130,
12,
1238,
2481,
3268,
11401,
13,
198,
2,
198,
2,
23315,
35869,
30865,
318,
1479,
3788,
26,
345,
460,
17678,
4163,
3... | 2.725146 | 684 |
"""
Precisely APIs
Enhance & enrich your data, applications, business processes, and workflows with rich location, information, and identify APIs. # noqa: E501
The version of the OpenAPI document: 11.9.3
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from com.precisely.apis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from com.precisely.apis.exceptions import ApiAttributeError
class School(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'id': (str,), # noqa: E501
'name': (str,), # noqa: E501
'assigned': (str,), # noqa: E501
'phone': (str,), # noqa: E501
'website': (str,), # noqa: E501
'address_type': (str,), # noqa: E501
'address': (Address,), # noqa: E501
'lowest_grade': (str,), # noqa: E501
'highest_grade': (str,), # noqa: E501
'school_type': (str,), # noqa: E501
'school_type_desc': (str,), # noqa: E501
'school_sub_type': (str,), # noqa: E501
'school_sub_type_desc': (str,), # noqa: E501
'gender': (str,), # noqa: E501
'gender_desc': (str,), # noqa: E501
'education_level': (str,), # noqa: E501
'education_level_desc': (str,), # noqa: E501
'greatschools': (Greatschools,), # noqa: E501
'nces_school_id': (str,), # noqa: E501
'nces_district_id': (str,), # noqa: E501
'nces_data_year': (str,), # noqa: E501
'school_ranking': ([SchoolRanking],), # noqa: E501
'students': (str,), # noqa: E501
'teachers': (str,), # noqa: E501
'status': (str,), # noqa: E501
'student_teacher_ratio': (str,), # noqa: E501
'choice': (str,), # noqa: E501
'coextensiv': (str,), # noqa: E501
'school_districts': (SchoolDistrict,), # noqa: E501
'school_profile': (SchoolProfile,), # noqa: E501
'grade_levels_taught': (GradeLevelsTaught,), # noqa: E501
'distance': (Distance,), # noqa: E501
'geometry': (Geometry,), # noqa: E501
}
@cached_property
attribute_map = {
'id': 'id', # noqa: E501
'name': 'name', # noqa: E501
'assigned': 'assigned', # noqa: E501
'phone': 'phone', # noqa: E501
'website': 'website', # noqa: E501
'address_type': 'addressType', # noqa: E501
'address': 'address', # noqa: E501
'lowest_grade': 'lowestGrade', # noqa: E501
'highest_grade': 'highestGrade', # noqa: E501
'school_type': 'schoolType', # noqa: E501
'school_type_desc': 'schoolTypeDesc', # noqa: E501
'school_sub_type': 'schoolSubType', # noqa: E501
'school_sub_type_desc': 'schoolSubTypeDesc', # noqa: E501
'gender': 'gender', # noqa: E501
'gender_desc': 'genderDesc', # noqa: E501
'education_level': 'educationLevel', # noqa: E501
'education_level_desc': 'educationLevelDesc', # noqa: E501
'greatschools': 'greatschools', # noqa: E501
'nces_school_id': 'ncesSchoolId', # noqa: E501
'nces_district_id': 'ncesDistrictId', # noqa: E501
'nces_data_year': 'ncesDataYear', # noqa: E501
'school_ranking': 'schoolRanking', # noqa: E501
'students': 'students', # noqa: E501
'teachers': 'teachers', # noqa: E501
'status': 'status', # noqa: E501
'student_teacher_ratio': 'studentTeacherRatio', # noqa: E501
'choice': 'choice', # noqa: E501
'coextensiv': 'coextensiv', # noqa: E501
'school_districts': 'schoolDistricts', # noqa: E501
'school_profile': 'schoolProfile', # noqa: E501
'grade_levels_taught': 'gradeLevelsTaught', # noqa: E501
'distance': 'distance', # noqa: E501
'geometry': 'geometry', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""School - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
assigned (str): [optional] # noqa: E501
phone (str): [optional] # noqa: E501
website (str): [optional] # noqa: E501
address_type (str): [optional] # noqa: E501
address (Address): [optional] # noqa: E501
lowest_grade (str): [optional] # noqa: E501
highest_grade (str): [optional] # noqa: E501
school_type (str): [optional] # noqa: E501
school_type_desc (str): [optional] # noqa: E501
school_sub_type (str): [optional] # noqa: E501
school_sub_type_desc (str): [optional] # noqa: E501
gender (str): [optional] # noqa: E501
gender_desc (str): [optional] # noqa: E501
education_level (str): [optional] # noqa: E501
education_level_desc (str): [optional] # noqa: E501
greatschools (Greatschools): [optional] # noqa: E501
nces_school_id (str): [optional] # noqa: E501
nces_district_id (str): [optional] # noqa: E501
nces_data_year (str): [optional] # noqa: E501
school_ranking ([SchoolRanking]): [optional] # noqa: E501
students (str): [optional] # noqa: E501
teachers (str): [optional] # noqa: E501
status (str): [optional] # noqa: E501
student_teacher_ratio (str): [optional] # noqa: E501
choice (str): [optional] # noqa: E501
coextensiv (str): [optional] # noqa: E501
school_districts (SchoolDistrict): [optional] # noqa: E501
school_profile (SchoolProfile): [optional] # noqa: E501
grade_levels_taught (GradeLevelsTaught): [optional] # noqa: E501
distance (Distance): [optional] # noqa: E501
geometry (Geometry): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""School - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
name (str): [optional] # noqa: E501
assigned (str): [optional] # noqa: E501
phone (str): [optional] # noqa: E501
website (str): [optional] # noqa: E501
address_type (str): [optional] # noqa: E501
address (Address): [optional] # noqa: E501
lowest_grade (str): [optional] # noqa: E501
highest_grade (str): [optional] # noqa: E501
school_type (str): [optional] # noqa: E501
school_type_desc (str): [optional] # noqa: E501
school_sub_type (str): [optional] # noqa: E501
school_sub_type_desc (str): [optional] # noqa: E501
gender (str): [optional] # noqa: E501
gender_desc (str): [optional] # noqa: E501
education_level (str): [optional] # noqa: E501
education_level_desc (str): [optional] # noqa: E501
greatschools (Greatschools): [optional] # noqa: E501
nces_school_id (str): [optional] # noqa: E501
nces_district_id (str): [optional] # noqa: E501
nces_data_year (str): [optional] # noqa: E501
school_ranking ([SchoolRanking]): [optional] # noqa: E501
students (str): [optional] # noqa: E501
teachers (str): [optional] # noqa: E501
status (str): [optional] # noqa: E501
student_teacher_ratio (str): [optional] # noqa: E501
choice (str): [optional] # noqa: E501
coextensiv (str): [optional] # noqa: E501
school_districts (SchoolDistrict): [optional] # noqa: E501
school_profile (SchoolProfile): [optional] # noqa: E501
grade_levels_taught (GradeLevelsTaught): [optional] # noqa: E501
distance (Distance): [optional] # noqa: E501
geometry (Geometry): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
37811,
198,
220,
220,
220,
28737,
786,
306,
23113,
628,
220,
220,
220,
32999,
1222,
22465,
534,
1366,
11,
5479,
11,
1597,
7767,
11,
290,
670,
44041,
351,
5527,
4067,
11,
1321,
11,
290,
5911,
23113,
13,
220,
1303,
645,
20402,
25,
412... | 2.056978 | 8,828 |
import mitdeeplearning.util
import mitdeeplearning.lab1
import mitdeeplearning.lab2
import mitdeeplearning.lab3
| [
11748,
10255,
22089,
40684,
13,
22602,
198,
198,
11748,
10255,
22089,
40684,
13,
23912,
16,
198,
11748,
10255,
22089,
40684,
13,
23912,
17,
198,
11748,
10255,
22089,
40684,
13,
23912,
18,
198
] | 3.53125 | 32 |
from AdmissibleSet import (AdmissibleSparseGridNodeSet,
RefinableNodesSet)
from LocalRefinementStrategy import (CreateAllChildrenRefinement,
ANOVARefinement,
AddNode)
from RefinementManager import RefinementManager
from RefinementStrategy import (SurplusRanking,
SquaredSurplusRanking,
WeightedSurplusRanking,
WeightedL2OptRanking,
ExpectationValueOptRanking,
VarianceOptRanking,
MeanSquaredOptRanking,
SurplusRatioRanking,
SurplusRatioEstimationRanking,
ExpectationValueBFRanking,
VarianceBFRanking,
SquaredSurplusBFRanking,
WeightedSurplusBFRanking,
PredictiveRanking,
WeightedL2BFRanking,
AnchoredWeightedL2OptRanking,
AnchoredVarianceOptRanking,
AnchoredMeanSquaredOptRanking,
AnchoredExpectationValueOptRanking)
from pysgpp.extensions.datadriven.uq.quadrature.bilinearform import BilinearGaussQuadratureStrategy
from pysgpp.extensions.datadriven.uq.quadrature.HashQuadrature import HashQuadrature
| [
6738,
1215,
21597,
7248,
1330,
357,
2782,
21597,
50,
29572,
41339,
19667,
7248,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
6524,
259,
... | 1.66212 | 953 |
"""A setuptools based setup module."""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import path
# Extract the tag from the system
from subimage import __version__
# Get the long description from the README file
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
# For more details: https://github.com/pypa/sampleproject
setup(
name="subimage",
version=__version__,
description="A sample Python project to detect image subsets",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://gist.github.com/jakebrinkmann/ff2e7d5dd0bc3f107ef2a22601b50c15",
author="Jake Brinkmann",
author_email="jake.brinkmann@gmail.com",
packages=find_packages(exclude=["contrib", "docs", "test"]),
entry_points={"console_scripts": ["subimage=subimage.cli:main"]},
)
| [
37811,
32,
900,
37623,
10141,
1912,
9058,
8265,
526,
15931,
198,
198,
2,
16622,
4702,
900,
37623,
10141,
625,
1233,
26791,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
6738,
28686,
1330,
3108,
198,
198,
2,
29677,
... | 2.944625 | 307 |
'''
Created on Jan 11, 2012
@author: Mirna Lerotic, 2nd Look Consulting
http://www.2ndlookconsulting.com/
Copyright (c) 2013, Stefan Vogt, Argonne National Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the Argonne National Laboratory nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
from __future__ import division
import numpy as np
import os
import sys
sys.path.append('./')
sys.path.append('file_io')
import h5py
import maps_hdf5
import logging
""" ------------------------------------------------------------------------------------------------"""
#-----------------------------------------------------------------------------
if __name__ == '__main__':
import sys
file1 = sys.argv[1]
file2 = sys.argv[2]
main(file1, file2)
| [
7061,
6,
201,
198,
41972,
319,
2365,
1367,
11,
2321,
201,
198,
201,
198,
31,
9800,
25,
7381,
2616,
31831,
6210,
11,
362,
358,
6803,
41005,
201,
198,
220,
220,
220,
220,
220,
220,
220,
220,
2638,
1378,
2503,
13,
17,
358,
5460,
5936... | 3.231884 | 690 |
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
import time
import s1ap_types
import s1ap_wrapper
import ipaddress
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
15269,
357,
66,
8,
1584,
12,
25579,
11,
3203,
11,
3457,
13,
198,
3237,
2489,
10395,
13,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
347,
10305,
12,
7635,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
6808,... | 3.463415 | 123 |
class MyClass:
"This is my second class"
a = 10
# Output: 10
print(MyClass.a)
# Output: <function MyClass.func at 0x0000000003079BF8>
print(MyClass.func.self)
# Output: 'This is my second class'
print(MyClass.__doc__) | [
4871,
2011,
9487,
25,
201,
198,
197,
1,
1212,
318,
616,
1218,
1398,
1,
201,
198,
197,
64,
796,
838,
201,
198,
201,
198,
2,
25235,
25,
838,
201,
198,
4798,
7,
3666,
9487,
13,
64,
8,
201,
198,
201,
198,
2,
25235,
25,
1279,
8818,... | 2.427083 | 96 |
"""This module contains functions that sets up material models
.. codeauthor:: Knut Andreas Meyer
"""
# Abaqus imports
from abaqusConstants import *
import material
def add_material(the_model, material_spec, name):
"""Add a material to the_model according to material_spec with name=name.
:param the_model: The model to which the sketch will be added
:type the_model: Model object (Abaqus)
:param material_spec: Dictionary containing the fields `'material_model'` and `'mpar'`:
- `'material_model'`: which material model to use, currently `'elastic'`,
`'chaboche'`, and `'user'` are supported.
- `'mpar'`: Material parameters, please see function corresponding to
`'material_model'` below for detailed requirements
:type material_spec: dict
:param name: The name of the material
:type name: str (max len = 80)
:returns: None
:rtype: None
"""
if name in the_model.materials.keys():
raise ValueError('A material with name ' + name + ' has already been created')
the_material = the_model.Material(name=name)
matmod = material_spec['material_model']
mpar = material_spec['mpar']
if matmod=='elastic':
setup_elastic(the_material, mpar)
elif matmod=='chaboche':
setup_chaboche(the_material, mpar)
elif matmod=='user':
setup_user(the_material, mpar)
else:
apt.log('Material model ' + matmod + ' is not supported')
def setup_elastic(the_material, mpar):
"""Setup elastic material behavior
:param the_material: The material to which elastic behavior will be added
:type the_material: Material object (Abaqus)
:param mpar: Dictionary containing the fields
- `'E'`: Young's modulus
- `'nu'`: Poissons ratio
:type mpar: dict
:returns: None
:rtype: None
"""
the_material.Elastic(table=((mpar['E'], mpar['nu']), ))
def setup_chaboche(the_material, mpar):
"""Setup plastic material behavior with the chaboche model
:param the_material: The material to which elastic behavior will be added
:type the_material: Material object (Abaqus)
:param mpar: Dictionary containing the fields
- `'E'`: Young's modulus
- `'nu'`: Poissons ratio
- `'Y0'`: Initial yield limit
- `'Qinf'`: Saturated isotropic yield limit increase
- `'biso'`: Speed of saturation for isotropic hardening
- `'Cmod'`: List of kinematic hardening modulii
- `'gamma'`: List of kinematic saturation parameters
:type mpar: dict
:returns: None
:rtype: None
"""
setup_elastic(the_material, mpar)
kinpar = [mpar['Y0']]
for Cmod, gamma in zip(mpar['Cmod'], mpar['gamma']):
kinpar.append(Cmod)
kinpar.append(gamma)
the_material.Plastic(table=(tuple(kinpar),), hardening=COMBINED, dataType=PARAMETERS,
numBackstresses=len(mpar['Cmod']))
the_material.plastic.CyclicHardening(table=((mpar['Y0'], mpar['Qinf'], mpar['biso']),),
parameters=ON)
def setup_user(the_material, mpar):
"""Setup user material behavior
:param the_material: The material to which elastic behavior will be added
:type the_material: Material object (Abaqus)
:param mpar: Dictionary containing the fields
- `'user_mpar_array'`: List of user material parameters
- `'nstatv'`: Number of state variables for user material model
:type mpar: dict
:returns: None
:rtype: None
"""
the_material.UserMaterial(type=MECHANICAL, unsymm=OFF,
mechanicalConstants=mpar['user_mpar_array'])
the_material.Depvar(n=mpar['nstatv'])
| [
37811,
1212,
8265,
4909,
5499,
326,
5621,
510,
2587,
4981,
198,
198,
492,
2438,
9800,
3712,
6102,
315,
33728,
23975,
198,
37811,
198,
2,
2275,
30188,
385,
17944,
198,
6738,
450,
30188,
385,
34184,
1187,
1330,
1635,
198,
11748,
2587,
628... | 2.104279 | 2,033 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from ml.rl import types as rlt
from ml.rl.models.base import ModelBase
from ml.rl.models.fully_connected_network import gaussian_fill_w_gain
from ml.rl.tensorboardX import SummaryWriterContext
logger = logging.getLogger(__name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
1439,
2489,
10395,
13,
198,
198,
11748,
18931,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
... | 3.266667 | 135 |
# -*- coding: utf-8 -*-
import pytest
import IP2Location
apikey = "demo"
package = "WS24"
usessl = True
addons = ["continent", "country", "region", "city", "geotargeting", "country_groupings", "time_zone_info"]
language = "en"
ws = IP2Location.IP2LocationWebService(apikey,package,usessl) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
6101,
17,
14749,
198,
198,
499,
522,
88,
796,
366,
9536,
78,
1,
198,
26495,
796,
366,
19416,
1731,
1,
198,
385,
408,
75,
79... | 2.607143 | 112 |
import graphene
# cookbook/schema.py
from graphene_django import DjangoObjectType
from ingredients.models import Category, Ingredient
schema = graphene.Schema(query=MainQuery)
| [
11748,
42463,
628,
628,
198,
2,
4255,
2070,
14,
15952,
2611,
13,
9078,
198,
6738,
42463,
62,
28241,
14208,
1330,
37770,
10267,
6030,
198,
198,
6738,
9391,
13,
27530,
1330,
21743,
11,
17589,
445,
1153,
628,
628,
628,
628,
628,
628,
628... | 3.37931 | 58 |
import logging
import requests
from urllib.parse import urljoin
from .context import ContextManager, ctx
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
7007,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
6738,
764,
22866,
1330,
30532,
13511,
11,
269,
17602,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
6... | 3.418605 | 43 |
from django.db import migrations, models
| [
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.818182 | 11 |
# -*- coding: utf-8 -*-
import logging
import os
import sys
import csv
import codecs
from io import BytesIO
from django_extensions.management.signals import post_command, pre_command
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
def setup_logger(logger, stream, filename=None, fmt=None):
"""Sets up a logger (if no handlers exist) for console output,
and file 'tee' output if desired."""
if len(logger.handlers) < 1:
console = logging.StreamHandler(stream)
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(fmt))
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
if filename:
outfile = logging.FileHandler(filename)
outfile.setLevel(logging.INFO)
outfile.setFormatter(logging.Formatter("%(asctime)s " + (fmt if fmt else '%(message)s')))
logger.addHandler(outfile)
class RedirectHandler(logging.Handler):
"""Redirect logging sent to one logger (name) to another."""
def signalcommand(func):
"""A decorator for management command handle defs that sends out a pre/post signal."""
return inner
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
269,
21370,
198,
11748,
40481,
82,
198,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
198,
6738,
42625,
14... | 2.57716 | 648 |
from gazette.spiders.base.fecam import FecamGazetteSpider
| [
6738,
308,
1031,
5857,
13,
2777,
4157,
13,
8692,
13,
69,
721,
321,
1330,
376,
721,
321,
38,
1031,
5857,
41294,
628
] | 2.681818 | 22 |
# -*- coding:utf8 -*-
# Performance optimization model(Maybe Only Linux)
if __name__ == "__main__":
from main import app
from werkzeug.contrib.profiler import ProfilerMiddleware
from config import GLOBAL
Host = GLOBAL.get('Host')
Port = GLOBAL.get('Port')
app.config['PROFILE'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions = [60])
app.run(debug=True, host=Host, port=Port)
| [
2,
532,
9,
12,
19617,
25,
40477,
23,
532,
9,
12,
198,
2,
15193,
23989,
2746,
7,
13300,
5514,
7020,
8,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
422,
1388,
1330,
598,
198,
220,
220,
22... | 2.664596 | 161 |
import collections
from typing import Tuple, Union, List
import numpy as np
from .mapping_utils import *
class SimpleInstance:
"""
Detected instance inside an image
> processes like so: output.pred_classes, output.scores, output.pred_boxes.tensor
"""
class IntermediateOutput:
"""
Contains all instances for one image
"""
class IntermediateOutputs:
"""
Contains intermediate outputs for a list of images
"""
class IntermediateInput:
"""
Contains intermediate inputs for one image
"""
class IntermediateInputs:
"""
Contains a list of intermediate inputs for a list of images
"""
class IntermediateData:
"""
Contains a list of intermediate outputs for a list of images.
And: a list of intermediate inputs for the same list of images.
"""
| [
11748,
17268,
198,
6738,
19720,
1330,
309,
29291,
11,
4479,
11,
7343,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
76,
5912,
62,
26791,
1330,
1635,
628,
198,
4871,
17427,
33384,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220... | 3.391837 | 245 |
# -*- coding: utf-8 -*-
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198
] | 1.714286 | 14 |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
RIDE_STATUS_CHOICES = (
('waiting', 'Waiting'),
('ongoing', 'Ongoing'),
('complete', 'Complete')
)
AVAILABLE_DRIVER_CHOICES = (
(1, 'Driver 1'),
(2, 'Driver 2'),
(3, 'Driver 3'),
(4, 'Driver 4'),
(5, 'Driver 5')
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
198,
2,
13610,
534,
4981,
994,
13,
198,
49,
14114,
62,
35744,
2937,
62,
44899,
34444,
796,
357,
198,
220,
220,
220,
... | 2.353741 | 147 |
"""Test bench for the Verilog module 'nt_gen_replay_top'."""
# The MIT License
#
# Copyright (c) 2017-2019 by the author(s)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author(s):
# - Andreas Oeldemann <andreas.oeldemann@tum.de>
#
# Description:
#
# Test bench for the Verilog module 'nt_gen_replay_top'.
import cocotb
from cocotb.triggers import RisingEdge
from lib.axilite import AXI_Lite_Writer, AXI_Lite_Reader
from lib.axis import AXIS_Reader
from lib.mem import Mem
from lib.file import File
from lib.net import axis_data_to_packet
from lib.tb import clk_gen, rstn, wait_n_cycles, check_value, toggle_signal
from scapy.all import Ether
import binascii
from nt_gen_replay_cpuregs_defines import *
# clock frequency in MHz
CLK_FREQ_MHZ = 200
# AXI Stream data width
AXIS_BIT_WIDTH = 64
# AXI memory data width
AXI_MEM_BIT_WIDTH = 512
# AXI Lite data width
AXI_LITE_BIT_WIDTH = 32
# maximum byte size of a memory write
WR_TRANSFER_SIZE_MAX = 4096
# size of the ring buffer in memory to which trace data shall be transfered.
# its a factor that is multiplied by the byte size of the trace
# (factor < 1.0 -> ring buffer is smaller than the trace, factor > 1.0 -> ring
# buffer is larger than the trace)
RING_BUFF_SIZES = [1.0, 1.5, 0.1, 0.25, 0.75]
# offset in memory where ring buffer shall be located
RING_BUFF_ADDRS = [0, 2**32-10*(AXI_MEM_BIT_WIDTH/8)]
@cocotb.coroutine
def check_output(dut, trace, axis_reader):
"""Check whether the DUT output is the one that is expected.
Based on a given trace replay file, the coroutine constructs the expected
output behavior of the DUT and compares it to the actual values.
"""
# get trace size
trace_size = trace.size()
# initialize address used to index memory-mapped trace file
addr = 0
while addr < trace_size:
# read 8 byte from trace file. contains packet meta data
meta = trace.read_reverse_byte_order(addr, 8)
addr += 8
if meta == 2**64-1:
# the overall trace data has to be 512 bit aligned. If the actual
# trace size is smaller, we can add padding at the end of the
# trace (in multiples of 64 bit words). all bits of the padding
# data have to be set to 1
continue
# extract meta data
meta_delta_t = meta & 2**32-1
meta_len_snap = (meta >> 32) & 2**11-1
meta_len_wire = (meta >> 48) & 2**11-1
# read packet data from trace file
data = trace.read(addr, meta_len_snap)
# increase address. packet data is aligned to 8 byte aligned
if meta_len_snap % 8 == 0:
addr += meta_len_snap
else:
addr += 8 * (meta_len_snap / 8 + 1)
# if number of bytes on the wire is larger than the number of snap
# bytes, add zero bytes as padding
for _ in range(meta_len_wire - meta_len_snap):
data <<= 8
# create reference ethernet frame from the read data
data = "%x" % data
data = data.zfill(meta_len_wire)
frame_ref = Ether(binascii.unhexlify(data))
# read arriving frame from AXI4-Stream
(tdata, tkeep, tuser) = yield axis_reader.read()
# convert AXI4-Stream data to ethernet frame
frame_recv = axis_data_to_packet(tdata, tkeep, AXIS_BIT_WIDTH)
# make sure frames match
if str(frame_ref) != str(frame_recv):
raise cocotb.result.TestFailure("received wrong data")
# inter-packet time is located in first tuser word
meta_delta_t_recv = tuser[0] & 2**32-1
# make sure the inter-packet time matches the expected one
if meta_delta_t != meta_delta_t_recv:
raise cocotb.result.TestFailure("wrong timing information")
# all other tuser fields must be set to zero
if any(v != 0 for v in tuser[2:]):
raise cocotb.result.TestFailure("invalid tuser data")
# wait some more cycles after last packet. there should not be any data on
# the axi stream anymore
for _ in range(1000):
yield RisingEdge(dut.clk)
check_value("m_axis_tvalid", dut.m_axis_tvalid, 0)
@cocotb.coroutine
def ring_buff_write(dut, ring_buff, trace, ring_buff_addr, axi_lite_reader,
axi_lite_writer):
"""Coroutine writes trace data to the ring buffer in memory.
The coroutine monitors the ring buffer read pointer (set by the DUT) and
writes data to the buffer when a sufficient amount of storage is available.
"""
# get the ring buffer size
ring_buff_size = ring_buff.size()
# get trace size
trace_size = trace.size()
# transfer size must be smaller than ring buffer size
if WR_TRANSFER_SIZE_MAX >= ring_buff_size:
raise cocotb.result.TestFailure("transfer size too large")
# initialize number of bytes that still need to be transfered to memory
trace_size_outstanding = trace_size
# initialize write pointer
wr = 0x0
while True:
# number of outstanding bytes for transfer must never be negative
assert trace_size_outstanding >= 0
# abort if there is no more trace data to be transfered
if trace_size_outstanding == 0:
break
# get the current read pointer
rd = yield axi_lite_reader.read(CPUREG_OFFSET_CTRL_ADDR_RD)
# get memory size from current write pointer position until the end
# of the ring buffer memory location
ring_buff_size_end = ring_buff_size - wr
# calculate the desired transfer size
transfer_size = \
min(ring_buff_size_end,
min(trace_size_outstanding, WR_TRANSFER_SIZE_MAX))
# calculated memory transfer size must always be positive
assert transfer_size > 0
if rd == wr:
# ring buffer is empty --> write data
do_transfer = True
elif rd < wr:
# as long as ring buffer contains valid data, read and write
# pointers must never become equal. If the read pointer is smaller
# than the write pointer, we may fill up the memory until the end.
# This means that the write pointer will may wrap around and have a
# value of 0. Now if the read pointer is currently 0 as well, this
# would result in an error situation in which the memory would be
# assumed to be empty. Thus, special attention is necessary here.
do_transfer = (rd != 0) or (wr + transfer_size) != ring_buff_size
elif rd > wr:
# to make sure that the read pointer does not have the same value
# as the write pointer (which would mean that ring buffer is
# empty), only transfer data if difference between both pointer is
# larger than the transfer size
do_transfer = (rd - wr) > transfer_size
if not do_transfer:
# no data transfer shall take place now, do nothing
continue
# read trace file data
data = trace.read(trace_size - trace_size_outstanding, transfer_size)
# write data to the ring buffer
ring_buff.write(ring_buff_addr + wr, data, transfer_size)
# update the write pointer
if (wr + transfer_size) == ring_buff_size:
# end of memory reached, wrap around
wr = 0x0
else:
assert (wr + transfer_size) < ring_buff_size
wr += transfer_size
# write the write pointer to the DUT
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_ADDR_WR, wr)
# decrement number of bytes that still remain to be written to memory
trace_size_outstanding -= transfer_size
# wait a little bit
yield wait_n_cycles(dut.clk, 100)
@cocotb.test()
def nt_gen_replay_top_test(dut):
"""Test bench main function."""
# start the clock
cocotb.fork(clk_gen(dut.clk, CLK_FREQ_MHZ))
# no software reset
dut.rst_sw <= 0
# reset dut
yield rstn(dut.clk, dut.rstn)
# open trace file
trace = File("files/random.file")
# get trace file size
trace_size = trace.size()
# trace file must be a multiple of the AXI data width
if trace.size() % (AXI_MEM_BIT_WIDTH/8) != 0:
raise cocotb.result.TestFailure("invalid trace size")
# calculate ring buffer sizes
ring_buff_sizes = []
for ring_buff_size in RING_BUFF_SIZES:
# size of ring buffer is determined by multiplying the size factor by
# the size of the trace
ring_buff_size = int(ring_buff_size * trace_size)
# make sure that the ring buffer size is multiple of AXI data width
if ring_buff_size % (AXI_MEM_BIT_WIDTH/8) != 0:
ring_buff_size += AXI_MEM_BIT_WIDTH/8 - \
ring_buff_size % (AXI_MEM_BIT_WIDTH/8)
ring_buff_sizes.append(ring_buff_size)
# create a ring buffer memory (initially of size 0) and connect it to the
# DUT
ring_buff = Mem(0)
ring_buff.connect(dut, "ddr3")
# create axi lite writer, connect and reset
axi_lite_writer = AXI_Lite_Writer()
axi_lite_writer.connect(dut, dut.clk, AXI_LITE_BIT_WIDTH, "ctrl")
yield axi_lite_writer.rst()
# create axi lite reader, connect and reset
axi_lite_reader = AXI_Lite_Reader()
axi_lite_reader.connect(dut, dut.clk, AXI_LITE_BIT_WIDTH, "ctrl")
yield axi_lite_reader.rst()
# create axi stream reader, connect and reset
axis_reader = AXIS_Reader()
axis_reader.connect(dut, dut.clk, AXIS_BIT_WIDTH)
yield axis_reader.rst()
# start the ring buffer memory main routine
cocotb.fork(ring_buff.main())
# toggle m_axis_tready
cocotb.fork(toggle_signal(dut.clk, dut.m_axis_tready))
# iterate over all ring buffer sizes
for i, ring_buff_size in enumerate(ring_buff_sizes):
# set ring buffer size
ring_buff.set_size(ring_buff_size)
# iterate over all addresses where ring buffer shall be located in
# memory
for j, ring_buff_addr in enumerate(RING_BUFF_ADDRS):
# print status
print("Test %d/%d" % (i*len(RING_BUFF_ADDRS) + j + 1,
len(RING_BUFF_ADDRS) * len(RING_BUFF_SIZES)))
print("Ring Buff Addr: 0x%x, Size: %d" %
(ring_buff_addr, ring_buff_size))
# we have a total of 8 GByte of memory. Make sure the ring buffer
# fits at the desired address
if ring_buff_addr + ring_buff_size > 0x1FFFFFFFF:
raise cocotb.result.TestFailure("ring buffer is too large")
# to reduce the simulation memory footprint, provide the memory
# module the first memory address that we acutally care about
ring_buff.set_offset(ring_buff_addr)
# configure ring buffer memory location
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_HI,
ring_buff_addr >> 32)
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_LO,
ring_buff_addr & 0xFFFFFFFF)
# configure ring buffer address range
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_MEM_RANGE,
ring_buff_size - 1)
# configure trace size
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_TRACE_SIZE_HI,
trace_size >> 32)
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_TRACE_SIZE_LO,
trace_size & 0xFFFFFFFF)
# reset write address pointer
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_ADDR_WR, 0x0)
# make sure module initially is inactive
status = yield axi_lite_reader.read(CPUREG_OFFSET_STATUS)
if status & 0x3 != 0:
raise cocotb.reset.TestFailure("module is active")
# start the module
yield axi_lite_writer.write(CPUREG_OFFSET_CTRL_START, 0x1)
# wait a few cycles
yield wait_n_cycles(dut.clk, 10)
# start writing the ring buffer
cocotb.fork(ring_buff_write(dut, ring_buff, trace, ring_buff_addr,
axi_lite_reader, axi_lite_writer))
# start coroutine that checks dut output
coroutine_chk_out = cocotb.fork(check_output(dut, trace,
axis_reader))
# wait a few cycles and make sure module is active
yield wait_n_cycles(dut.clk, 10)
status = yield axi_lite_reader.read(CPUREG_OFFSET_STATUS)
if status & 0x1 == 0x0:
raise cocotb.result.TestFailure("mem read not active")
if status & 0x2 == 0x0:
raise cocotb.result.TestFailure("packet assembly not active")
# wait for output check to complete
yield coroutine_chk_out.join()
# wait a few cycles
yield wait_n_cycles(dut.clk, 10)
# make sure module is now inactive
status = yield axi_lite_reader.read(CPUREG_OFFSET_STATUS)
if status & 0x3 != 0x0:
raise cocotb.result.TestFailure("module does not become " +
"inactive")
# clear the ring buffer contents
ring_buff.clear()
# close the trace file
trace.close()
| [
37811,
14402,
7624,
329,
262,
4643,
346,
519,
8265,
705,
429,
62,
5235,
62,
260,
1759,
62,
4852,
30827,
15931,
198,
2,
383,
17168,
13789,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
12,
23344,
416,
262,
1772,
7,
82,
8,
198,
2,
198,... | 2.37776 | 6,115 |
__all__ = ["Deskew", "YAMLBits", "ImageUtils"]
| [
834,
439,
834,
796,
14631,
5960,
365,
86,
1600,
366,
56,
2390,
30501,
896,
1600,
366,
5159,
18274,
4487,
8973,
628
] | 2.285714 | 21 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Sean Robertson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# --------------------------------------------------------------------------------
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""encoder.py: Implementation of a GRU based encoder for text2text problems (e.g. translation)
Inspiration taken from the corresponding Pytorch tutorial.
See https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html """
__author__ = "Vincent Marois "
import torch
from torch import nn
from utils.app_state import AppState
class EncoderRNN(nn.Module):
"""
GRU Encoder for Encoder-Decoder.
"""
def __init__(self, input_voc_size, hidden_size, bidirectional, n_layers):
"""
Initializes an Encoder network based on a Gated Recurrent Unit.
:param input_voc_size: size of the vocabulary set to be embedded by the Embedding layer.
:param hidden_size: length of embedding vectors.
:param bidirectional: indicates whether the encoder model is bidirectional or not.
:param n_layers: number of layers for the Gated Recurrent Unit.
"""
# call base constructor.
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.n_layers = n_layers
# Embedding: creates a look-up table of the embedding of a vocabulary set
# (size: input_voc_size -> input_language.n_words) on vectors of size hidden_size.
# adds 1 dimension to the shape of the tensor
# WARNING: input must be of type LongTensor
self.embedding = nn.Embedding(
num_embeddings=input_voc_size, embedding_dim=hidden_size)
# Apply a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
# NOTE: default number of recurrent layers is 1
# 1st parameter: expected number of features in the input -> same as hidden_size because of embedding
# 2nd parameter: expected number of features in hidden state -> hidden_size.
# batch_first=True -> input and output tensors are provided as (batch, seq, feature)
# batch_first=True do not affect hidden states
self.gru = nn.GRU(
input_size=hidden_size,
hidden_size=hidden_size,
num_layers=self.n_layers,
batch_first=True,
bidirectional=self.bidirectional)
def forward(self, input, hidden):
"""
Runs the Encoder.
:param input: tensor of indices, of size [batch_size x 1] (word by word looping)
:param hidden: initial hidden state for each element in the input batch.
Should be of size [(n_layers * n_directions) x batch_size x hidden_size]
For every input word, the encoder outputs a vector and a hidden state, and uses the hidden state for
the next input word.
:return: output should be of size [batch_size x seq_len x (hidden_size * n_directions)]: tensor containing the output features h_t from the last layer of the RNN, for each t.
:return: hidden should be of size [(n_layers * n_directions) x batch_size x hidden_size]: tensor containing the hidden state for t = seq_length.
"""
embedded = self.embedding(input)
# embedded: [batch_size x 1 x hidden_size]
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def init_hidden(self, batch_size):
"""
Initializes the hidden states for the encoder.
:param batch_size: batch size
:return: initial hidden states.
"""
if self.bidirectional:
return torch.zeros(self.n_layers * 2, batch_size,
self.hidden_size).type(AppState().dtype)
else:
return torch.zeros(self.n_layers, batch_size,
self.hidden_size).type(AppState().dtype)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
383,
17168,
13789,
357,
36393,
8,
198,
2,
198,
2,
15069,
357,
66,
8,
2177,
11465,
23590,
198,
2,
1... | 2.88958 | 1,929 |
""" Utility functions related to mesh processing """
import numpy as np
def save_point_cloud_ply(output_fname, pts, colors=None):
""" Save a 3D point cloud in PLY acii format
Parameters
----------
output_fname : str
Filename to save to
pts : array_like
The 3D points. Shape = Nx3
colors : array_like
RGB colors of points. Shape = Nx3 (optional)
"""
num_pts = len(pts)
with open(output_fname, 'w') as fd:
fd.write('ply\n')
fd.write('format ascii 1.0\n')
fd.write('element vertex %d\n' % num_pts)
fd.write('property float x\n')
fd.write('property float y\n')
fd.write('property float z\n')
if colors is not None:
fd.write('property uint8 red\n')
fd.write('property uint8 green\n')
fd.write('property uint8 blue\n')
fd.write('end_header\n')
if colors is None:
for pt in pts:
fd.write(f'{pt[0]} {pt[1]} {pt[2]}\n')
else:
for pt,c in zip(pts,colors):
fd.write(f'{pt[0]} {pt[1]} {pt[2]} {c[0]} {c[1]} {c[2]}\n')
def save_mesh_ply(output_fname, verts, faces, vert_colors=None):
""" Save a polygonal mesh in ascii PLY format
Parameters
----------
output_fname : str
filename to write to
verts : array_like
Vertices of the mesh. Shape = Nx3
faces : array_like
Faces of the mesh.
Shape = NxV, where V is the number of vertices per face.
vert_colors : array_like
Per-vertex RGB colors.
Shape = Nx3
"""
num_verts = len(verts)
num_faces = len(faces)
with open(output_fname, 'w') as fd:
fd.write('ply\n')
fd.write('format ascii 1.0\n')
fd.write(f'element vertex {num_verts}\n')
fd.write('property float x\n')
fd.write('property float y\n')
fd.write('property float z\n')
if vert_colors is not None:
fd.write('property uint8 red\n')
fd.write('property uint8 green\n')
fd.write('property uint8 blue\n')
fd.write(f'element face {num_faces}\n')
fd.write('property list uchar int vertex_index\n')
fd.write('end_header\n')
if vert_colors is None:
for vert in verts:
fd.write(f'{vert[0]} {vert[1]} {vert[2]}\n')
else:
assert len(vert_colors) == num_verts, "different number of vertices and colors"
for vert,c in zip(verts, vert_colors):
fd.write(f'{vert[0]} {vert[1]} {vert[2]} {c[0]} {c[1]} {c[2]}\n')
for face in faces:
fd.write(f'{len(face)} {face[0]} {face[1]} {face[2]}\n')
def save_cameras_ply(filename, cam_Ks, cam_Rs, cam_Ts, img_sizes, scale=1.0):
""" Save perspective cameras as meshes in ascii PLY format for visualization
Note that all input lists should have equal length
Parameters
----------
filename : str
filename to write
cam_Ks : list
list of camera intrinisic matrices. Each should be array_like with shape 3x3
cam_Rs : list
list of camera rotation matrices. Each should be array_like with shape 3x3
cam_Ts : list
list of camera translation vectors. Each should be array_like with length 3
img_sizes : list
list of image dimensions. Each should be array_like with form (width, height)
scale : float
size of visualized camera. Specifically, the distance from the image plane to the camera center.
"""
camera_verts = []
camera_faces = []
vert_offset = 0
for cam_K, cam_R, cam_T, img_size in zip(cam_Ks, cam_Rs, cam_Ts, img_sizes):
camera_center = np.dot(-cam_R.transpose(), cam_T)
cam_z = cam_R[2,:]
cam_x = cam_R[0,:]
cam_y = cam_R[1,:]
x_len = (scale / cam_K[0,0]) * img_size[0]
y_len = (scale / cam_K[1,1]) * img_size[1]
verts = [camera_center,]
verts.append(camera_center + scale*cam_z - x_len*cam_x - y_len*cam_y)
verts.append(camera_center + scale*cam_z + x_len*cam_x - y_len*cam_y)
verts.append(camera_center + scale*cam_z + x_len*cam_x + y_len*cam_y)
verts.append(camera_center + scale*cam_z - x_len*cam_x + y_len*cam_y)
faces = [(f[0]+vert_offset, f[1]+vert_offset, f[2]+vert_offset) for f in [(0,1,2), (0,2,3), (0,3,4), (0,4,1)]]
vert_offset += len(verts)
camera_verts.extend(verts)
camera_faces.extend(faces)
save_mesh_ply(filename, camera_verts, camera_faces)
| [
37811,
34030,
5499,
3519,
284,
19609,
7587,
37227,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4299,
3613,
62,
4122,
62,
17721,
62,
2145,
7,
22915,
62,
69,
3672,
11,
43344,
11,
7577,
28,
14202,
2599,
198,
220,
220,
220,
37227,
12793... | 2.119777 | 2,154 |
"""
'''
Description: Problem 1897 (Redistribute Characters to Make All Strings Equal) - Solution 1
Version: 1.0.0.20220322
Author: Arvin Zhao
Date: 2022-03-10 13:58:02
Last Editors: Arvin Zhao
LastEditTime: 2022-03-22 19:36:37
'''
"""
from typing import List
| [
37811,
198,
7061,
6,
198,
11828,
25,
20647,
49429,
357,
7738,
396,
4163,
26813,
284,
6889,
1439,
4285,
654,
28701,
8,
532,
28186,
352,
198,
14815,
25,
352,
13,
15,
13,
15,
13,
1238,
17572,
37283,
198,
13838,
25,
943,
7114,
29436,
19... | 2.932584 | 89 |
import os
try:
print(os.environ['PYTHONPATH'].split(os.pathsep))
finally:
pass
try:
import neoradio2
print(neoradio2.__file__)
except Exception as ex:
input(str(ex))
import time
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
try:
while True:
neoradio2.chain_identify(handle)
time.sleep(2)
print("Requesting Settings {} {}...".format(device.name, device.serial_str))
neoradio2.request_settings(handle, 0, 0xFF)
time.sleep(0.5)
for i in range(8):
if (1 << i) & banks:
print("Reading Settings {} {}...".format(device.name, device.serial_str))
settings = neoradio2.read_settings(handle, 0, i)
time.sleep(0.05)
time.sleep(1)
except Exception as ex:
print(ex)
time.sleep(1)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
"""
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
try:
while True:
neoradio2.chain_identify(handle)
s = time.time()
#points = [-50,0,50,600]
header = neoradio2.neoRADIO2frame_calHeader()
header.channel = 0
header.range = 0
header.num_of_pts = 4
e = time.time()
msg = str(e-s)
print("Requesting Calibration {} {}...".format(device.name, device.serial_str))
neoradio2.request_calibration(handle, 0, 0xFF, header)
time.sleep(0.5)
neoradio2.request_calibration_points(handle, 0, 0xFF, header)
time.sleep(0.5)
for x in range(8):
print("Reading Calibration {} {}...".format(device.name, device.serial_str))
cal = neoradio2.read_calibration_array(handle, 0, x, header)
print(x, cal)
#time.sleep(0.05)
print("Reading Calibration Points {} {}...".format(device.name, device.serial_str))
cal_points = neoradio2.read_calibration_points_array(handle, 0, x, header)
print(x, cal_points)
time.sleep(0.05)
time.sleep(1)
except Exception as ex:
print(ex)
time.sleep(1)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
"""
"""
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
try:
while True:
neoradio2.chain_identify(handle)
s = time.time()
points = [ -50, 0, 75, 650 ]
cal = [ -48.67, 1.19, 75.72, 650.36 ]
#points = [1,1,1,1]
header = neoradio2.neoRADIO2frame_calHeader()
header.channel = 0
header.range = 0
header.num_of_pts = len(points)
e = time.time()
msg = str(e-s)
#neoradio2.request_calibration_info(handle, 0, 0xFF)
for x in range(8):
print("Writing Calibration Points {} {}...".format(device.name, device.serial_str))
neoradio2.write_calibration_points(handle, 0, (1 << x), header, points)
print("Writing Calibration {} {}...".format(device.name, device.serial_str))
neoradio2.write_calibration(handle, 0, (1 << x), header, cal)
print("Storing Calibration {} {}...".format(device.name, device.serial_str))
neoradio2.store_calibration(handle, 0, (1 << x))
time.sleep(0.1)
is_stored = neoradio2.is_calibration_stored(handle, 0, x)
print("{} is cal stored: {}".format(x, is_stored))
time.sleep(0.1)
except Exception as ex:
print(ex)
time.sleep(1)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
"""
"""
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
try:
#print("Starting App {} {}...".format(device.name, device.serial_str))
#neoradio2.app_start(handle, 0, 1)
while True:
s = time.time()
print("Requesting Calibration {} {}...".format(device.name, device.serial_str))
neoradio2.request_calibration_info(handle, 0, 1)
e = time.time()
msg = str(e-s)
for x in range(8):
cal_info = neoradio2.read_calibration_info(handle, 0, x)
print("num_of_pts: {}".format(cal_info.num_of_pts))
print("channel: {}".format(cal_info.channel))
print("range: {}".format(cal_info.range))
print("cal_is_valid: {}".format(cal_info.cal_is_valid))
time.sleep(0.1)
except Exception as ex:
print(ex)
time.sleep(1)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
#time.sleep(3)
#time.sleep(10)
"""
"""
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
neoradio2.app_start(handle, 0, 0xFF)
try:
while True:
s = time.time()
neoradio2.request_calibration(handle, 0, 0xFF)
e = time.time()
msg = str(e-s)
for x in range(8):
value = neoradio2.read_calibration_array(handle, 0, x)
#try:
# neoradio2.toggle_led(handle, 0, 0xFF, neoradio2.neoRADIO2_LEDMode.ON, 1, 255)
#except neoradio2.Exception as ex:
# print(ex)
#value = neoradio2.get_manufacturer_date(handle, 0, x)
msg += ", {}".format(value)
print(msg)
time.sleep(0.1)
except Exception as ex:
print(ex)
time.sleep(1)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
#time.sleep(3)
#time.sleep(10)
"""
"""
if __name__ == "__main__":
for device in neoradio2.find():
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}...".format(device.name, device.serial_str))
neoradio2.app_start(handle, 0, 0xFF)
try:
while True:
s = time.time()
neoradio2.request_sensor_data(handle, 1, 0xFF)
e = time.time()
msg = str(e-s)
for x in range(8):
value = neoradio2.read_sensor_float(handle, 1, x)
#try:
# neoradio2.toggle_led(handle, 0, 0xFF, neoradio2.neoRADIO2_LEDMode.ON, 1, 255)
#except neoradio2.Exception as ex:
# print(ex)
#value = neoradio2.get_manufacturer_date(handle, 0, x)
msg += ", {}".format(value)
print(msg)
time.sleep(0.1)
except Exception as ex:
print(ex)
time.sleep(3)
finally:
neoradio2.close(handle)
input("Press any key to continue...")
time.sleep(3)
time.sleep(10)
"""
"""
def get_bank_info(handle, device, bank):
application_level = "Application" if neoradio2.app_is_started(handle, device, bank) else "Bootloader"
month, day, year = neoradio2.get_manufacturer_date(handle, device, bank)
fw_major, fw_minor = neoradio2.get_firmware_version(handle, device, bank)
hw_major, hw_minor = neoradio2.get_hardware_revision(handle, device, bank)
try:
pcb_sn = neoradio2.get_pcbsn(handle, device, bank)
except neoradio2.Exception as ex:
pcb_sn = str(ex)
print('\tFirmware State: {}'.format(application_level))
print('\tManufacturer Date: {}/{}/{}'.format(month, day, year))
print('\tFirmware Version: {}.{}'.format(fw_major, fw_minor))
print('\tHardware Revision: {}.{}'.format(hw_major, hw_minor))
print('\tFirmware State: {}'.format(application_level))
print('\tPCB Serial Number: {}'.format(pcb_sn))
def get_sensor_info(handle, device, bank):
value = neoradio2.read_sensor_float(handle, device, bank)
print('\tSensor Value: {}'.format(value))
if __name__ == "__main__":
import time
input("Press any key to start...")
try:
devices = neoradio2.find()
for device in devices:
print("Opening {} {}...".format(device.name, device.serial_str))
handle = neoradio2.open(device)
print("Opened {} {}.".format(device.name, device.serial_str))
print("Handle: {}".format(handle))
#neoradio2.enter_bootloader(handle, 0, 2)
#time.sleep(30)
how_many_in_chain = neoradio2.get_chain_count(handle, True);
print("%d devices in the chain" % how_many_in_chain)
for d in range(how_many_in_chain):
print("Entering Bootloader on device {}...".format(d+1))
neoradio2.enter_bootloader(handle, d, 0xFF)
#time.sleep(0.5)
for d in range(how_many_in_chain):
for x in range(8):
print("Getting Info of device {} bank {}...".format(d+1, x+1))
get_bank_info(handle, d, x)
for d in range(how_many_in_chain):
print("Entering Application on device {}...".format(d+1))
neoradio2.app_start(handle, d, 0xFF)
for d in range(how_many_in_chain):
neoradio2.request_pcbsn(handle, d, 0xFF)
time.sleep(0.5)
for x in range(8):
print("Getting Info of device {} bank {}...".format(d+1, x+1))
get_bank_info(handle, d, x)
neoradio2.request_sensor_data(handle, d, 0xFF)
time.sleep(0.5)
for x in range(8):
print("Getting Sensor info of device {} bank {}...".format(d+1, x+1))
get_sensor_info(handle, d, x)
for d in range(how_many_in_chain):
print("Toggling LEDs on device {}...".format(d+1))
for x in range(50):
neoradio2.toggle_led(handle, d, 0xFF, 50)
time.sleep(0.1)
print("Closing {} {}...".format(device.name, device.serial_str))
neoradio2.close(handle)
except Exception as ex:
print("ERROR: ", ex)
finally:
input("Press any key to continue...")
""" | [
11748,
28686,
198,
28311,
25,
198,
220,
220,
220,
3601,
7,
418,
13,
268,
2268,
17816,
47,
56,
4221,
1340,
34219,
6,
4083,
35312,
7,
418,
13,
6978,
325,
79,
4008,
198,
69,
3289,
25,
198,
220,
220,
220,
1208,
198,
198,
28311,
25,
... | 1.829778 | 6,656 |
from .models import *
from django.forms import ModelForm
from django import forms
from .views import *
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
# class bill_form(ModelForm):
# class Meta:
# model = bill
# fields = ['name', 'amount', 'email']
| [
6738,
764,
27530,
1330,
1635,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
764,
33571,
1330,
1635,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
... | 2.918919 | 111 |
from brownie import accounts, interface, Contract
from brownie import (Bank, SimpleBankConfig, SimplePriceOracle, PancakeswapGoblin,
StrategyAllHTOnly, StrategyLiquidate, StrategyWithdrawMinimizeTrading, StrategyAddTwoSidesOptimal, PancakeswapGoblinConfig, TripleSlopeModel, ConfigurableInterestBankConfig, PancakeswapPool1Goblin, ProxyAdminImpl, TransparentUpgradeableProxyImpl)
from brownie import network
from .utils import *
from .constant import *
import eth_abi
# set default gas price
network.gas_price('1 gwei')
| [
6738,
7586,
494,
1330,
5504,
11,
7071,
11,
17453,
198,
6738,
7586,
494,
1330,
357,
28650,
11,
17427,
28650,
16934,
11,
17427,
18124,
48625,
11,
49957,
1124,
86,
499,
38,
672,
2815,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 3.323171 | 164 |
#!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import numpy as np
from ObservationParser.hierarchies import HierarchicalLossShaper
from Utils.utils import VarDictParser, ObsDictParser
#========================================================================
#========================================================================
# return losses[:, 0]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
796,
705,
26953,
666,
367,
589,
6,
198,
198,
2,
23926,
2559,
198,
198,
11748,
299,
32152,
355,
45941,
220,
198,
198,
6738,
11086,
13208,
46677,
13,
71,
959,
998,... | 4.776596 | 94 |
from models import User
SQL_CRIAR_USUARIO = 'INSERT INTO users (name, email, username, password) values (%s, %s, %s, %s)'
SQL_LOGIN_USUARIO = 'SELECT id, name, email, username, password from users where email = %s'
| [
6738,
4981,
1330,
11787,
198,
198,
17861,
62,
34,
7112,
1503,
62,
2937,
52,
1503,
9399,
796,
705,
20913,
17395,
39319,
2985,
357,
3672,
11,
3053,
11,
20579,
11,
9206,
8,
3815,
37633,
82,
11,
4064,
82,
11,
4064,
82,
11,
4064,
82,
3... | 2.765432 | 81 |
from django.contrib import admin
from .models import getintouch
# Register your models here.
admin.site.register(getintouch)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
651,
600,
7673,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
1136,
600,
7673,
8,
198
] | 3.432432 | 37 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import graphene
from fastapi import FastAPI
from starlette.graphql import GraphQLApp
from graphvl.schema import Query, Mutation
app = FastAPI()
app.add_route("/", GraphQLApp(schema=graphene.Schema(query=Query, mutation=Mutation)))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
42463,
198,
198,
6738,
3049,
15042,
1330,
12549,
17614,
198,
6738,
3491,
21348,
13,
34960,... | 2.891089 | 101 |
from rook.serverless import serverless_rook
@serverless_rook
| [
6738,
10929,
13,
15388,
1203,
1330,
4382,
1203,
62,
305,
482,
198,
198,
31,
15388,
1203,
62,
305,
482,
198
] | 3.1 | 20 |
"""Tools for Python database scripts."""
_symbols = {
# skytools.adminscript
'AdminScript': 'skytools.adminscript:AdminScript',
# skytools.config
'Config': 'skytools.config:Config',
# skytools.dbservice
'DBService': 'skytools.dbservice:DBService',
'ServiceContext': 'skytools.dbservice:ServiceContext',
'TableAPI': 'skytools.dbservice:TableAPI',
'get_record': 'skytools.dbservice:get_record',
'get_record_list': 'skytools.dbservice:get_record_list',
'make_record': 'skytools.dbservice:make_record',
'make_record_array': 'skytools.dbservice:make_record_array',
# skytools.dbstruct
'SeqStruct': 'skytools.dbstruct:SeqStruct',
'TableStruct': 'skytools.dbstruct:TableStruct',
'T_ALL': 'skytools.dbstruct:T_ALL',
'T_CONSTRAINT': 'skytools.dbstruct:T_CONSTRAINT',
'T_DEFAULT': 'skytools.dbstruct:T_DEFAULT',
'T_GRANT': 'skytools.dbstruct:T_GRANT',
'T_INDEX': 'skytools.dbstruct:T_INDEX',
'T_OWNER': 'skytools.dbstruct:T_OWNER',
'T_PARENT': 'skytools.dbstruct:T_PARENT',
'T_PKEY': 'skytools.dbstruct:T_PKEY',
'T_RULE': 'skytools.dbstruct:T_RULE',
'T_SEQUENCE': 'skytools.dbstruct:T_SEQUENCE',
'T_TABLE': 'skytools.dbstruct:T_TABLE',
'T_TRIGGER': 'skytools.dbstruct:T_TRIGGER',
# skytools.fileutil
'signal_pidfile': 'skytools.fileutil:signal_pidfile',
'write_atomic': 'skytools.fileutil:write_atomic',
# skytools.gzlog
'gzip_append': 'skytools.gzlog:gzip_append',
# skytools.hashtext
'hashtext_old': 'skytools.hashtext:hashtext_old',
'hashtext_new': 'skytools.hashtext:hashtext_new',
# skytools.natsort
'natsort': 'skytools.natsort:natsort',
'natsort_icase': 'skytools.natsort:natsort_icase',
'natsorted': 'skytools.natsort:natsorted',
'natsorted_icase': 'skytools.natsort:natsorted_icase',
'natsort_key': 'skytools.natsort:natsort_key',
'natsort_key_icase': 'skytools.natsort:natsort_key_icase',
# skytools.parsing
'dedent': 'skytools.parsing:dedent',
'hsize_to_bytes': 'skytools.parsing:hsize_to_bytes',
'merge_connect_string': 'skytools.parsing:merge_connect_string',
'parse_acl': 'skytools.parsing:parse_acl',
'parse_connect_string': 'skytools.parsing:parse_connect_string',
'parse_logtriga_sql': 'skytools.parsing:parse_logtriga_sql',
'parse_pgarray': 'skytools.parsing:parse_pgarray',
'parse_sqltriga_sql': 'skytools.parsing:parse_sqltriga_sql',
'parse_statements': 'skytools.parsing:parse_statements',
'parse_tabbed_table': 'skytools.parsing:parse_tabbed_table',
'sql_tokenizer': 'skytools.parsing:sql_tokenizer',
# skytools.psycopgwrapper
'connect_database': 'skytools.psycopgwrapper:connect_database',
'DBError': 'skytools.psycopgwrapper:DBError',
'I_AUTOCOMMIT': 'skytools.psycopgwrapper:I_AUTOCOMMIT',
'I_READ_COMMITTED': 'skytools.psycopgwrapper:I_READ_COMMITTED',
'I_REPEATABLE_READ': 'skytools.psycopgwrapper:I_REPEATABLE_READ',
'I_SERIALIZABLE': 'skytools.psycopgwrapper:I_SERIALIZABLE',
# skytools.querybuilder
'PLPyQuery': 'skytools.querybuilder:PLPyQuery',
'PLPyQueryBuilder': 'skytools.querybuilder:PLPyQueryBuilder',
'QueryBuilder': 'skytools.querybuilder:QueryBuilder',
'plpy_exec': 'skytools.querybuilder:plpy_exec',
'run_exists': 'skytools.querybuilder:run_exists',
'run_lookup': 'skytools.querybuilder:run_lookup',
'run_query': 'skytools.querybuilder:run_query',
'run_query_row': 'skytools.querybuilder:run_query_row',
# skytools.quoting
'db_urldecode': 'skytools.quoting:db_urldecode',
'db_urlencode': 'skytools.quoting:db_urlencode',
'json_decode': 'skytools.quoting:json_decode',
'json_encode': 'skytools.quoting:json_encode',
'make_pgarray': 'skytools.quoting:make_pgarray',
'quote_bytea_copy': 'skytools.quoting:quote_bytea_copy',
'quote_bytea_literal': 'skytools.quoting:quote_bytea_literal',
'quote_bytea_raw': 'skytools.quoting:quote_bytea_raw',
'quote_copy': 'skytools.quoting:quote_copy',
'quote_fqident': 'skytools.quoting:quote_fqident',
'quote_ident': 'skytools.quoting:quote_ident',
'quote_json': 'skytools.quoting:quote_json',
'quote_literal': 'skytools.quoting:quote_literal',
'quote_statement': 'skytools.quoting:quote_statement',
'unescape': 'skytools.quoting:unescape',
'unescape_copy': 'skytools.quoting:unescape_copy',
'unquote_fqident': 'skytools.quoting:unquote_fqident',
'unquote_ident': 'skytools.quoting:unquote_ident',
'unquote_literal': 'skytools.quoting:unquote_literal',
# skytools.scripting
'BaseScript': 'skytools.scripting:BaseScript',
'daemonize': 'skytools.scripting:daemonize',
'DBScript': 'skytools.scripting:DBScript',
'UsageError': 'skytools.scripting:UsageError',
# skytools.skylog
'getLogger': 'skytools.skylog:getLogger',
# skytools.sockutil
'set_cloexec': 'skytools.sockutil:set_cloexec',
'set_nonblocking': 'skytools.sockutil:set_nonblocking',
'set_tcp_keepalive': 'skytools.sockutil:set_tcp_keepalive',
# skytools.sqltools
'dbdict': 'skytools.sqltools:dbdict',
'CopyPipe': 'skytools.sqltools:CopyPipe',
'DBFunction': 'skytools.sqltools:DBFunction',
'DBLanguage': 'skytools.sqltools:DBLanguage',
'DBObject': 'skytools.sqltools:DBObject',
'DBSchema': 'skytools.sqltools:DBSchema',
'DBTable': 'skytools.sqltools:DBTable',
'Snapshot': 'skytools.sqltools:Snapshot',
'db_install': 'skytools.sqltools:db_install',
'exists_function': 'skytools.sqltools:exists_function',
'exists_language': 'skytools.sqltools:exists_language',
'exists_schema': 'skytools.sqltools:exists_schema',
'exists_sequence': 'skytools.sqltools:exists_sequence',
'exists_table': 'skytools.sqltools:exists_table',
'exists_temp_table': 'skytools.sqltools:exists_temp_table',
'exists_type': 'skytools.sqltools:exists_type',
'exists_view': 'skytools.sqltools:exists_view',
'fq_name': 'skytools.sqltools:fq_name',
'fq_name_parts': 'skytools.sqltools:fq_name_parts',
'full_copy': 'skytools.sqltools:full_copy',
'get_table_columns': 'skytools.sqltools:get_table_columns',
'get_table_oid': 'skytools.sqltools:get_table_oid',
'get_table_pkeys': 'skytools.sqltools:get_table_pkeys',
'installer_apply_file': 'skytools.sqltools:installer_apply_file',
'installer_find_file': 'skytools.sqltools:installer_find_file',
'magic_insert': 'skytools.sqltools:magic_insert',
'mk_delete_sql': 'skytools.sqltools:mk_delete_sql',
'mk_insert_sql': 'skytools.sqltools:mk_insert_sql',
'mk_update_sql': 'skytools.sqltools:mk_update_sql',
# skytools.timeutil
'FixedOffsetTimezone': 'skytools.timeutil:FixedOffsetTimezone',
'datetime_to_timestamp': 'skytools.timeutil:datetime_to_timestamp',
'parse_iso_timestamp': 'skytools.timeutil:parse_iso_timestamp',
# skytools.utf8
'safe_utf8_decode': 'skytools.utf8:safe_utf8_decode',
}
__all__ = _symbols.keys()
_symbols['__version__'] = 'skytools.installer_config:package_version'
if 1:
# lazy-import exported vars
import skytools.apipkg as _apipkg
_apipkg.initpkg(__name__, _symbols, {'apipkg': _apipkg})
elif 1:
# import everything immediately
from skytools.quoting import *
from skytools.sqltools import *
from skytools.scripting import *
from skytools.adminscript import *
from skytools.config import *
from skytools.dbservice import *
from skytools.dbstruct import *
from skytools.fileutil import *
from skytools.gzlog import *
from skytools.hashtext import *
from skytools.natsort import *
from skytools.parsing import *
from skytools.psycopgwrapper import *
from skytools.querybuilder import *
from skytools.skylog import *
from skytools.sockutil import *
from skytools.timeutil import *
from skytools.utf8 import *
else:
from skytools.quoting import *
from skytools.sqltools import *
from skytools.scripting import *
# compare apipkg list to submodule exports
xall = []
import skytools.adminscript
import skytools.config
import skytools.dbservice
import skytools.dbstruct
import skytools.fileutil
import skytools.gzlog
import skytools.hashtext
import skytools.natsort
import skytools.parsing
import skytools.psycopgwrapper
import skytools.querybuilder
import skytools.quoting
import skytools.scripting
import skytools.skylog
import skytools.sockutil
import skytools.sqltools
import skytools.timeutil
import skytools.utf8
xall = ( skytools.adminscript.__all__
+ skytools.config.__all__
+ skytools.dbservice.__all__
+ skytools.dbstruct.__all__
+ skytools.fileutil.__all__
+ skytools.gzlog.__all__
+ skytools.hashtext.__all__
+ skytools.natsort.__all__
+ skytools.parsing.__all__
+ skytools.psycopgwrapper.__all__
+ skytools.querybuilder.__all__
+ skytools.quoting.__all__
+ skytools.scripting.__all__
+ skytools.skylog.__all__
+ skytools.sockutil.__all__
+ skytools.sqltools.__all__
+ skytools.timeutil.__all__
+ skytools.utf8.__all__ )
for k in __all__:
if k not in xall:
print '%s missing from __all__?' % k
for k in xall:
if k not in __all__:
print '%s missing from top-level?' % k
| [
198,
37811,
33637,
329,
11361,
6831,
14750,
526,
15931,
198,
198,
62,
1837,
2022,
10220,
796,
1391,
198,
220,
220,
220,
1303,
6766,
31391,
13,
324,
42951,
6519,
198,
220,
220,
220,
705,
46787,
7391,
10354,
705,
15688,
31391,
13,
324,
... | 2.35037 | 4,050 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for debiasing ML models."""
import math
import numpy as np
class RandomizedThreshold:
"""Threshold optimizer (RTO) to debias models via postprocessing.
See: https://arxiv.org/abs/2106.12887.
This is a solver to the following optimiation problem:
minimize gamma/2 ||x||^2 - y^Tx
s.t. x satisfies DP constraint with tolerance eps and parameter rho.
There are no assumptions about y in this code but, in general, y should be the
predictions of the original classifier.
"""
def __init__(self, gamma=1.0, eps=0.0, rho=None):
"""Instantiate object.
Args:
gamma: The regularization parameter gamma (for randomization). Set this to
1 if the goal is to minmize changes to the original scores.
eps: Tolerance parameter for bias between 0 and 1 inclusive.
rho: The rho parameter in the post-hoc rule. If None, rho = E[y].
"""
if eps < 0:
raise ValueError('eps must be non-negative.')
if gamma <= 0:
raise ValueError('gamma must be a strictly positive number.')
if rho is not None and rho <= 0:
raise ValueError('rho must be either None or a strictly positive number.')
self.num_groups = 1
self.gamma = gamma
self.eps = eps
self.rho = rho
self.avrg_y_score = 0
# model paramters (Lagrange dual variables)
self.lambdas = []
self.mus = []
def fit(self, y_orig, group_feature, sgd_steps,
full_gradient_epochs=1_000, verbose=True, batch_size=256,
ignore_warnings=False):
"""Debias predictions w.r.t. the sensitive class in each demographic group.
This procedure takes as input a vector y=y_orig and solves the optimization
problem subject to the statistical parity constraint.
minimize_x gamma/2 ||x||^2 - y^Tx
s.t. x satisfies DP constraints with tolerance eps and parameter rho.
IMPORTANT: If this is used for postprocessing a classifier,
the scores y_orig need to be rescaled linearly to [-1, +1].
Training proceeds in two rounds. First is SGD. Second is full gradient
descent. Full gradient descent is recommended when debiasing deep neural
nets because the scores are concentrated around the extremes
so high preciseion might be needed. Because the loss is smooth, the lr
in full gradient method does not need tuning. It can be set to gamma / 2.0.
Args:
y_orig: A vector of the original probability scores. If this is used for
debiasing binary classifiers, y_orig = 2 * p(y=1) -1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
sgd_steps: Number of minibatch steps in SGD.
full_gradient_epochs: Number of epochs in full gradient descent phase.
verbose: Set to True to display progress.
batch_size: Size of minibatches in SGD.
ignore_warnings: Set to True to suppress warnings.
Returns:
None.
"""
if min(y_orig) >= 0:
self.yscale = 'positive'
else:
self.yscale = 'negative'
y_orig = np.array(y_orig)
num_groups = len(set(group_feature)) # number of demographic groups
if (min(y_orig) < -1 or max(y_orig) > 1) and not ignore_warnings:
print('Warning: the scores y_orig are not in the range [-1, +1].'
'To suppress this message, set ignore_warnings=True.')
if self.yscale == 'positive' and not ignore_warnings:
print('Warning: if this is for postprocessing a binary classifier, '
'the scores need to be rescaled to [-1, +1]. To suppress this '
'message, set ignore_warnings=True.')
if min(group_feature) != 0 or (max(group_feature) != num_groups - 1):
raise ValueError('group_feature should be in {0, 1, .. K-1} where '
'K is the nubmer of groups. Some groups are missing.')
self.num_groups = num_groups
eps0 = self.eps / 2.0
gamma = self.gamma
# Store group membership ids in a dictionary.
xk_groups = {}
for k in range(num_groups):
xk_groups[k] = []
for i in range(len(group_feature)):
xk_groups[group_feature[i]].append(i)
for k in xk_groups:
assert xk_groups[k] # All groups must be non-empty.
self.avrg_y_score = float(sum(y_orig))/len(y_orig)
if self.rho is None:
if self.yscale == 'positive':
self.rho = self.avrg_y_score
else:
self.rho = self.avrg_y_score / 2.0 + 0.5
# The parameters we optimize in the algorithm are lambdas and mus.
# lambdas_final and mus_final are running averages (final output).
lambdas = np.zeros((num_groups,))
mus = np.zeros((num_groups,))
lambdas_final = np.zeros((num_groups,)) # running averages
mus_final = np.zeros((num_groups,)) # running averages
# SGD is carried out in each group separately due to decomposition of the
# optimization problem.
num_samples_sgd = sgd_steps * batch_size
lr = gamma * math.sqrt(1.0 / num_samples_sgd)
# Begin the projected SGD phase.
if verbose:
print('SGD phase started:')
for k in range(num_groups):
if verbose:
print('Group %d.\t\t%02d%%'%(k, int(100*k/num_groups)), end='\r')
idx = np.array(list(xk_groups[k])) # instance IDs in group k
group_size = len(idx)
for _ in range(sgd_steps):
# Using random.randint is 10x faster than random.choice.
batch_ids = np.random.randint(0, group_size, batch_size)
batch_ids = idx[batch_ids]
# The code below is a faster implementation of:
# xi_arg = y_orig[batch_ids] - (lambdas[k] - mus[k])
# xi_gradient = xi_arg/gamma
# xi_gradient = np.maximum(xi_gradient, 0.)
# xi_gradient = np.minimum(xi_gradient, 1.)
lambda_minus_mu = lambdas[k] - mus[k]
xi_arg = np.maximum(y_orig[batch_ids], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
lambda_gradient = eps0 + self.rho - mean_xi
mu_gradient = eps0 - self.rho + mean_xi
# stochastic gradient descent
if eps0 > 1e-3:
lambdas[k] = max(0, lambdas[k] - lr * batch_size * lambda_gradient)
mus[k] = max(0, mus[k] - lr * batch_size * mu_gradient)
else:
# If self.eps=0, we can drop mus and optimize lambdas only but
# lambdas will not be constrained to be non-negative in this case.
lambdas[k] = lambdas[k] - lr * batch_size * lambda_gradient
# lambdas_final and mus_final are running averages.
lambdas_final[k] += lambdas[k] / sgd_steps
mus_final[k] += mus[k] / sgd_steps
# Now switch to full gradient descent.
# Because the objective is smooth, lr=gamma/2 works.
if verbose and full_gradient_epochs:
print('\nFull gradient descent phase started:')
for k in range(num_groups):
if verbose:
print('Group {}.'.format(k))
idx = np.array(list(xk_groups[k]))
for _ in range(full_gradient_epochs):
lambda_minus_mu = lambdas_final[k] - mus_final[k]
xi_arg = np.maximum(y_orig[idx], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
full_grad_lambda = eps0 + self.rho - mean_xi
full_grad_mu = eps0 - self.rho + mean_xi
if eps0 > 1e-3:
lambdas_final[k] = max(0,
lambdas_final[k] - 0.5*gamma*full_grad_lambda)
mus_final[k] = max(0, mus_final[k] - 0.5*gamma*full_grad_mu)
else:
lambdas_final[k] = lambdas_final[k] - 0.5*gamma*full_grad_lambda
self.lambdas = lambdas_final
self.mus = mus_final
def predict(self, y_orig, group_feature, ignore_warnings=False):
"""Debiases the predictions.
Given the original scores y, post-process them according to the learned
model such that the predictions satisfy the desired fairness criteria.
Args:
y_orig: Original classifier scores. If this is for postprocessing binary
classifiers, y_orig = 2 * p(y=1) -1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
ignore_warnings: Set to True to suppress warnings.
Returns:
y_new_prob: y_new_prob[i] is the probability of predicting the positive
class for the instance i.
"""
if (((min(y_orig) >= 0 and self.yscale == 'negative') or
(min(y_orig) < 0 and self.yscale == 'positive')) and
not ignore_warnings):
print('Warning: the scores seem to have a difference scale from the '
'training data. '
'If the data is scaled in [0, 1], e.g. for preprocessing, or '
'in [-1, +1], e.g. for postprocessing, make sure the test labels '
'are scaled similarly.')
num_examples = len(y_orig) # number of training examples
gamma = self.gamma
lambdas = self.lambdas
mus = self.mus
y_new_prob = np.zeros((num_examples,))
for i in range(num_examples):
k = group_feature[i]
if y_orig[i] < (lambdas[k]-mus[k]):
y_new_prob[i] = 0
elif y_orig[i] < (lambdas[k]-mus[k]) + gamma:
y_new_prob[i] = (1.0/gamma)*(y_orig[i]-(lambdas[k]-mus[k]))
else:
y_new_prob[i] = 1.0
return y_new_prob
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
33160,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.483598 | 4,024 |
from .pillar_encoder import PillarFeatureNet, PointPillarsScatter
from .voxel_encoder import SimpleVoxel, VFEV3_ablation, VoxelFeatureExtractorV3
from .feature_normalizer import FeatureNormalizer
__all__ = [
"VoxelFeatureExtractorV3",
"SimpleVoxel",
"PillarFeatureNet",
"PointPillarsScatter",
"VFEV3_ablation",
"FeatureNormalizer"
]
| [
6738,
764,
41643,
62,
12685,
12342,
1330,
39179,
38816,
7934,
11,
6252,
47,
359,
945,
3351,
1436,
198,
6738,
764,
85,
1140,
417,
62,
12685,
12342,
1330,
17427,
53,
1140,
417,
11,
569,
15112,
53,
18,
62,
397,
7592,
11,
28035,
417,
38... | 2.620438 | 137 |
# -*- coding: utf-8 -*-
""" Object Detection Viewer """
import serial, os, copy
import sys
import ctypes
#setup sdl
os.environ["PYSDL2_DLL_PATH"] = "..\env"
from sdl2 import *
from math import sin, cos, radians
import sdl2.ext
import sdl2.sdlgfx as gfx
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
9515,
46254,
3582,
263,
37227,
198,
11748,
11389,
11,
28686,
11,
4866,
198,
11748,
25064,
198,
11748,
269,
19199,
198,
198,
2,
40406,
264,
25404,
198,
418,
13,
26... | 2.392857 | 112 |
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
198,
6738,
764,
1330,
11507,
1143,
11,
43720,
77,
11,
4433,
62,
67,
2093,
628,
628
] | 3.064516 | 31 |
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import base64
import boto3
import os
import json
import time
import uuid
trackingId = os.environ['TRACKING_ID']
personalize_events = boto3.client(service_name='personalize-events') | [
2,
15069,
12131,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
12,
15,
198,
198,
11748,
2779,
2414,
198,
11748,
275,
2069,
18,
198,
11748,
28686,
... | 3.186813 | 91 |
import logging
import os
import uuid
import warnings
import sys
import click
import mal_tier_list_bbcode_gen.exceptions as exceptions
from flask import Flask, render_template, request, send_from_directory
from mal_tier_list_bbcode_gen.tierlistgenerator import TierListGenerator
from waitress import serve
from werkzeug.exceptions import RequestEntityTooLarge
UPLOAD_FOLDER = '/tmp'
MAX_CONTENT_LENGTH_MB = 4
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = MAX_CONTENT_LENGTH_MB * 1024 * 1024
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
@app.errorhandler(RequestEntityTooLarge)
@app.route('/favicon.ico')
@app.route('/', methods=['GET', 'POST'])
@app.route('/index.html', methods=['GET', 'POST'])
@app.route('/tutorial.html', methods=['GET'])
@click.command()
@click.option('--dev', is_flag=True)
if __name__ == '__main__':
main()
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
334,
27112,
198,
11748,
14601,
198,
11748,
25064,
198,
198,
11748,
3904,
198,
11748,
6428,
62,
24948,
62,
4868,
62,
11848,
8189,
62,
5235,
13,
1069,
11755,
355,
13269,
198,
198,
6738,
42903,
... | 2.824926 | 337 |
# Generated by Django 3.2.5 on 2022-02-15 20:45
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
20,
319,
33160,
12,
2999,
12,
1314,
1160,
25,
2231,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""Grafana Database plugin."""
import gettext
from otopi import constants as otopicons
from otopi import filetransaction
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.dwh import constants as odwhcons
from ovirt_engine_setup.grafana_dwh import constants as ogdwhcons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.engine_common import database
@util.export
class Plugin(plugin.PluginBase):
"""Grafana Database plugin."""
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
after=(
odwhcons.Stages.DB_SCHEMA,
),
condition=lambda self: (
self.environment[ogdwhcons.CoreEnv.ENABLE]
),
)
# vim: expandtab tabstop=4 shiftwidth=4
| [
2,
198,
2,
19643,
2265,
12,
18392,
12,
40406,
1377,
19643,
2265,
3113,
9058,
198,
2,
198,
2,
15069,
267,
53,
2265,
46665,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
2,
198,
2,
628,
198,
37811,... | 2.691218 | 353 |
# Generated by Django 3.2.4 on 2022-03-18 12:09
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
13,
19,
319,
33160,
12,
3070,
12,
1507,
1105,
25,
2931,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
diabetes = datasets.load_diabetes()
# (['data', 'target', 'frame', 'DESCR', 'feature_names', 'data_filename', 'target_filename'])
# print(diabetes.keys())
# print(diabetes.data)
# print(diabetes.DESCR)
diabetes_x = diabetes.data # [:, np.newaxis, 2] # for plotting line
diabetes_x_train = diabetes_x[:-30] # slicing from data
diabetes_x_test = diabetes_x[-20:]
diabetes_y_train = diabetes.target[:-30] # slicing from data
diabetes_y_test = diabetes.target[-20:]
model = linear_model.LinearRegression() # using regression model
model.fit(diabetes_x_train, diabetes_y_train)
diabetes_y_predict = model.predict(diabetes_x_test)
print("Mean squared error is : ", mean_squared_error(diabetes_y_test, diabetes_y_predict))
print("Weights: ", model.coef_)
print("Intercept: ", model.intercept_)
# for plotting a line
# plt.scatter(diabetes_x_test, diabetes_y_test)
# plt.plot(diabetes_x_test, diabetes_y_predict)
#
# plt.show()
# Mean squared error is : 2561.3204277283867
# Weights: [941.43097333]
# Intercept: 153.39713623331698 | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
1341,
35720,
1330,
40522,
11,
14174,
62,
19849,
201,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
1612,
62,
16485,
1144,... | 2.597872 | 470 |
# (c) 2021 Amazon Web Services, Inc. or its affiliates. All Rights Reserved.
# This AWS Content is provided subject to the terms of the AWS Customer Agreement available at
# https://aws.amazon.com/agreement/ or other written agreement between Customer
# and Amazon Web Services, Inc.
"""Notifier.
Provies core logic for the Notifier Lambda Function.
"""
import logging
import boto3
from botocore.exceptions import ClientError
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
| [
2,
357,
66,
8,
33448,
6186,
5313,
6168,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
770,
30865,
14041,
318,
2810,
2426,
284,
262,
2846,
286,
262,
30865,
22092,
12729,
1695,
379,
198,
2,
3740,
1378,
8356,
13,
... | 3.679104 | 134 |
import json
import sys
import github
access_key = sys.argv[-1]
git = github.Github(access_key)
symfem = git.get_repo("mscroggs/symfem")
branch = symfem.get_branch("main")
ref = symfem.get_git_ref("heads/main")
base_tree = symfem.get_git_tree(branch.commit.sha)
vfile1 = symfem.get_contents("VERSION", branch.commit.sha)
version = vfile1.decoded_content.decode("utf8").strip()
vfile2 = symfem.get_contents("codemeta.json", branch.commit.sha)
data = json.loads(vfile2.decoded_content)
assert data["version"] == version
for release in symfem.get_releases():
if release.tag_name == f"v{version}":
break
else:
symfem.create_git_tag_and_release(
f"v{version}", f"Version {version}", f"Version {version}", "Latest release",
branch.commit.sha, "commit")
| [
11748,
33918,
198,
11748,
25064,
198,
11748,
33084,
198,
198,
15526,
62,
2539,
796,
25064,
13,
853,
85,
58,
12,
16,
60,
198,
198,
18300,
796,
33084,
13,
38,
10060,
7,
15526,
62,
2539,
8,
198,
198,
37047,
69,
368,
796,
17606,
13,
1... | 2.465409 | 318 |
from flask import Blueprint, render_template, redirect, url_for, flash
from flask_login import login_required, current_user
views = Blueprint("views", __name__)
@views.route("/")
@views.route("/home")
@login_required
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
11,
7644,
198,
6738,
42903,
62,
38235,
1330,
17594,
62,
35827,
11,
1459,
62,
7220,
198,
198,
33571,
796,
39932,
7203,
33571,
1600,
11593,
3672,
834,
8,
628... | 3.363636 | 66 |
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm.collections import InstrumentedList
from spiderlib.db.db_modules.quote import Quote
from spiderlib.db.db_modules.author import Author
from spiderlib.db.db_modules.tag import Tag
import json
class DBEncoderJson(json.JSONEncoder):
"""
Helper class to convert SQLAlchemy db objects into json
"""
class DBEncoderDict(object):
"""
Helper class to convert SQLAlchemy nested db objects into dict
"""
@staticmethod
def encode(obj) -> dict:
"""
Converts SQLAlchemy nested db objects into dict
"""
# if
if isinstance(obj.__class__, DeclarativeMeta):
# an SQLAlchemy class
_dict = {}
_excluded_fields = ["metadata", "json", "dict", "to_dict"]
# filter the field
for field in [x for x in dir(obj) if not x.startswith('_') and x not in _excluded_fields]:
data = obj.__getattribute__(field)
try:
json.dumps(data) # this will fail on non-encodable values, like other classes
_dict[field] = data
except TypeError:
# object needs its own method (.to_dict)
if not isinstance(data, InstrumentedList):
_dict[field] = data.to_dict
else:
# list of object
# NOTE: it goes down one level only,
_dict[field] = []
for item in data:
_dict[field].append(item.to_dict)
return _dict
@staticmethod
def list_to_dict(list_obj) -> dict:
"""
Converts a list fof SQLAlchemy nested db objects into dict.
"""
_dict = dict()
for index, obj in enumerate(list_obj):
_dict[index] = DBEncoderDict.encode(obj)
return _dict
| [
198,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
16691,
283,
876,
48526,
198,
6738,
44161,
282,
26599,
13,
579,
13,
4033,
26448,
1330,
42410,
276,
8053,
198,
198,
6738,
19230,
8019,
13,
9945,
13,
9945,
62,
18170... | 2.082636 | 956 |
from django.apps import AppConfig
""" this models require wallets module """
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
198,
37811,
428,
4981,
2421,
29608,
8265,
37227,
628
] | 4.388889 | 18 |
import equipment
import logging
import os
import time
import numpy
import scipy.io as sio
import mks
import util
| [
11748,
5112,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
299,
32152,
198,
11748,
629,
541,
88,
13,
952,
355,
264,
952,
198,
198,
11748,
285,
591,
198,
11748,
7736,
628,
198,
220,
220,
220,
220,
220,
220,
... | 2.527273 | 55 |
"""Definitions for the primitive `Jinv`."""
from ..abstract.infer import compute_jinv_type
from ..lib import bprop_to_grad_transform, standard_prim
from ..operations import J
from . import primitives as P
@standard_prim(P.Jinv)
async def infer_Jinv(self, engine, x):
"""Infer the return type of primitive `Jinv`."""
return await compute_jinv_type(x)
@bprop_to_grad_transform(P.Jinv)
def bprop_Jinv(x, out, dout):
"""Backpropagator for primitive `Jinv`."""
return (J(dout),)
__operation_defaults__ = {
"name": "Jinv",
"registered_name": "Jinv",
"mapping": P.Jinv,
"python_implementation": None,
}
__primitive_defaults__ = {
"name": "Jinv",
"registered_name": "Jinv",
"type": "placeholder",
"python_implementation": None,
"inferrer_constructor": infer_Jinv,
"grad_transform": bprop_Jinv,
}
| [
37811,
7469,
50101,
329,
262,
20049,
4600,
41,
16340,
63,
526,
15931,
198,
198,
6738,
11485,
397,
8709,
13,
259,
2232,
1330,
24061,
62,
73,
16340,
62,
4906,
198,
6738,
11485,
8019,
1330,
275,
22930,
62,
1462,
62,
9744,
62,
35636,
11,
... | 2.546269 | 335 |
# -*- coding: utf-8 -*-
# Simple Bot (SimpBot)
# Copyright 2016-2017, Ismael Lugo (kwargs)
import time
from simpbot import envvars
from simpbot import localedata
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
17427,
18579,
357,
8890,
79,
20630,
8,
198,
2,
15069,
1584,
12,
5539,
11,
1148,
2611,
417,
31541,
78,
357,
46265,
22046,
8,
198,
198,
11748,
640,
198,
6738,
985,
... | 2.688525 | 61 |
from odoo import models
| [
6738,
16298,
2238,
1330,
4981,
628
] | 4.166667 | 6 |
list=[2,5,9,6]
print(revers(list))
# def revers(arr):
# new_array=[]
# for i in range(len(arr)-1,-1,-1):
# print(i)
# new_array.append(arr[i])
# print(new_array)
# revers(list)
# def revers(arr):
# new_array=[0 for i in arr ]
# for i in range(0,len(arr)):
# new_array[i]=arr[len(arr)-1-i]
# print(new_array)
# revers(list)
| [
4868,
41888,
17,
11,
20,
11,
24,
11,
21,
60,
198,
198,
4798,
7,
260,
690,
7,
4868,
4008,
198,
198,
2,
825,
10372,
7,
3258,
2599,
198,
2,
220,
220,
220,
220,
649,
62,
18747,
28,
21737,
198,
2,
220,
220,
220,
220,
329,
1312,
2... | 1.88 | 200 |
# -*- coding: utf-8 -*-
### ----------------------------- IMPORTS --------------------------- ###
import click
import os
import json
### ----------------------------------------------------------------- ###
def check_main(folder, data_dir, csv_dir):
"""
Check if folders exist and if h5 files match csv files.
Parameters
----------
folder : dict, with config settings
data_dir : str, data directory name
true_dir : str, csv directory name
Returns
-------
None,str, None if test passes, otherwise a string is returned with the name
of the folder where the test did not pass
"""
h5_path = os.path.join(folder, data_dir)
ver_path = os.path.join(folder, csv_dir)
if not os.path.exists(h5_path):
return h5_path
if not os.path.exists(ver_path):
return ver_path
h5 = {x.replace('.h5', '') for x in os.listdir(h5_path)}
ver = {x.replace('.csv', '') for x in os.listdir(ver_path)}
if len(h5) != len(h5 & ver):
return folder
def check_group_dir(settings, data_key='filt_dir', csv_key='true_dir'):
"""
Check if folders exist and if h5 files in filt directory match csv files.
Parameters
----------
settings : dict, with config settings
data_key : str, settings key for filtered data directory
csv_key : str, settings key for csv directory (can be ground truth or predicted)
Returns
-------
None,str, None if test passes, otherwise a string is returned with the name
of the folder where the test did not pass
"""
# get child folders and create success list for each folder
if not os.path.exists(settings['group_path']):
return settings['group_path']
folders = [f.path for f in os.scandir(settings['group_path']) if f.is_dir()]
# find whether the same files are present in filtered data and verified files
for folder in folders:
check_main(folder, data_dir=settings[data_key], csv_dir=settings[csv_key])
@click.group()
@click.pass_context
def main(ctx):
"""
-----------------------------------------------------
\b
\b _
\b ___ ___(_)_____ _
\b / __|/ _ \ |_ / | | |
\b \__ \ __/ |/ /| |_| |
\b |___/\___|_/___|\__, |
\b |___/
\b
-----------------------------------------------------
"""
# get settings and pass to context
with open(settings_path, 'r') as file:
settings = json.loads(file.read())
ctx.obj = settings.copy()
@main.command()
@click.pass_context
def setgrouppath(ctx):
"""Set path to group folder for processing"""
path = input('Enter Group Path for data processing: \n')
ctx.obj.update({'group_path': path, 'file_check': False})
with open(settings_path, 'w') as file:
file.write(json.dumps(ctx.obj))
click.secho(f"\n -> Group Path was set to:'{path}'.\n", fg='green', bold=True)
@main.command()
@click.pass_context
def setmainpath(ctx):
"""Set path to individual folder for verification"""
path = input('Enter Path for seizure verification: \n')
ctx.obj.update({'main_path': path})
with open(settings_path, 'w') as file:
file.write(json.dumps(ctx.obj))
click.secho(f"\n -> Path was set to:'{path}'.\n", fg='green', bold=True)
@main.command()
@click.pass_context
def filecheck(ctx):
""" Check whether files can be opened and read"""
from data_preparation.downsample import Lab2h5
# get child folders and create success list for each folder
if not os.path.exists(ctx.obj['group_path']):
click.secho(f"\n -> Group folder '{ctx.obj['group_path']}' was not found." +\
" Please run -setgrouppath-.\n",
fg='yellow', bold=True)
return
folders = [f.path for f in os.scandir(ctx.obj['group_path']) if f.is_dir()]
success_list = []
for f_path in folders:
ctx.obj['main_path'] = f_path
obj = Lab2h5(ctx.obj)
success = obj.check_files()
success_list.append(success)
# save error check to settings file
ctx.obj.update({'file_check': all(success_list)})
with open(settings_path, 'w') as file:
file.write(json.dumps(ctx.obj))
click.secho(f"\n -> Error check for group folder '{ctx.obj['group_path']}' completed.\n",
fg='green', bold=True)
@main.command()
@click.option('--p', type=str, help='downsample, filter, predict')
@click.pass_context
def process(ctx, p):
"""Process data (downsample, filter, predict)"""
if not ctx.obj['file_check']:
click.secho("\n -> File check has not pass. Please run -filecheck-.\n",
fg='yellow', bold=True)
return
process_type_options = ['downsample', 'filter', 'predict']
if p is None:
process_type = set(process_type_options)
else:
process_type = set([p])
# check if user input exists in process types
process_type = list(process_type.intersection(process_type_options))
if not process_type:
click.secho(f"\n -> Got'{p}' instead of {process_type_options}\n",
fg='yellow', bold=True)
return
# get parent folders (children of group dir)
folders = [f.path for f in os.scandir(ctx.obj['group_path']) if f.is_dir()]
# process functions
if 'downsample' in process_type:
from data_preparation.downsample import Lab2h5
for f_path in folders:
ctx.obj['main_path'] = f_path
Lab2h5(ctx.obj).downsample()
ctx.obj.update({'downsample':1})
if 'filter' in process_type:
from data_preparation.preprocess import PreProcess
for f_path in folders:
ctx.obj['main_path'] = f_path
PreProcess(ctx.obj).filter_data()
ctx.obj.update({'filtered':1})
if 'predict' in process_type:
from data_preparation.get_predictions import ModelPredict
for f_path in folders:
ctx.obj['main_path'] = f_path
ModelPredict(ctx.obj).predict()
ctx.obj.update({'predicted':1})
with open(settings_path, 'w') as file:
file.write(json.dumps(ctx.obj))
return
@main.command()
@click.pass_context
def verify(ctx):
"""Verify detected seizures"""
out = check_main(folder=ctx.obj['main_path'],
data_dir=ctx.obj['filt_dir'],
csv_dir=ctx.obj['rawpred_dir'])
if out:
click.secho(f"\n -> Main path was not set properly. Could not find: {out}.\n",
fg='yellow', bold=True)
return
# import toolbox for verification
from user_gui.user_verify import UserVerify
# Create instance for UserVerify class
obj = UserVerify(ctx.obj)
file_id = obj.select_file() # user file selection
data, idx_bounds = obj.get_bounds(file_id) # get data and seizure index
# check for zero seizures otherwise proceed with gui creation
if idx_bounds.shape[0] == 0:
obj.save_emptyidx(data.shape[0], file_id)
else:
from user_gui.verify_gui import VerifyGui
VerifyGui(ctx.obj, file_id, data, idx_bounds)
@main.command()
@click.pass_context
def getprop(ctx):
"""Get seizure properties"""
ver_path = os.path.join(ctx.obj['main_path'], ctx.obj['verpred_dir'])
if os.path.exists(ver_path):
filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))
if not filelist:
click.secho("\n -> Could not find verified seizures: Please verify detected seizures.\n",
fg='yellow', bold=True)
return
# get properies and save
from helper.get_seizure_properties import get_seizure_prop
_,save_path = get_seizure_prop(ctx.obj)
click.secho(f"\n -> Properies were saved in '{save_path}'.\n", fg='green', bold=True)
@main.command()
@click.option('--p', type=str, help='threshold, parameters, train')
@click.pass_context
def train(ctx, p):
"""Find best parameters"""
# check input
process_type_options = ['threshold', 'parameters', 'train']
if p is None:
process_type = set(process_type_options)
else:
process_type = set([p])
# check if user input exists in process types
process_type = list(process_type.intersection(process_type_options))
if not process_type:
click.secho(f"\n -> Got'{p}' instead of {process_type_options}\n",
fg='yellow', bold=True)
return
# get paths from user and check if they are valid
paths={}
if 'train' in process_type:
paths = {'train': 'training data', 'test': 'testing data'}
elif 'threshold' in process_type:
paths = {'train': 'training data'}
for i,(key,val) in enumerate(paths.items()):
path = input('\n' + str(i+1) + '.Enter group path to ' + val + ':\n')
paths.update({key:path})
ctx.obj.update({'group_path':path})
folder = check_group_dir(ctx.obj)
if folder is not None:
click.secho(f"\n -> Error in '{folder}'. Could not find .h5 files that match" +\
" .csv files in children directories.\n", fg='yellow', bold=True)
return
if 'threshold' in process_type:
# find optimum thresholds
from train.threshold_metrics import ThreshMetrics
ThreshMetrics(paths['train'], ctx.obj['true_dir']).multi_folder()
if 'parameters' in process_type:
# create parameter space catalogue
from train.create_parameter_space import CreateCatalogue
CreateCatalogue().get_parameter_space()
if 'train' in process_type:
# get metrics from training and testing datasets
from train.get_method_metrics import MethodMetrics
for dataset in paths:
csv_name = 'parameter_metrics_' + dataset + '.csv'
MethodMetrics(paths[dataset], ctx.obj['true_dir'],
data_dir=ctx.obj['filt_dir'],
output_csv_name=csv_name).multi_folder()
# export best method
from train.get_best_parameters import get_best
df,_ = get_best(common_n=1, save=True)
print_msg = df[['percent_detected', 'false_positive_rate']].to_string()
click.secho(print_msg, fg='white', bold=True)
if process_type == set(process_type_options):
click.secho('\n ---> Training was completed successfully.\n',
fg='bright_magenta', bold=True)
@main.command()
@click.option('--n', type=str, help='Select number of methods')
@click.option('--s', type=str, help='Select method id')
@click.pass_context
def selbest(ctx, n, s):
"""Select best parameter"""
from train.get_best_parameters import get_best
import pandas as pd
if not n:
n = 1
else:
n = int(n)
# select best method
df, save_path = get_best(common_n=n)
print_msg = df[['percent_detected', 'false_positive_rate']].to_string()
click.secho('\n' + print_msg + '\n', fg='white', bold=True)
if not s:
s = df.index[0]
else:
s = int(s)
# save dataframe
df = pd.DataFrame(df.loc[s])
df.T.to_csv(save_path, index=False)
print_msg = '--> Index: ' + str(s) +\
' Best parameter-set was exported to: ' + save_path + '\n'
click.secho(print_msg, fg='white', bold=True)
if __name__ == '__main__':
# define settings path
temp_settings_path = 'temp_config.json'
settings_path = 'config.json'
# check if settings file exist and if all the fields are present
if not os.path.isfile(settings_path):
import shutil
shutil.copy(temp_settings_path, settings_path)
else:
# check if keys match otherwise load original settings
with open(temp_settings_path, 'r') as file:
temp_settings = json.loads(file.read())
with open(settings_path, 'r') as file:
settings = json.loads(file.read())
if settings.keys() != temp_settings.keys():
import shutil
shutil.copy(temp_settings_path, settings_path)
# init cli
main(obj={})
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
21017,
34400,
32501,
30023,
33002,
220,
22369,
6329,
44386,
198,
11748,
3904,
198,
11748,
28686,
198,
11748,
33918,
198,
21017,
16529,
12,
44386,
628,
198,
4299,
2198,
... | 2.179169 | 5,972 |
import json # import function unused for now, until we have a sample JSON file.
class Processing:
"""Contains logic for processing json from wrapper.
:mean: defines mean price of input prices from mean
:location: defines average lat and long for input.
:average: defines average.
:median: defines median for input data.
Note: JSON imports INCOMPLETE
"""
processeddata = {}
def average(self, *args):
"""Simple average calculator"""
total = sum(args)
totalinstances = len(args)
average = total / totalinstances
return average
def mean(self, *args):
"""fixes potential issue with calling average in location method"""
mean = self.average(args)
Processing.processeddata.update({'meanprice': mean})
return mean
def location(self, *args1, **args2):
"""Creates an average location for all houses analysed, in longitude and latitude.
Uses output from Processing.average() to determine average latitude and longitude.
I think there would be an issue the Processing.processeddata.update({'meanprice': mean})
because it would record this average as the meanprice, so I created a seperate mean function
that will call the average function
"""
averagelat = self.average(args1)
averagelong = self.average(args2)
Processing.processeddata.update({'averagelat': averagelat, 'averagelong': averagelong})
def median(self, *args):
"""Simple median calculator"""
prices = sorted(args)
totalInstances = len(args)
if totalInstances%2 == 0:
median = (prices[int(totalInstances/2)]+prices[int((totalInstances/2)-1)])/2
else:
median = prices[int((totalInstances-1)/2)]
Processing.processeddata.update({'median':median})
<<<<<<< Updated upstream
return prices[int((totalInstances-1)/2)]
=======
return prices[(totalInstances-1)/2]
def iqr(self):
>>>>>>> Stashed changes
| [
11748,
33918,
220,
1303,
1330,
2163,
21958,
329,
783,
11,
1566,
356,
423,
257,
6291,
19449,
2393,
13,
198,
198,
4871,
28403,
25,
198,
220,
220,
220,
37227,
4264,
1299,
9156,
329,
7587,
33918,
422,
29908,
13,
628,
220,
220,
220,
1058,
... | 2.737617 | 747 |
import json, logging, pytest
from decimal import Decimal
from cryptoadvance.specter.helpers import alias, generate_mnemonic
from cryptoadvance.specter.key import Key
from cryptoadvance.specter.rpc import BitcoinRPC
from cryptoadvance.specter.specter import get_rpc, Specter
from cryptoadvance.specter.specter_error import SpecterError
from cryptoadvance.specter.wallet_manager import WalletManager
@pytest.mark.skip(reason="no idea why this does not pass on gitlab exclusively")
| [
11748,
33918,
11,
18931,
11,
12972,
9288,
198,
6738,
32465,
1330,
4280,
4402,
198,
6738,
8194,
1170,
19259,
13,
4443,
263,
13,
16794,
364,
1330,
16144,
11,
7716,
62,
10295,
50016,
198,
6738,
8194,
1170,
19259,
13,
4443,
263,
13,
2539,
... | 3.558824 | 136 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 5 22:34:12 2020
@author: arti
"""
import pandas as pd
dict_data = {'c0':[1,2,3], 'c1':[4,5,6], 'c2':[7,8,9], 'c3':[10,11,12], 'c4':[13,14,15]}
df = pd.DataFrame(dict_data, index=['r0', 'r1', 'r2'])
print(df); print('--')
ndf = df.sort_index(ascending=False)
print(ndf); print('--')
ndf = df.sort_index()
print(ndf); print('--') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2447,
220,
642,
2534,
25,
2682,
25,
1065,
12131,
198,
198,
31,
9800,
25,
1242,
72,
... | 2 | 202 |
import graphene
from django.test import TestCase
from django.contrib.auth.models import User
from chigre.models import Brewery, BeerType, KegType, TapType, Beer, Keg, Tap, Pub
from chigreQL.schema import Query
# Create your tests here.
| [
11748,
42463,
198,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
442,
328,
260,
13,
27530,
1330,
31003,
11,
16971,
6030,
11,
509,
1533,
6030,
... | 3.260274 | 73 |
#!/usr/bin/env python3
import os
import pathlib
import smtplib
import ssl
import config
from message import Message
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
895,
83,
489,
571,
198,
11748,
264,
6649,
198,
198,
11748,
4566,
198,
6738,
3275,
1330,
16000,
628
] | 3.216216 | 37 |
import numpy as np
import xpress as xp | [
11748,
299,
32152,
355,
45941,
198,
11748,
2124,
8439,
355,
36470
] | 3.454545 | 11 |
import deriva.core.ermrest_model as em
from deriva.core import ErmrestCatalog, get_credential
from deriva.core.ermrest_config import tag as chaise_tags
from requests.exceptions import HTTPError
from attrdict import AttrDict
def default_table_config(catalog, schema_name, table_name):
"""
This function adds the following basic configuration details to an existing table:
1) Creates a self service modification policy in which creators can update update any row they create. Optionally,
an Owner column can be provided, which allows the creater of a row to delegate row ownership to a specific
individual.
2) Adds display annotations and foreign key declarations so that system columns RCB, RMB display in a user friendly
way.
:param catalog:
:param schema_name:
:param table_name:
:return:
"""
model_root = catalog.getCatalogModel()
schema = model_root.schemas[schema_name]
table = schema.tables[table_name]
if table.column_definitions['Owner']:
print('Table missing owner column.')
# Make table policy be self service, creators can update.
self_service_policy = {
"self_service_creator": {
"types": ["update", "delete"],
"projection": ["RCB"],
"projection_type": "acl"
}
}
if table.column_definitions['Owner']:
self_service_policy['self_service_owner'] = {
"types": ["update", "delete"],
"projection": ["Owner"],
"projection_type": "acl"
}
table.acl_bindings.update(self_service_policy)
model_root.apply(catalog)
# Set up foreign key to ermrest_client on RCB and Owner.
for col, display in [('RCB', 'Created By'), ('RMB', 'Modified By'), ('Owner', 'Ownder')]:
fk_name = '{}_{}_fkey'.format(table_name, col)
fk = em.ForeignKey.define(
[col],
'public',
'ermrest_client',
['id'],
constraint_names=[(schema_name, fk_name)],
)
try:
# Delete old fkey if there is one laying around....
f = table.foreign_keys[(schema_name, fk_name)]
f.delete(catalog, table)
except KeyError:
pass
table.create_fkey(catalog, fk)
# Add a display annotation so that we use the user name on RCB and RMB and Owner
column_annotation = {
'tag:isrd.isi.edu,2016:column-display':
{'*': {
'markdown_pattern': '{{{{{{$fkeys.{}.{}.values._display_name}}}}}}'.format(schema_name, fk_name)}},
'tag:misd.isi.edu,2015:display': {'markdown_name': display}
}
table.column_definitions[col].annotations.update(column_annotation)
table.apply(catalog, schema)
return
def default_visible_columns(table):
"""
return a baseline visible columns annotation for all the columns in a table that can be modified to create more
customized displays.
"""
pass
| [
11748,
4587,
12151,
13,
7295,
13,
7780,
2118,
62,
19849,
355,
795,
198,
6738,
4587,
12151,
13,
7295,
1330,
5256,
76,
2118,
49015,
11,
651,
62,
66,
445,
1843,
198,
6738,
4587,
12151,
13,
7295,
13,
7780,
2118,
62,
11250,
1330,
7621,
3... | 2.458401 | 1,226 |
from django.urls import path
from .views import TaskListCreateView, TaskUpdateDeleteView
urlpatterns = [
path('', TaskListCreateView.as_view()),
path('<int:pk>/', TaskUpdateDeleteView.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
764,
33571,
1330,
15941,
8053,
16447,
7680,
11,
15941,
10260,
38727,
7680,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
15941,
8053,
16447,
7680,
13... | 2.835616 | 73 |
import link_cpp
link_cpp.main()
import mathx_cpp
mathx_cpp.main()
import mathx
print(mathx.inverse(2.0)) | [
11748,
2792,
62,
20322,
198,
8726,
62,
20322,
13,
12417,
3419,
198,
198,
11748,
10688,
87,
62,
20322,
198,
11018,
87,
62,
20322,
13,
12417,
3419,
198,
198,
11748,
10688,
87,
198,
4798,
7,
11018,
87,
13,
259,
4399,
7,
17,
13,
15,
4... | 2.409091 | 44 |
#!/usr/bin/python3.8
import sys
import unittest
from pprint import pprint
from jinja2 import Template
sys.path.append('sphinxcontrib')
from src import ExtIndexRack as IndexRack
from . import util
#-------------------------------------------------------------------
#kana_text_word_listの上書き
testcase01in = {
'doc01': [('single','ああ|球球球; いい|球球球','id-01','',None)],
'doc02': [('see','かか|球球球; めめ|球球球','id-02','',None)],
'doc03': [('single','ささ|球球球; んん|球球球','id-03','',None)],
'doc04': [('seealso','たた|拾拾拾; いい|拾拾拾','id-04','',None)],
'doc05': [('single','なな|拾拾拾; めめ|拾拾拾','id-05','',None)],
'doc06': [('single','おお|拾拾拾; んん|拾拾拾','id-06','',None)],
}
#kana_text_word_listの上書き
testcase02in = {
'doc01': [('single','ああ|球球球; いい|球球球','id-01','',None)],
'doc02': [('see','かか|球球球; めめ|球球球','id-02','',None)],
'doc03': [('single','ささ|球球球; んん|球球球','id-03','',None)],
'doc04': [('seealso','たた|拾拾拾; いい|拾拾拾','id-04','',None)],
'doc05': [('single','なな|拾拾拾','id-05','',None)],
'doc06': [('single','おお|拾拾拾','id-06','',None)],
}
testcase01str = "tests/jinja2/result75_01.txt"
testcase02str = "tests/jinja2/result75_02.txt"
#-------------------------------------------------------------------
template = get_template('tests/genindex.tpl')
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
23,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
6738,
474,
259,
6592,
17,
1330,
37350,
198,
198,
17597,
13,
6978,
13,
33295,
10786,
82,
746,... | 1.852807 | 659 |
import os
import yaml
import tempfile
from abstract_tts import AbstractMp3TTSEngine
from src import diagnose
from src import paths
try:
import gtts
except ImportError:
pass
class GoogleTTS(AbstractMp3TTSEngine):
"""
Uses the Google TTS online translator
Requires pymad and gTTS to be available
"""
SLUG = "google-tts"
@classmethod
@classmethod
@property
| [
11748,
28686,
198,
11748,
331,
43695,
198,
11748,
20218,
7753,
198,
198,
6738,
12531,
62,
83,
912,
1330,
27741,
28861,
18,
51,
4694,
13798,
198,
6738,
12351,
1330,
37489,
198,
6738,
12351,
1330,
13532,
198,
198,
28311,
25,
198,
220,
220... | 2.871429 | 140 |
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Downloads a patch and changed files from Rietveld.
Prints the patch of the most recent patchset to stdout.
"""
try:
import base64
import fix_encoding
import gerrit_util
import git_cl
import optparse
import os.path
# import StringIO
import sys
import tarfile
#import urllib2
from third_party import colorama
except ImportError as e:
print(e)
print('Perhaps you\'re missing depot_tools in your PYTHONPATH.')
import sys
sys.exit(1)
if __name__ == '__main__':
# These affect sys.stdout so do it outside of main() to simplify mocks in
# unit testing.
fix_encoding.fix_encoding()
colorama.init()
sys.exit(main(sys.argv[1:]))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
... | 3.05694 | 281 |
#!/usr/bin/env python
import socket
import select
import errno
import os
import sys
import time
import getopt
import string
import shutil
import configuration_client
import log_client
import udp_client
import library_manager_client
import e_errors
import enstore_functions2
import Trace
import enstore_mail
interval = 30
event_dict = {}
time_for_record = time.time()
MY_NAME = "LM_NANNY"
RETRY_ATTEMPTS = 5
RETRY_TO = 20
DEBUG_LOG = 11
mail_recipient = os.environ.get("ENSTORE_MAIL", None)
prog_name = sys.argv[0].split('/')[-1]
restart = False
levels = None
opts, args = getopt.getopt(sys.argv[1:], "d:t:h:r", ["debug", "timeout", "help", "restart"])
for o, a in opts:
if o in ["-t", "--time"]:
interval = int(a)
if o in ["-m", "--mail"]:
mail_recipient = a
if o in ["-d", "--debug"]:
levels = a
if o in ["-h", "--help"]:
print_help()
sys.exit(0)
if o in ["-r", "--restart"]:
restart = True
if not mail_recipient:
print "Please specify mail recipient"
sys.exit(1)
csc = configuration_client.ConfigurationClient((os.environ['ENSTORE_CONFIG_HOST'],
int(os.environ['ENSTORE_CONFIG_PORT'])))
logc = log_client.LoggerClient(csc, MY_NAME)
Trace.init(MY_NAME)
lm_list_0 = csc.get_library_managers()
# get library managers from stdin
lm_list = []
for k in lm_list_0:
l = lm_list_0[k]
# find list entry corresponding to library manager
# from the argument list
for lm in args:
if l['name'] == lm:
lm_list.append(lm)
# found lm, can break here
break
# create library manager clients
lmc_list = []
for lm in lm_list:
lmc_list.append(LMC(csc, lm))
try:
while True:
for lmc in lmc_list:
if not lmc.is_monitored():
# we do not care about library managers that are not
# watched
continue
# Check whether library manager is running.
if not lmc.is_lm_running():
continue # library manager is not running, we do not care: why
# get current queue length
ql = lmc.get_pending_queue_length(timeout=10)
Trace.log(DEBUG_LOG, "LM %s pending_queue_length returned %s"%(lmc.server_name, ql,))
# show netstats
control_buf, encp_buf, mover_buf, udp_errors = get_netstat(lmc.control_port,
lmc.encp_port,
lmc.mover_port)
Trace.log(DEBUG_LOG, "net stats: CB %s ENCPB %s MOVB %s ERR %s"%(control_buf,
encp_buf,
mover_buf,
udp_errors))
# Number of LM ports
# can be 1 or 3.
# If it is 1 the library manager does not have
# separate threads to serve requests.
# If it is 3 the library manager has
# separate threads to serve requests coming
# from encp, movers, and other client.
# We need to know how many ports the
# LM has and how many of these ports do not respond
not_responding_ports = 0
rc = lmc.ping_lm_port(lmc.control_port)
if rc:
Trace.log(e_errors.ERROR, "Library manager %s is not responding on %s %s"%
(lmc.server_name, lmc.host, lmc.control_port))
not_responding_ports = not_responding_ports + rc
rc = lmc.ping_lm_port(lmc.mover_port)
if rc:
Trace.log(e_errors.ERROR, "Library manager %s is not responding on %s mover port %s"%
(lmc.server_name, lmc.host, lmc.mover_port))
not_responding_ports = not_responding_ports + rc
rc = lmc.ping_lm_port(lmc.encp_port)
if rc:
Trace.log(e_errors.ERROR, "Library manager %s is not responding on %s encp port %s"%
(lmc.server_name, lmc.host, lmc.encp_port))
not_responding_ports = not_responding_ports + rc
# library manager is running and hanging
if not_responding_ports > 0:
record_event(lmc.server_name, "NOT_RUNNING")
# Restart library manager on weekdays (Mon - Fri) after work hours
# and on weekend.
# Otherwise send e-mail to developer
t = time.localtime()
if (t.tm_wday in (5,6) or # weekend
(t.tm_hour not in xrange(8, 17)) or # weekday before 8:00am or after 5:00pm
(restart)): # restart unconditionally
# restart LM
lmc.restart(levels)
else: # weekdays between 8:00 and 17:00
Trace.alarm(e_errors.INFO, "Library manager %s does not get restarted during work hours"%(lmc.server_name, ))
enstore_mail.send_mail(MY_NAME, "Library manager %s is not responding."%(lmc.server_name,),
"Library manager %s is not responding. Check log file"%(lmc.server_name,), mail_recipient)
time.sleep(interval)
except KeyboardInterrupt:
Trace.log(e_errors.INFO, "Monitoring Statistics: %s"%(event_dict,))
except:
Trace.handle_error()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
17802,
198,
11748,
2922,
198,
11748,
11454,
3919,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
651,
8738,
198,
11748,
4731,
198,
11748,
4423,
346,
198,
... | 1.951799 | 2,863 |
#!/usr/bin/env python
import roslib
import rospy
import math
import tf
import geometry_msgs.msg
from pycrazyswarm import *
import datetime
import csv
import time
import julia
jl = julia.Julia(compiled_modules=False)
from julia import Main
# Enable or disable using Julia MPC algorithm
USE_JULIA = True
# Assemble filename for logged data
datetimeString = datetime.datetime.now().strftime("%m%d%y-%H:%M:%S")
csv_filename = "experiment_data/" + datetimeString + "-2drones.csv"
# Enable or disable data logging
LOG_DATA = True
TAKEOFF_Z = 1.0
TAKEOFF_DURATION = 3.0
# Used to tune aggresiveness of low-level controller
GOTO_DURATION = 1.6
# Defining takeoff and experiment start position
cf1_takeoff_pos = [0.0, 0.0, 1.0]
cf1_start_pos = [-1.0, 1.0, 1.0]
cf2_takeoff_pos = [-0.5, 0.0, 1.0]
cf2_start_pos = [0.0, -1.0, 1.0]
# Import waypoints from csv file
csvfilename = "hmmm.csv"
data = np.genfromtxt(csvfilename, delimiter=',')
waypoints_cf1 = []
waypoints_cf2 = []
for i in range(data.shape[0]):
waypoints_cf1.append(list(data[i, 0:3]))
waypoints_cf2.append(list(data[i, 0:3])) # <------ FIX ME
if __name__ == '__main__':
#rospy.init_node('tf_listener')
swarm = Crazyswarm()
timeHelper = swarm.timeHelper
num_cfs = len(swarm.allcfs.crazyflies)
cf1 = swarm.allcfs.crazyflies[0]
cf2 = swarm.allcfs.crazyflies[1]
listener = tf.TransformListener()
rate = rospy.Rate(10.0)
if LOG_DATA:
print("### Logging data to file: " + csv_filename)
csvfile = open(csv_filename, 'w')
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['# CFs', str(num_cfs)])
csvwriter.writerow(["Timestamp [s]"] + num_cfs*["TODO (disregard)"])
try:
perform_experiment()
except Exception as e:
print ("##### Python exception occurred! Returning to start location and landing #####")
cf1.goTo(cf1_takeoff_pos, yaw=0.0, duration=3.0)
cf2.goTo(cf1_takeoff_pos, yaw=0.0, duration=3.0)
timeHelper.sleep(4.0)
cf1.land(targetHeight=0.05, duration=3.0)
cf2.land(targetHeight=0.05, duration=3.0)
timeHelper.sleep(4.0)
raise(e)
except KeyboardInterrupt:
print ("##### KeyboardInterrupt detected. Landing all CFs #####")
cf1.land(targetHeight=0.05, duration=3.0)
cf2.land(targetHeight=0.05, duration=3.0)
timeHelper.sleep(4.0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
6649,
571,
198,
11748,
686,
2777,
88,
198,
11748,
10688,
198,
11748,
48700,
198,
11748,
22939,
62,
907,
14542,
13,
19662,
198,
6738,
12972,
66,
3247,
893,
31975,
1330,
1635,
... | 2.313576 | 1,046 |
from calendar import timegm
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
import six
from sanic_restful_api import marshal
__all__ = ["String", "FormattedString", "DateTime", "Float",
"Integer", "Arbitrary", "Nested", "List", "Raw", "Boolean",
"Fixed", "Price"]
class MarshallingException(Exception):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if isinstance(key, int):
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def to_marshallable_type(obj):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
return dict(obj.__dict__)
class Raw(object):
"""Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
"""
def format(self, value):
"""Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
"""
value = get_value(
key if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
"""
class List(Raw):
"""
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
"""
class String(Raw):
"""
Marshal a value as a string. Uses ``six.text_type`` so values will
be converted to :class:`unicode` in python2 and :class:`str` in
python3.
"""
class Integer(Raw):
""" Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
"""
class Boolean(Raw):
"""
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to
``False``.
"""
class FormattedString(Raw):
"""
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
"""
def __init__(self, src_str):
"""
:param string src_str: the string to format with the other
values from the response.
"""
super(FormattedString, self).__init__()
self.src_str = six.text_type(src_str)
class Float(Raw):
    """
    A double as IEEE-754 double precision.
    ex : 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf
    -inf
    """
class Arbitrary(Raw):
    """
    A floating point number with an arbitrary precision
    ex: 634271127864378216478362784632784678324.23432
    """
class DateTime(Raw):
    """
    Return a formatted datetime string in UTC. Supported formats are RFC 822
    and ISO 8601.
    See :func:`email.utils.formatdate` for more info on the RFC 822 format.
    See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
    format.
    :param dt_format: ``'rfc822'`` or ``'iso8601'``
    :type dt_format: str
    """
# Shared zero constant used by fixed-precision fields.
# NOTE(review): MyDecimal is presumably an alias of decimal.Decimal — confirm
# against this module's imports.
ZERO = MyDecimal()
class Fixed(Raw):
    """
    A decimal number with a fixed precision.
    """
"""Alias for :class:`~fields.Fixed`"""
# NOTE(review): the bare string above is a no-op statement, not an attribute
# docstring; it describes the alias defined on the next line.
Price = Fixed
def _rfc822(dt):
"""Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => "Sat, 01 Jan 2011 00:00:00 -0000"
:param dt: The datetime to transform
:type dt: datetime
:return: A RFC 822 formatted date string
"""
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
"""Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => "2012-01-01T00:00:00"
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
"""
return dt.isoformat()
| [
198,
6738,
11845,
1330,
640,
39870,
198,
6738,
32465,
1330,
4280,
4402,
355,
2011,
10707,
4402,
11,
371,
15919,
62,
39,
1847,
37,
62,
20114,
1677,
198,
6738,
3053,
13,
26791,
1330,
5794,
4475,
198,
11748,
2237,
198,
6738,
5336,
291,
6... | 2.72584 | 2,411 |
from flask import render_template, request, current_app, jsonify, redirect, session
from init import app
from utils.interceptors import jsonRequest, loginRequiredJSON, loginOptional
from utils.jsontools import *
from utils.exceptions import UserError
from utils import getDefaultJSON
from services.rating import rateVideo, ratePlaylist, getVideoRating, getPlaylistRating, getVideoRatingAggregate, getPlaylistRatingAggregate
from services.tcb import filterOperation
from bson import ObjectId
@app.route('/rating/video.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
@app.route('/rating/playlist.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
@app.route('/rating/get_video.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
@app.route('/rating/get_playlist.do', methods = ['POST'])
@loginRequiredJSON
@jsonRequest
@app.route('/rating/get_video_total.do', methods = ['POST'])
@loginOptional
@jsonRequest
@app.route('/rating/get_playlist_total.do', methods = ['POST'])
@loginOptional
@jsonRequest
| [
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
11,
1459,
62,
1324,
11,
33918,
1958,
11,
18941,
11,
6246,
198,
198,
6738,
2315,
1330,
598,
198,
6738,
3384,
4487,
13,
3849,
984,
669,
1330,
33918,
18453,
11,
17594,
37374,
40386,
11,
... | 3.298077 | 312 |
import fnmatch
import os
def custom_import_module(full_config_path):
    """
    Import and execute a python file as a module. Useful for import the experiment
    module and the analysis module.

    Args:
        full_config_path: Full path to the python file.

    Returns: The python file as a module
    """
    from importlib import util

    # Build a module spec from the file location, materialize a module object
    # from it, then run the file's code inside that module's namespace.
    module_spec = util.spec_from_file_location("mod", full_config_path)
    loaded_module = util.module_from_spec(module_spec)
    module_spec.loader.exec_module(loaded_module)
    return loaded_module
def checkpoint_from_trained_directory(full_trained_directory, checkpoint_desired):
    """
    Return the checkpoint directory to load the policy. If checkpoint_desired is specified and
    found, then return that policy. Otherwise, return the last policy.

    Args:
        full_trained_directory: Directory containing ``checkpoint_*`` subdirectories.
        checkpoint_desired: Checkpoint number to load, or None for the latest.

    Returns:
        Tuple of (checkpoint directory path, checkpoint number).

    Raises:
        FileNotFoundError: If no checkpoint directory exists.
    """
    def _checkpoint_number(checkpoint_dir):
        # Directory names look like ".../checkpoint_000010"; the number is the
        # suffix after the last underscore. os.path.basename keeps this
        # portable (the original split on '/' broke on Windows separators).
        return int(os.path.basename(checkpoint_dir).split('_')[-1])

    checkpoint_dirs = find_dirs_in_dir('checkpoint*', full_trained_directory)

    # Try to load the desired checkpoint
    if checkpoint_desired is not None:  # checkpoint specified
        for checkpoint in checkpoint_dirs:
            if checkpoint_desired == _checkpoint_number(checkpoint):
                return checkpoint, checkpoint_desired
        import warnings
        warnings.warn(
            f'Could not find checkpoint_{checkpoint_desired}. Attempting to load the last '
            'checkpoint.'
        )

    # Fall back to the highest-numbered checkpoint.
    max_checkpoint = None
    max_checkpoint_value = 0
    for checkpoint in checkpoint_dirs:
        checkpoint_value = _checkpoint_number(checkpoint)
        if checkpoint_value > max_checkpoint_value:
            max_checkpoint_value = checkpoint_value
            max_checkpoint = checkpoint

    if max_checkpoint is None:
        raise FileNotFoundError("Did not find a checkpoint file in the given directory.")

    return max_checkpoint, max_checkpoint_value
def find_dirs_in_dir(pattern, path):
    """
    Recursively walk *path* collecting directories whose name matches the
    shell-style *pattern* (see :mod:`fnmatch`).

    Return: list of matching directory paths
    """
    matches = []
    for parent, subdirs, _files in os.walk(path):
        matches.extend(
            os.path.join(parent, subdir)
            for subdir in subdirs
            if fnmatch.fnmatch(subdir, pattern)
        )
    return matches
| [
11748,
24714,
15699,
198,
11748,
28686,
628,
198,
4299,
2183,
62,
11748,
62,
21412,
7,
12853,
62,
11250,
62,
6978,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
17267,
290,
12260,
257,
21015,
2393,
355,
257,
8265,
13,
49511,
32... | 2.75125 | 800 |
"""pymodel config"""
import tracemultiplexer
tracemultiplexer.unsynchronized = True # ignore tracelock, may corrupt log file
| [
37811,
9078,
19849,
4566,
37811,
198,
198,
11748,
491,
330,
368,
586,
2480,
87,
263,
198,
198,
2213,
330,
368,
586,
2480,
87,
263,
13,
403,
28869,
11413,
1143,
796,
6407,
1303,
8856,
491,
330,
417,
735,
11,
743,
10622,
2604,
2393,
6... | 2.976744 | 43 |
from django.urls import path, include
from pages import views
# URL routes for the pages app.
urlpatterns = [
    path('', views.index, name='index'),      # site root -> index view
    path('about', views.about, name='about')  # /about -> about view
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
201,
198,
6738,
5468,
1330,
5009,
201,
198,
201,
198,
6371,
33279,
82,
796,
685,
201,
198,
220,
220,
220,
3108,
10786,
3256,
5009,
13,
9630,
11,
1438,
11639,
9630,
33809,
201,
... | 2.707692 | 65 |
# -*- coding: utf-8 -*-
import re
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
628,
198
] | 2 | 18 |
import Constant
| [
11748,
20217,
628
] | 5.666667 | 3 |
"""This module tests the `/withdraw` endpoint."""
import json
from unittest.mock import patch
import pytest
from stellar_sdk.keypair import Keypair
from stellar_sdk.transaction_envelope import TransactionEnvelope
from polaris import settings
from polaris.helpers import format_memo_horizon
from polaris.management.commands.watch_transactions import process_withdrawal
from polaris.models import Transaction
from polaris.tests.helpers import mock_check_auth_success
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_success(mock_check, client, acc1_usd_withdrawal_transaction_factory):
    """`GET /withdraw` succeeds with no optional arguments."""
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    # Interactive flow: the anchor answers 403 with a webapp URL.
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_invalid_asset(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """`GET /withdraw` fails with an invalid asset argument."""
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=ETH", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 400
    assert content == {"error": "invalid operation for asset ETH", "status_code": 400}
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_no_asset(mock_check, client):
    """`GET /withdraw` fails with no asset argument."""
    del mock_check  # only needed to activate the auth patch
    response = client.get("/withdraw", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 400
    assert content == {"error": "'asset_code' is required", "status_code": 400}
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_no_txid(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with no transaction_id.
    """
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw/interactive_withdraw?", follow=True)
    assert response.status_code == 400
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_no_asset(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with no asset_code.
    """
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get(
        "/withdraw/interactive_withdraw?transaction_id=2", follow=True
    )
    assert response.status_code == 400
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_invalid_asset(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with invalid asset_code.
    """
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get(
        "/withdraw/interactive_withdraw?transaction_id=2&asset_code=ETH", follow=True
    )
    assert response.status_code == 400
# TODO: Decompose the below tests, since they call the same logic. The issue: Pytest complains
# about decomposition when passing fixtures to a helper function.
@pytest.mark.django_db
@patch(
    "polaris.management.commands.watch_transactions.stream_transactions",
    return_value=[{}],
)
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_failure_no_memotype(
    mock_check, mock_transactions, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with no `memo_type` in Horizon response.
    """
    del mock_check, mock_transactions  # only needed to activate the patches
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    # Complete the interactive form; the malformed Horizon event must not
    # advance the transaction past pending_user_transfer_start.
    response = client.post(
        url, {"amount": 20, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    assert (
        Transaction.objects.get(id=transaction_id).status
        == Transaction.STATUS.pending_user_transfer_start
    )
@pytest.mark.django_db
@patch(
    "polaris.management.commands.watch_transactions.stream_transactions",
    return_value=[{"memo_type": "not_hash"}],
)
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_failure_incorrect_memotype(
    mock_check, mock_transactions, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with incorrect `memo_type` in Horizon response.
    """
    del mock_check, mock_transactions  # only needed to activate the patches
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    # Complete the interactive form; the non-hash memo event must not advance
    # the transaction past pending_user_transfer_start.
    response = client.post(
        url, {"amount": 20, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    assert (
        Transaction.objects.get(id=transaction_id).status
        == Transaction.STATUS.pending_user_transfer_start
    )
@pytest.mark.django_db
@patch(
    "polaris.management.commands.watch_transactions.stream_transactions",
    return_value=[{"memo_type": "hash"}],
)
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_failure_no_memo(
    mock_check, mock_transactions, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with no `memo` in Horizon response.
    """
    del mock_check, mock_transactions  # only needed to activate the patches
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    # Complete the interactive form; the memo-less event must not advance the
    # transaction past pending_user_transfer_start.
    response = client.post(
        url, {"amount": 20, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    assert (
        Transaction.objects.get(id=transaction_id).status
        == Transaction.STATUS.pending_user_transfer_start
    )
@pytest.mark.django_db
@patch(
    "polaris.management.commands.watch_transactions.stream_transactions",
    return_value=[{"memo_type": "hash", "memo": "wrong_memo"}],
)
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_failure_incorrect_memo(
    mock_check, mock_transactions, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` fails with incorrect `memo` in Horizon response.
    """
    del mock_check, mock_transactions  # only needed to activate the patches
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    # Complete the interactive form; the mismatched-memo event must not
    # advance the transaction past pending_user_transfer_start.
    response = client.post(
        url, {"amount": 20, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    assert (
        Transaction.objects.get(id=transaction_id).status
        == Transaction.STATUS.pending_user_transfer_start
    )
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_success_transaction_unsuccessful(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` changes transaction to `pending_stellar`
    with unsuccessful transaction.
    """
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    response = client.post(
        url, {"amount": 50, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    transaction = Transaction.objects.get(id=transaction_id)
    assert transaction.status == Transaction.STATUS.pending_user_transfer_start
    withdraw_memo = transaction.withdraw_memo
    # Simulate the Horizon stream delivering a matching but failed payment.
    mock_response = {
        "memo_type": "hash",
        "memo": format_memo_horizon(withdraw_memo),
        "successful": False,
        "id": "c5e8ada72c0e3c248ac7e1ec0ec97e204c06c295113eedbe632020cd6dc29ff8",
        "envelope_xdr": "AAAAAEU1B1qeJrucdqkbk1mJsnuFaNORfrOAzJyaAy1yzW8TAAAAZAAE2s4AAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAoUKq+1Z2GGB98qurLSmocHafvG6S+YzKNE6oiHIXo6kAAAABVVNEAAAAAACnUE2lfwuFZ+G+dkc+qiL0MwxB0CoR0au324j+JC9exQAAAAAdzWUAAAAAAAAAAAA=",
    }
    process_withdrawal(mock_response, transaction)
    assert (
        Transaction.objects.get(id=transaction_id).status
        == Transaction.STATUS.pending_stellar
    )
@pytest.mark.django_db
@patch("polaris.helpers.check_auth", side_effect=mock_check_auth_success)
def test_withdraw_interactive_success_transaction_successful(
    mock_check, client, acc1_usd_withdrawal_transaction_factory
):
    """
    `GET /withdraw/interactive_withdraw` changes transaction to `completed`
    with successful transaction.
    """
    del mock_check  # only needed to activate the auth patch
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
    transaction_id = content["id"]
    url = content["url"]
    response = client.post(
        url, {"amount": 50, "bank_account": "123456", "bank": "Bank"}
    )
    assert response.status_code == 200
    transaction = Transaction.objects.get(id=transaction_id)
    assert transaction.status == Transaction.STATUS.pending_user_transfer_start
    withdraw_memo = transaction.withdraw_memo
    # Simulate the Horizon stream delivering a matching, successful payment.
    mock_response = {
        "memo_type": "hash",
        "memo": format_memo_horizon(withdraw_memo),
        "successful": True,
        "id": "c5e8ada72c0e3c248ac7e1ec0ec97e204c06c295113eedbe632020cd6dc29ff8",
        "envelope_xdr": "AAAAAEU1B1qeJrucdqkbk1mJsnuFaNORfrOAzJyaAy1yzW8TAAAAZAAE2s4AAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAoUKq+1Z2GGB98qurLSmocHafvG6S+YzKNE6oiHIXo6kAAAABVVNEAAAAAACnUE2lfwuFZ+G+dkc+qiL0MwxB0CoR0au324j+JC9exQAAAAAdzWUAAAAAAAAAAAA=",
    }
    process_withdrawal(mock_response, transaction)
    assert transaction.status == Transaction.STATUS.completed
    assert transaction.completed_at
@pytest.mark.django_db
def test_withdraw_authenticated_success(
    client, acc1_usd_withdrawal_transaction_factory
):
    """`GET /withdraw` succeeds with the SEP 10 authentication flow."""
    client_address = "GDKFNRUATPH4BSZGVFDRBIGZ5QAFILVFRIRYNSQ4UO7V2ZQAPRNL73RI"
    client_seed = "SDKWSBERDHP3SXW5A3LXSI7FWMMO5H7HG33KNYBKWH2HYOXJG2DXQHQY"
    acc1_usd_withdrawal_transaction_factory()

    # SEP 10: fetch the challenge transaction, sign it, and post it back to
    # receive a JWT.
    response = client.get(f"/auth?account={client_address}", follow=True)
    content = json.loads(response.content)
    envelope_xdr = content["transaction"]
    envelope_object = TransactionEnvelope.from_xdr(
        envelope_xdr, network_passphrase=settings.STELLAR_NETWORK_PASSPHRASE
    )
    client_signing_key = Keypair.from_secret(client_seed)
    envelope_object.sign(client_signing_key)
    client_signed_envelope_xdr = envelope_object.to_xdr()
    response = client.post(
        "/auth",
        data={"transaction": client_signed_envelope_xdr},
        content_type="application/json",
    )
    content = json.loads(response.content)
    encoded_jwt = content["token"]
    assert encoded_jwt

    # Use the issued JWT to start the interactive withdraw flow.
    header = {"HTTP_AUTHORIZATION": f"Bearer {encoded_jwt}"}
    response = client.get("/withdraw?asset_code=USD", follow=True, **header)
    content = json.loads(response.content)
    assert response.status_code == 403
    assert content["type"] == "interactive_customer_info_needed"
@pytest.mark.django_db
def test_withdraw_no_jwt(client, acc1_usd_withdrawal_transaction_factory):
    """`GET /withdraw` fails if a required JWT isn't provided."""
    acc1_usd_withdrawal_transaction_factory()
    response = client.get("/withdraw?asset_code=USD", follow=True)
    content = json.loads(response.content)
    assert response.status_code == 400
    assert content == {"error": "JWT must be passed as 'Authorization' header", "status_code": 400}
| [
37811,
1212,
8265,
5254,
262,
4600,
14,
4480,
19334,
63,
36123,
526,
15931,
198,
11748,
33918,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
12972,
9288,
198,
6738,
25041,
62,
21282,
74,
13,
2539,
24874,
1330,
73... | 2.602355 | 5,095 |
#-*- coding:utf-8 -*-
import tensorflow as tf
from tensorflow.contrib import predictor
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
import pdb
import traceback
import pickle
import logging
import multiprocessing
from functools import partial
import os,sys
# Repository root: two levels above this file. NOTE(review): splits on '/', so
# this assumes POSIX-style paths — confirm if Windows support is needed.
ROOT_PATH = '/'.join(os.path.abspath(__file__).split('/')[:-2])
sys.path.append(ROOT_PATH)  # make repo-local packages (embedding, encoder, ...) importable
from embedding import embedding
from encoder import encoder
from utils.data_utils import *
from tests.test import Test
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
11192,
273,
11125,
13,
3642,
822,
1330,
41568,
198,
6738,
1341,
35720,
13,
4164,
10466,
13,
24874,
3083,
1330,
8615,
500,
62,
... | 3.08642 | 162 |