content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
import uuid
from smexperiments import api_types, trial_component
from smexperiments.search_expression import Filter, SearchExpression, Operator
| [
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
198,
2,
743,
407,
779,
428,
2393,
2845,
... | 3.792746 | 193 |
"""
This file defines constants which
are used in the various tests to
validate the API functions.
"""
# Summoner name to be used for Standard API calls, defaulting to EUW
SUMMONER_NAME = "Crimack"
# Unknown Summoner, used to validate that an error is thrown
UNKNOWN_SUMMONER_NAME = "abcaklsjdlakakdjlsakjdsdjlaksjdla"
# League UUID for validating league-related endpoints
LEAGUE_UUID = "5d24b9a1-6667-4445-bc51-fa28e5b293cb"
| [
37811,
198,
1212,
2393,
15738,
38491,
543,
198,
533,
973,
287,
262,
2972,
5254,
284,
198,
12102,
378,
262,
7824,
5499,
13,
198,
37811,
628,
198,
2,
49167,
1438,
284,
307,
973,
329,
8997,
7824,
3848,
11,
4277,
278,
284,
4576,
54,
198... | 2.892617 | 149 |
linha('NICOLAS') | [
198,
2815,
3099,
10786,
45,
2149,
3535,
1921,
11537
] | 1.888889 | 9 |
import uvicorn
from utils import check_config_json
from configs import *
if __name__ == '__main__':
if not check_config_json():
exit()
uvicorn.run('api:app', host=HOST, port=PORT, log_level='info', reload=False)
| [
11748,
334,
25531,
1211,
198,
198,
6738,
3384,
4487,
1330,
2198,
62,
11250,
62,
17752,
198,
6738,
4566,
82,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
611,
407,
2198,
62,
11250,... | 2.58427 | 89 |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"DLCImporter": "00_dlc_importer.ipynb",
"transform_to_relative": "00_dlc_importer.ipynb",
"add_middle_neck": "00_dlc_importer.ipynb",
"add_rotation": "00_dlc_importer.ipynb",
"apply_rotation": "00_dlc_importer.ipynb",
"PigeonAnimator": "01_pigeon_animator.ipynb",
"PigeonAnimatorFactory": "01_pigeon_animator.ipynb",
"LabelAssistant": "03_label_assistant.ipynb",
"build_timeseries": "04_ts_prep.ipynb"}
modules = ["dlc_importer.py",
"pigeon_animator.py",
"label_assistant.py",
"ts_prep.py"]
doc_url = "https://kiview.github.io/winkie/"
git_url = "https://github.com/kiview/winkie/tree/master/"
| [
2,
47044,
7730,
1677,
1137,
11617,
11050,
41354,
39345,
0,
8410,
5626,
48483,
0,
198,
198,
834,
439,
834,
796,
14631,
9630,
1600,
366,
18170,
1600,
366,
23144,
62,
15390,
62,
28751,
1600,
366,
18300,
62,
6371,
8973,
198,
198,
9630,
79... | 2.009877 | 405 |
from celery.task import Task
from apps.social.models import MSharedStory, MSocialProfile, MSocialServices
from utils import log as logging
| [
6738,
18725,
1924,
13,
35943,
1330,
15941,
198,
6738,
6725,
13,
14557,
13,
27530,
1330,
337,
2484,
1144,
11605,
11,
6579,
9402,
37046,
11,
6579,
9402,
31007,
198,
6738,
3384,
4487,
1330,
2604,
355,
18931,
628,
220,
220,
220,
220,
220,
... | 2.824561 | 57 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce
from operator import mul
import math
import models.utils as utils
import pdb
#class BaseCNN(nn.Module)
#CNN -> 3 layer CNN, linear transform to hidden, step through gru for new hidden
#return -> linear pass through critic, action_features, gru_hidden
#class Decoder(nn.Module)
#Encode state to hidden value (same as current CNN)
#Reconstruct from x, x is stil used for linear_critic, and action_features
#class CapsuleNet(nn.Module)
#Use magnitude to get probabilites, not the "Categorical function
# Use a custom self.dist -> calculate relative probabilities from magnitudes and pass to pytorch dist
# Calculate the value from linear_critic of output vector (also use for reconstruction)
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
10088,
1330,
35971,
198,
11748,
10688,
198,
198,
11748,
4981,
13,
2679... | 3.688372 | 215 |
/usr/lib64/python3.4/stat.py | [
14,
14629,
14,
8019,
2414,
14,
29412,
18,
13,
19,
14,
14269,
13,
9078
] | 2 | 14 |
import asynchat
import asyncore
import os
import socket
import string
import sys
import StringIO
import mimetools
ROOT = "."
PORT = 8000
# a producer which reads data from a file object
if __name__ == '__main__':
s = HTTPServer(PORT)
print "serving at port", PORT
asyncore.loop()
| [
11748,
355,
2047,
17006,
198,
11748,
355,
2047,
7295,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
4731,
198,
11748,
25064,
198,
11748,
10903,
9399,
198,
11748,
17007,
316,
10141,
198,
198,
13252,
2394,
796,
366,
526,
198,
15490,
796... | 3.117021 | 94 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
import sys
import json
import socket
import time
import copy
import traceback
import signal
import logging
logging.getLogger('kafka').addHandler(logging.StreamHandler())
logging.getLogger('kafka').setLevel(logging.WARNING)
logging.getLogger('kazoo').addHandler(logging.StreamHandler())
logging.getLogger('kazoo').setLevel(logging.WARNING)
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from pysandesh.sandesh_base import *
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.process_info.ttypes import ConnectionType,\
ConnectionStatus
from pysandesh.gen_py.sandesh_alarm.ttypes import SandeshAlarmAckResponseCode
from sandesh.alarmgen_ctrl.sandesh_alarm_base.ttypes import AlarmTrace, \
UVEAlarms, UVEAlarmInfo, UVEAlarmConfig, AlarmTemplate, AllOf
from sandesh.analytics.ttypes import *
from sandesh.analytics.cpuinfo.ttypes import ProcessCpuInfo
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT, COLLECTOR_DISCOVERY_SERVICE_NAME,\
ALARM_GENERATOR_SERVICE_NAME
from alarmgen_cfg import CfgParser
from uveserver import UVEServer
from partition_handler import PartitionHandler, UveStreamProc
from sandesh.alarmgen_ctrl.ttypes import PartitionOwnershipReq, \
PartitionOwnershipResp, PartitionStatusReq, UVECollInfo, UVEGenInfo, \
PartitionStatusResp, UVETableAlarmReq, UVETableAlarmResp, \
AlarmgenTrace, UVEKeyInfo, UVETypeCount, UVETypeInfo, AlarmgenStatusTrace, \
AlarmgenStatus, AlarmgenStats, AlarmgenPartitionTrace, \
AlarmgenPartition, AlarmgenPartionInfo, AlarmgenUpdate, \
UVETableInfoReq, UVETableInfoResp, UVEObjectInfo, UVEStructInfo, \
UVETablePerfReq, UVETablePerfResp, UVETableInfo, UVETableCount, \
UVEAlarmStateMachineInfo, UVEAlarmState, UVEAlarmOperState,\
AlarmStateChangeTrace, UVEQTrace
from sandesh.discovery.ttypes import CollectorTrace
from cpuinfo import CpuInfoData
from opserver_util import ServicePoller
from stevedore import hook, extension
from pysandesh.util import UTCTimestampUsec
from libpartition.libpartition import PartitionClient
import discoveryclient.client as client
from kafka import KafkaClient, SimpleProducer
import redis
from collections import namedtuple
OutputRow = namedtuple("OutputRow",["key","typ","val"])
class AGTabStats(object):
""" This class is used to store per-UVE-table information
about the time taken and number of instances when
a UVE was retrieved, published or evaluated for alarms
"""
class AGKeyInfo(object):
""" This class is used to maintain UVE contents
"""
if __name__ == '__main__':
main()
| [
2,
198,
2,
15069,
357,
66,
8,
2211,
7653,
9346,
27862,
11,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
198,
2,
198,
2,
26123,
18497,
198,
2,
198,
2,
6564,
864,
1812,
9652,
329,
569,
7792,
198,
2,
198,
198,
6738,
4903,
1151,
... | 2.975442 | 1,018 |
"""
.. _l-b-reducesumsquare:
Compares implementations of ReduceSumSquare
===========================================
This example compares the *numpy* for the operator *ReduceSumSquare*
to :epkg:`onnxruntime` implementation.
If available, :epkg:`tensorflow` and :epkg:`pytorch` are included as well.
.. contents::
:local:
Available optimisation
++++++++++++++++++++++
The code shows which parallelisation optimisation could be used,
*AVX* or *SSE* and the number of available processors.
"""
import numpy
import pandas
import matplotlib.pyplot as plt
from onnxruntime import InferenceSession
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.algebra.onnx_ops import OnnxReduceSumSquare
from mlprodict.tools import measure_time
from tqdm import tqdm
from mlprodict.testing.experimental_c import code_optimisation
print(code_optimisation())
###################################
# ReduceSumSquare implementations
# +++++++++++++++++++++++++++++++
try:
from tensorflow.math import reduce_sum as tf_reduce_sum
from tensorflow import convert_to_tensor
except ImportError:
reduce_sum = None
try:
from torch import sum as torch_sum, from_numpy
except ImportError:
torch_sum = None
dfs = []
###################################
# Reduction on a particular case KR
# +++++++++++++++++++++++++++++++++
#
# Consecutive axis not reduced and consecutive reduced
# axis are merged.
# KRK means kept axis - reduced axis - kept axis,
#
# (8, 24, 48, N), axis=(3, )
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
axes = (3, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
###################################
# Reduction on a particular case RK
# +++++++++++++++++++++++++++++++++
#
# Consecutive axis not reduced and consecutive reduced
# axis are merged.
# KRK means kept axis - reduced axis - kept axis,
#
# (8, 24, 48, N), axis=(0, )
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
axes = (0, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
###################################
# Reduction on a particular case KRK
# ++++++++++++++++++++++++++++++++++
#
# Consecutive axis not reduced and consecutive reduced
# axis are merged.
# KRK means kept axis - reduced axis - kept axis,
#
# (8, 24, 48, N), axis=(1, 2)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
axes = (1, 2)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
###################################
# (8, 24 * 48, N), axis=1
# ^^^^^^^^^^^^^^^^^^^^^^^
axes = (1, )
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24 * 48, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
###################################
# (2, 8, 12, 24, 2, N), axis=(2, 3)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
axes = (2, 3)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (2, 8, 12, 24, 2, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
###################################
# Reduction on a particular case RKRK
# +++++++++++++++++++++++++++++++++++
#
# (8, 24, 48, N), axis=(0, 2)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
axes = (0, 2)
df, piv, ax = benchmark_op(axes, shape_fct=lambda dim: (8, 24, 48, dim))
dfs.append(df)
df.pivot("fct", "N", "average")
####################################
# Conclusion
# ++++++++++
#
# Some of the configurations should be investigated.
# :ref:`l-reducesum-problem1`. The reduction on tensorflow
# in one dimension seems to be lazy.
merged = pandas.concat(dfs)
name = "reducesumsquare"
merged.to_csv("plot_%s.csv" % name, index=False)
merged.to_excel("plot_%s.xlsx" % name, index=False)
plt.savefig("plot_%s.png" % name)
plt.show()
| [
37811,
198,
492,
4808,
75,
12,
65,
12,
445,
26873,
5700,
421,
533,
25,
198,
198,
7293,
3565,
25504,
286,
44048,
13065,
48011,
198,
10052,
2559,
18604,
198,
198,
1212,
1672,
23008,
262,
1635,
77,
32152,
9,
329,
262,
10088,
1635,
7738,
... | 2.961875 | 1,259 |
from flask import Flask
from flask_restful import Resource, Api
from flask_jwt import JWT
from flask_admin import Admin
from resources.user import UserRegister
from authentication.security import authenticate, identity
import os
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.secret_key = 'rohan'
api = Api(app)
admin = Admin(app)
jwt = JWT(app, authenticate, identity)
@app.route('/')
api.add_resource(UserRegister,'/register')
if __name__ == '__main__':
from db import db
from flask_admin.contrib.sqla import ModelView
from models.user import UserModel
db.init_app(app)
admin.add_view(ModelView(UserModel, db.session))
app.run() | [
6738,
42903,
1330,
46947,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
198,
6738,
42903,
62,
73,
46569,
1330,
449,
39386,
198,
6738,
42903,
62,
28482,
1330,
32053,
198,
6738,
4133,
13,
7220,
1330,
11787,
38804,
198,
6738,... | 2.921569 | 255 |
# -*- coding: utf-8 -*-
"""
Created 5 March 2019
epsc_peak_x.y.z.py
"""
# from __main__ import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import math
import platform
''' ################## Define file structure on server #################### '''
# home_dir will depend on the OS, but the rest will not
# query machine identity and set home_dir from there
machine = platform.uname()[0]
if machine == 'Darwin':
home_dir = '/Volumes/Urban'
elif machine == 'Linux':
home_dir = '/run/user/1000/gvfs/smb-share:server=130.49.237.41,share=urban'
elif machine == 'Windows':
home_dir = os.path.join('N:', os.sep, 'urban')
else:
print("OS not recognized. \nPlease see Nate for correction.")
project_dir = os.path.join(home_dir, 'Huang', 'OSN_OMPvGg8_MTC')
figure_dir = os.path.join(project_dir, 'figures')
table_dir = os.path.join(project_dir, 'tables')
data_dir = os.path.join(project_dir, 'data')
''' ## Open the notes spreadsheet and parse for what we want to analyze ## '''
# open metadata file
data_notes = pd.read_csv(os.path.join(table_dir, 'OSN_Gg8vOMP.csv'))
# pull out cell_id for directory, file name, and make the full path
file_name_list = data_notes['Cell name'].tolist()
cell_id_list = []
for file in file_name_list:
file_split = file.split('_')
cell_id = file_split[0]+'_'+file_split[1]
cell_id_list.append(cell_id)
file_path_list = []
for cell, file in zip(cell_id_list, file_name_list):
file_path = os.path.join(cell, file + '.ibw')
file_path_list.append(file_path)
data_notes = pd.concat([pd.DataFrame({'File Path': file_path_list}), data_notes], axis=1)
data_notes.to_csv(os.path.join(table_dir, 'data_notes_test.csv'))
# drop cells that didn't save to igor
noigor_list = np.array(data_notes[data_notes['Igor saved?'] == 'No'].index)
data_notes = data_notes.drop(noigor_list)
# drop cells that don't have any # of drug sweeps
nodrug_list = np.array(data_notes[data_notes['# of drug sweeps'].isnull() == True].index)
data_notes = data_notes.drop(nodrug_list)
# update file name list to have only files you want to analyze after logic
file_name_list = data_notes['Cell name'].tolist()
data_notes.to_csv(os.path.join(table_dir, 'analyzed_data_notes_test.csv'))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
642,
2805,
13130,
198,
538,
1416,
62,
36729,
62,
87,
13,
88,
13,
89,
13,
9078,
198,
198,
37811,
198,
2,
422,
11593,
12417,
834,
1330,
1635,
198,
1... | 2.688525 | 854 |
import numpy as np
| [
11748,
299,
32152,
355,
45941,
628,
198
] | 3 | 7 |
"""
Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
Try solving it using constant additional space.
Example :
Input :
______
| |
\/ |
1 -> 2 -> 3 -> 4
Return the node corresponding to node 3.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# @param A : head node of linked list
# @return the first node in the cycle in the linked list
| [
37811,
198,
15056,
257,
6692,
1351,
11,
1441,
262,
10139,
810,
262,
6772,
6140,
13,
1002,
612,
318,
645,
6772,
11,
1441,
9242,
13,
198,
198,
23433,
18120,
340,
1262,
6937,
3224,
2272,
13,
198,
198,
16281,
1058,
198,
198,
20560,
1058,
... | 2.429825 | 228 |
#########################################################################################################
# Description: Main file for telecom_churn dataset. Key function is to transform dataset into needed
# input_features and output
#########################################################################################################
import os
import time
import logging
import logging.config
# Imports for various models (Turn on as needed)
from sklearn.ensemble import RandomForestClassifier as RandomForest
# from sklearn.ensemble import BaggingClassifier as Bagging
# from sklearn.svm import SVC as SVC # Support vector machines
# from sklearn.neighbors import KNeighborsClassifier as KNN
# from sklearn.linear_model import LogisticRegression as LogReg
# from sklearn.linear_model import RidgeClassifier as Ridge
# from sknn.mlp import Classifier as NeuralNetClassifier, Layer as NeuralNetLayer
from sklearn.ensemble import GradientBoostingClassifier as GradBoost
from sklearn.preprocessing import StandardScaler
# Import Python libs
import pandas as pd
import numpy as np
# Import from within project
import support_functions as sf
import ensemble_models
import visualization
#########################################################################################################
# Global variables
__author__ = "DataCentric1"
__pass__ = 1
__fail__ = 0
#########################################################################################################
# Setup logging
logging.config.fileConfig('logging.conf')
logger = logging.getLogger("info")
#########################################################################################################
####################################################################################################
if __name__ == "__main__":
start_time = time.time()
# Create precision_recall-curve?
prec_recall_plot = True
# Choose models for the ensemble. Uncomment to choose model needed
estimator_model0 = RandomForest
estimator_keywords_model0 = dict(n_estimators=1000, verbose=0, criterion='entropy', n_jobs=-1,
max_features=5, class_weight='auto')
estimator_model1 = GradBoost
estimator_keywords_model1 = dict(n_estimators=1000, loss='deviance', learning_rate=0.01, verbose=0, max_depth=5,
subsample=1.0)
model_names_list = dict(model0=estimator_model0, model1=estimator_model1)
model_parameters_list = dict(model0=estimator_keywords_model0, model1=estimator_keywords_model1)
[input_features, output] = telecom_churn(use_synthetic_data=False, feature_scaling=True)
# ensemble_models.majority_voting(input_features, output, model_names_list, model_parameters_list,
# run_cv_flag=True, num_model_iterations=1, plot_learning_curve=False,
# run_prob_predictions=True, classification_threshold=0.45)
if prec_recall_plot:
# Divide 0 and 0.9 by 21 equally distributed values (including both).
# Ignoring 1.0 as it has Fbeta_score of 0
num_of_thresholds = np.linspace(0, 0.9, 21)
threshold = np.zeros((len(num_of_thresholds), 1), dtype=float)
precision = np.zeros((len(num_of_thresholds), 1), dtype=float)
recall = np.zeros((len(num_of_thresholds), 1), dtype=float)
fbeta_score = np.zeros((len(num_of_thresholds), 1), dtype=float)
idx = 0
for classification_threshold in num_of_thresholds:
prec_recall = ensemble_models.average_prob(input_features, output, model_names_list, model_parameters_list,
run_cv_flag=False, num_model_iterations=1,
plot_learning_curve=False, run_prob_predictions=True,
classification_threshold=classification_threshold)
threshold[idx] = classification_threshold
precision[idx] = round(prec_recall[0] * 100) # Convert to %
recall[idx] = round(prec_recall[1] * 100)
fbeta_score[idx] = round(prec_recall[2] * 100)
idx += 1
# Call function for plotting
vis = visualization.Plots()
vis.basic_2d_plot(x=threshold, y=(precision, recall, fbeta_score),
legends=("Precision", "Recall", "Fbeta_score (beta=2)"),
title="Precision Recall Curve", xaxis_label="Classification Threshold",
yaxis_label="Score %")
##################################
# Other model
# estimator = SVC
# estimator_keywords = dict(C=1, kernel='rbf', class_weight='auto')
# estimator_model2 = LogReg
# estimator_keywords_model2 = dict(solver='liblinear')
# Neural network
# estimator = NeuralNetClassifier
# estimator_keywords = dict(layers=[NeuralNetLayer("Rectifier", units=64), NeuralNetLayer("Rectifier", units=32),
# NeuralNetLayer("Softmax")],
# learning_rate=0.001, n_iter=50)
##################################
print("Total time: %0.3f" % float(time.time() - start_time))
| [
29113,
29113,
29113,
7804,
2,
198,
2,
220,
12489,
25,
8774,
2393,
329,
30452,
62,
354,
700,
27039,
13,
7383,
2163,
318,
284,
6121,
27039,
656,
2622,
198,
2,
220,
5128,
62,
40890,
290,
5072,
198,
29113,
29113,
29113,
7804,
2,
198,
11... | 2.662967 | 1,982 |
import uuid
import pytest
from sanic import Sanic
from sanic.response import json
from sanic.websocket import WebSocketProtocol
from sanic_jwt_extended.decorators import refresh_jwt_required
from sanic_jwt_extended.jwt_manager import JWT
from tests.utils import DunnoValue
@pytest.fixture
@pytest.fixture
| [
11748,
334,
27112,
198,
198,
11748,
12972,
9288,
198,
6738,
5336,
291,
1330,
2986,
291,
198,
6738,
5336,
291,
13,
26209,
1330,
33918,
198,
6738,
5336,
291,
13,
732,
1443,
5459,
1330,
5313,
39105,
19703,
4668,
198,
198,
6738,
5336,
291,
... | 3.038835 | 103 |
#
from z3c.autoinclude.dependency import package_includes
| [
2,
198,
6738,
1976,
18,
66,
13,
23736,
17256,
13,
45841,
1387,
1330,
5301,
62,
42813,
198
] | 3.411765 | 17 |
import pytest
from django.db.migrations.state import ProjectState
from django_test_migrations.migrator import Migrator
@pytest.mark.django_db()
def test_migrator(transactional_db):
"""We only need this test for coverage."""
migrator = Migrator()
old_state = migrator.apply_initial_migration(('main_app', None))
new_state = migrator.apply_tested_migration(('main_app', '0001_initial'))
assert isinstance(old_state, ProjectState)
assert isinstance(new_state, ProjectState)
assert migrator.reset() is None
@pytest.mark.django_db()
def test_migrator_list(transactional_db):
"""We only need this test for coverage."""
migrator = Migrator()
old_state = migrator.apply_initial_migration([('main_app', None)])
new_state = migrator.apply_tested_migration([('main_app', '0001_initial')])
assert isinstance(old_state, ProjectState)
assert isinstance(new_state, ProjectState)
assert migrator.reset() is None
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
9945,
13,
76,
3692,
602,
13,
5219,
1330,
4935,
9012,
198,
198,
6738,
42625,
14208,
62,
9288,
62,
76,
3692,
602,
13,
76,
3692,
1352,
1330,
337,
3692,
1352,
628,
198,
31,
9078,
9288,
13... | 2.845697 | 337 |
from lox.ast_printer import AstPrinter
from lox.lox import Lox
from lox.parser import Parser, ParseError
from lox.scanner import Scanner
from lox import expressions
from lox import tokens
| [
6738,
300,
1140,
13,
459,
62,
1050,
3849,
1330,
8304,
6836,
3849,
198,
6738,
300,
1140,
13,
75,
1140,
1330,
406,
1140,
198,
6738,
300,
1140,
13,
48610,
1330,
23042,
263,
11,
2547,
325,
12331,
198,
6738,
300,
1140,
13,
35836,
1008,
1... | 3.241379 | 58 |
import unittest
from numpy.testing import assert_allclose
import warnings
from hazma.parameters import omega_h2_cdm
from hazma.relic_density import relic_density
import unittest
warnings.filterwarnings("ignore")
| [
11748,
555,
715,
395,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
439,
19836,
198,
11748,
14601,
198,
6738,
11595,
2611,
13,
17143,
7307,
1330,
37615,
62,
71,
17,
62,
10210,
76,
198,
6738,
11595,
2611,
13,
260,
677,
62,
43337,
... | 3.359375 | 64 |
import functools
import threading
import logging
import requests
import datetime
from requests.models import HTTPBasicAuth
logger = logging.getLogger(__name__)
def thread_it(func):
"""A wrapper function to run func in a daemon thread.
Args:
func (function): The function to run in a thread
Returns:
function: the wrapped function.
"""
@functools.wraps(func)
return wrapper
def send_update(category, message, update_service_data):
"""Sends message (application updates) to an http endpoint
"""
if not update_service_data:
logger.warning(
"updateCallback in instruction is not configured properly. Request not sent to update webhook"
)
return
data = {"time": datetime.datetime.now(), "category": category, "message": message}
url = update_service_data.get('url', "")
custom_headers = update_service_data.get('customHeaders', {})
username = update_service_data.get('username', "")
password = update_service_data.get('password', "")
try:
if username:
requests.post(url,
data=data,
headers=custom_headers,
auth=HTTPBasicAuth(username, password))
else:
requests.post(url, data=data, headers=custom_headers)
except Exception as e:
logger.error(e) | [
11748,
1257,
310,
10141,
198,
11748,
4704,
278,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
4818,
8079,
198,
198,
6738,
7007,
13,
27530,
1330,
14626,
26416,
30515,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
36... | 2.549451 | 546 |
from urllib2 import Request, urlopen, URLError, HTTPError
#samplefirst()
samplesecond() | [
6738,
2956,
297,
571,
17,
1330,
19390,
11,
19016,
9654,
11,
37902,
2538,
81,
1472,
11,
14626,
12331,
628,
628,
628,
198,
198,
2,
39873,
11085,
3419,
198,
82,
12629,
721,
623,
3419
] | 2.848485 | 33 |
# Contact: sara.ferreira <at> fc [dot] up [dot] pt
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import jarray
import json
import inspect
import os
import ast
import re
import subprocess
import shutil
from subprocess import Popen, PIPE
from java.lang import System
from java.util.logging import Level
from java.io import File
from org.sleuthkit.datamodel import SleuthkitCase
from org.sleuthkit.datamodel import AbstractFile
from org.sleuthkit.datamodel import ReadContentInputStream
from org.sleuthkit.datamodel import BlackboardArtifact
from org.sleuthkit.datamodel import BlackboardAttribute
from org.sleuthkit.autopsy.datamodel import ContentUtils
from org.sleuthkit.autopsy.ingest import IngestModule
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.autopsy.ingest import DataSourceIngestModule
from org.sleuthkit.autopsy.ingest import FileIngestModule
from org.sleuthkit.autopsy.ingest import IngestModuleFactoryAdapter
from org.sleuthkit.autopsy.ingest import IngestMessage
from org.sleuthkit.autopsy.ingest import IngestServices
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import Services
from org.sleuthkit.autopsy.casemodule.services import FileManager
from org.sleuthkit.autopsy.casemodule.services import Blackboard
from org.sleuthkit.autopsy.datamodel import ContentUtils
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
# Data Source-level ingest module. One gets created per data source.
# TODO: Rename this to something more specific. Could just remove "Factory" from above name.
| [
2,
14039,
25,
264,
3301,
13,
2232,
260,
8704,
1279,
265,
29,
277,
66,
685,
26518,
60,
510,
685,
26518,
60,
42975,
198,
2,
198,
2,
770,
318,
1479,
290,
555,
12685,
26584,
3788,
2716,
656,
262,
1171,
7386,
13,
198,
2,
198,
2,
1746... | 3.584367 | 806 |
"""
Copyright 2018 Globo.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2
from mock import patch
from globomap_driver_napi.kind import Kind
from tests.util import open_json
if __name__ == '__main__':
unittest2.main()
| [
37811,
198,
220,
220,
15069,
2864,
2671,
20391,
13,
785,
628,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
1... | 3.483871 | 217 |
# -*- coding: utf-8 -*-
import os
import unittest
from openregistry.lots.core.tests.base import BaseWebTest, snitch
from openregistry.lots.core.tests.blanks.mixins import ResourceTestMixin
from openregistry.lots.loki.tests.base import (
BaseLotWebTest
)
from openregistry.lots.loki.tests.json_data import test_loki_lot_data
from openregistry.lots.loki.tests.blanks.lot_blanks import (
dateModified_resource,
# LotResourceTest
change_draft_lot,
change_dissolved_lot,
check_lot_assets,
rectificationPeriod_workflow,
check_decisions,
change_pending_lot,
change_composing_lot,
change_verification_lot,
change_pending_deleted_lot,
change_deleted_lot,
change_pending_dissolution_lot,
change_active_salable_lot,
change_active_auction_lot,
change_active_contracting_lot,
change_sold_lot,
change_pending_sold_lot,
auction_autocreation,
check_change_to_verification,
# LotTest
simple_add_lot,
simple_patch
)
from openregistry.lots.loki.models import Lot
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
555,
715,
395,
198,
198,
6738,
1280,
2301,
4592,
13,
75,
1747,
13,
7295,
13,
41989,
13,
8692,
1330,
7308,
13908,
14402,
11,
3013,
2007,
198,
... | 2.510158 | 443 |
print('Bem vindo ao analisador de nomes')
print('--------------------------------')
Nom = input('Qual seu nome? ')
print('________________________________')
print('Seu nome em maiusculo ficaria {}'.format(Nom.upper()))
print('Seu nome em minusculo ficaria {}'.format(Nom.lower()))
Nomd = Nom.split()
print('Seu primeiro nome tem {} letras'.format(len(Nomd[0])))
print('E seu nome tem ao todo {} letras'.format(len("".join(Nomd))))
| [
4798,
10786,
33,
368,
29178,
78,
257,
78,
2037,
271,
7079,
390,
299,
2586,
11537,
198,
4798,
10786,
3880,
11537,
198,
45,
296,
796,
5128,
10786,
46181,
384,
84,
299,
462,
30,
705,
8,
198,
4798,
10786,
10221,
11537,
198,
4798,
10786,
... | 2.835526 | 152 |
# -*- coding: utf-8 -*-
from .api import *
from . import ui, util
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
764,
15042,
1330,
1635,
198,
6738,
764,
1330,
334,
72,
11,
7736,
198
] | 2.310345 | 29 |
import json
import plotly
import pandas as pd
from flask import Flask
from flask import render_template, request, jsonify
from plotly.graph_objs import Bar
from sqlalchemy import create_engine
import joblib
import plotly.graph_objs as go_
from flask import current_app, flash, jsonify, make_response, redirect, request, url_for
import folium
from folium import plugins
from folium.plugins import HeatMap
#Import custom class and functions:
import sys
sys.path.append("./customized_class")
from report import create_missing_report
from create_choropleth import create_choropleth
from create_geodf import construct_geodf
#from load_map import load_source_map
# load data for missing report
engine = create_engine('sqlite:///../data/PropertiesPrices.db')
df = pd.read_sql_table('Cleaned_prices', engine)
options = list(df['property_type'].unique())
list_features = ["rooms", "bedrooms","bathrooms","surface_total","surface_covered","lat","lon"]
types_for_maps = ['Casa', 'Apartamento']
types_for_errors =['Casa', 'Apartamento']
#Create a base map for index page:
obj_map = folium.Map(location=[5.170035, -74.914305], tiles='cartodbpositron', zoom_start=6)
data = df.head(1000)
data = data[(data['missing_lon']==0) & (data['missing_lat']==0)]
HeatMap(data=data[['lat', 'lon']], radius=10).add_to(obj_map)
obj_map.save("templates/map.html")
#base_map = load_source_map(obj_map)
# load model
#model = joblib.load("../models/classifier.pkl")
#model = model.best_estimator_
#Star flask app
app = Flask(__name__)
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
# web page that handles user query and displays model results
@app.route('/ajax_add', methods = ['POST','GET'])
@app.route('/ajax_add_2', methods = ['POST','GET'])
@app.route('/ajax_add_3', methods = ['POST','GET'])
@app.route('/map')
@app.route('/Choropleth_map')
@app.route('/Choropleth_map_errors')
@app.route('/')
if __name__ == '__main__':
main() | [
11748,
33918,
198,
11748,
7110,
306,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
42903,
1330,
46947,
198,
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
11,
33918,
1958,
198,
6738,
7110,
306,
13,
34960,
62,
672,
8457,
1330,
2409,
... | 2.912281 | 684 |
""" -*- coding: utf-8 -*-
@author: omerkocadayi
https://github.com/omerkocadayi
https://www.linkedin.com/in/omerkocadayi/ """
import cv2
import time
vid = cv2.VideoCapture("cars.mp4")
carCascade = cv2.CascadeClassifier("haarcascade/car1.xml")
crd = [[620,220],[740,220],[580,500],[815,500]]
distance, t1, t2, speed, p_frame, n_frame, cnt = 0.025, 0, 0, "", 0, 0, 0
while True:
ret, frame = vid.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cars = carCascade.detectMultiScale(gray,
scaleFactor=1.1,
minNeighbors=3,
minSize=(100, 100),
flags=cv2.CASCADE_SCALE_IMAGE)
cv2.line(frame, (crd[0][0],crd[0][1]),(crd[1][0],crd[1][1]),(0,0,255), 2)
cv2.line(frame, (crd[0][0],crd[0][1]),(crd[2][0],crd[2][1]),(0,0,255), 2)
cv2.line(frame, (crd[2][0],crd[2][1]),(crd[3][0],crd[3][1]),(0,0,255), 2)
cv2.line(frame, (crd[1][0],crd[1][1]),(crd[3][0],crd[3][1]),(0,0,255), 2)
for (a,b,c,d) in cars:
cx, cy = int(a+c/2), int(b+d/2)
if(cx >= crd[2][0] and cx <= crd[3][0] and cy >= (crd[2][1]-10) and cy <= (crd[3][1]+5)):
cv2.circle(frame,(cx,cy),4,(0,255,255),-1)
cv2.line(frame, (crd[2][0],crd[2][1]),(crd[3][0],crd[3][1]),(0,255,0), 2)
t1 = time.time()
if(cx >= crd[0][0] and cx <= crd[1][0] and cy >= (crd[0][1]-10) and cy <= (crd[1][1]+5)):
cv2.circle(frame,(cx,cy),4,(0,255,255),-1)
cv2.line(frame, (crd[0][0],crd[0][1]),(crd[1][0],crd[1][1]),(0,255,0), 2)
t2 = time.time()
fps = cnt/(t2-t1)
if t2-t1 > 0:
sp = distance/((t2-t1)/3600)
if sp * (30/fps) < 250:
if sp * (30/fps) > 50 :
speed = str(sp* (30/fps))
else:
speed = str(sp)
cnt = 0
break;
if speed != "" : cv2.putText(frame,""+str(speed[:5])+"km/h",(crd[0][0]-20,crd[0][1]-20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,255),1,cv2.LINE_AA)
cv2.imshow("Frame", frame)
cnt+=1
if cv2.waitKey(2) & 0xFF == ord('q'):
break
vid.release()
cv2.destroyAllWindows() | [
37811,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
220,
220,
220,
2488,
9800,
25,
267,
647,
74,
420,
43593,
72,
220,
201,
198,
220,
220,
220,
3740,
1378,
12567,
13,
785,
14,
296,
9587,
420,
43593,
72,
201,
1... | 1.571522 | 1,524 |
# Copyright 2021 Yan Yan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pccm
import torch
import torch.nn.functional as F
from cumm import dtypes
from cumm import tensorview as tv
from cumm.constants import PACKAGE_ROOT
from cumm.conv.bases import NCHW, NHWC, ConvIterAlgo, ConvOpType
from cumm.conv.main import ConvMainUnitTest, gen_gemm_kernels
from cumm.conv.params import ConvProblem
from cumm.gemm import kernel
from cumm.gemm.constants import NVRTCConstants, NVRTCMode
from cumm.nvrtc import CummNVRTCModule, get_cudadevrt_path
os.environ["CUMM_DEBUG"] = "1"
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
if __name__ == "__main__":
_asdv_test_simt_python_v2()
| [
2,
15069,
33448,
10642,
10642,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
73... | 3.133489 | 427 |
"""Command-line interfaces for AROMA."""
| [
37811,
21575,
12,
1370,
20314,
329,
5923,
2662,
32,
526,
15931,
198
] | 3.416667 | 12 |
"""
Given a number sequence, find the increasing subsequence with the highest sum.
Write a method that returns the highest sum.
Example 1:
Input: {4,1,2,6,10,1,12}
Output: 32
Explanation: The increaseing sequence is {4,6,10,12}.
Please note the difference, as the LIS is {1,2,6,10,12} which has a sum of '31'.
Example 2:
Input: {-4,10,3,7,15}
Output: 25
Explanation: The increaseing sequences are {10, 15} and {3,7,15}.
"""
# Time: O(N^2) Space: O(N)
main()
| [
628,
198,
37811,
198,
15056,
257,
1271,
8379,
11,
1064,
262,
3649,
6399,
594,
351,
262,
4511,
2160,
13,
220,
198,
16594,
257,
2446,
326,
5860,
262,
4511,
2160,
13,
198,
198,
16281,
352,
25,
198,
198,
20560,
25,
1391,
19,
11,
16,
1... | 2.61326 | 181 |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Offline training binary."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import gin
import tensorflow as tf0
import tensorflow.compat.v1 as tf
from behavior_regularized_offline_rl.brac import agents
from behavior_regularized_offline_rl.brac import train_eval_offline
from behavior_regularized_offline_rl.brac import utils
import d4rl
tf0.compat.v1.enable_v2_behavior()
# Flags for offline training.
flags.DEFINE_string('root_dir',
os.path.join(os.getenv('HOME', '/'), 'tmp/offlinerl/learn'),
'Root directory for writing logs/summaries/checkpoints.')
flags.DEFINE_string('sub_dir', 'auto', '')
#flags.DEFINE_string('agent_name', 'brac_primal', 'agent name.')
flags.DEFINE_string('env_name', 'halfcheetah-random-v0', 'env name.')
flags.DEFINE_integer('seed', 0, 'random seed, mainly for training samples.')
flags.DEFINE_integer('total_train_steps', int(5e5), '')
flags.DEFINE_integer('n_eval_episodes', 20, '')
flags.DEFINE_integer('n_train', int(1e6), '')
flags.DEFINE_integer('value_penalty', 0, '')
flags.DEFINE_integer('save_freq', 1000, '')
flags.DEFINE_float('alpha', 1.0, '')
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')
FLAGS = flags.FLAGS
if __name__ == '__main__':
app.run(main)
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
15069,
12131,
383,
3012,
4992,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 3.002857 | 700 |
import jieba
from wk import string_utils as TU | [
11748,
474,
494,
7012,
198,
6738,
266,
74,
1330,
4731,
62,
26791,
355,
309,
52
] | 3.066667 | 15 |
#!/usr/bin/env python
# -*-coding:utf-8-*-
# @Time : 2017/11/1 ~ 2019/9/1
# @Author : Allen Woo
from apps.core.flask.login_manager import osr_login_required
from apps.core.blueprint import api
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.message.process.send_msg import send_msg
@api.route('/admin/message/send', methods=['POST'])
@osr_login_required
@permission_required()
def api_adm_send_msg():
"""
POST
发送消息
title:<title>,标题
content:<str>,正文
content_html:<str>,正文html
send_type:<array>,发送类型on_site, email, sms . 如:["email"], 也可以同时发送多个个["email", "on_site"]
username:<array>, 接收信息的用户名, 如["test", "test2"]
:return:
"""
data = send_msg()
return response_format(data)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
66,
7656,
25,
40477,
12,
23,
12,
9,
12,
198,
2,
2488,
7575,
1058,
2177,
14,
1157,
14,
16,
5299,
13130,
14,
24,
14,
16,
198,
2,
2488,
13838,
1058,
9659,
39832,
19... | 2.085859 | 396 |
# ==================
#
# nosecone_threaded_bases.py
#
# License: https://github.com/summerswallow/open-rocketry/blob/master/LICENSE
# (c) 2018 Summer Swallow Consulting
#
# ==================
from math import sqrt, atan, acos, cos, sin, fabs, pi
import os
from solid import *
from solid.utils import up, down, left, forward
from .nosecone import NoseCone, FunctionBasedNoseCone
from misc import utils
MM2IN = 25.4
class CylindricalNoseCone(NoseCone):
"""
This is good for fit testing not really intended for actual nose cone
"""
if __name__ == '__main__':
from bodytubes.semroc import bt20
from bodytubes.semroc import bt5
array = utils.array(4, MM2IN, [
InvertedTangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
EllipticalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
ConicalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125, mid_diameter=.3),
BiconicNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125, mid_diameter=.3),
ParabolicNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
HaackSeriesNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
PowerSeriesNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
BluntedConicalNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
TangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
BluntedTangentOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3),
SecantOgiveNoseCone(0.75, bodytube=bt5, thickness=1 / 16.0, base_height=0.25, blunt_radius=0.125,
mid_diameter=.3)])
utils.render_to_file(array, "examples/standard_nosecones.scad")
| [
2,
36658,
28,
198,
2,
198,
2,
9686,
49180,
62,
16663,
276,
62,
65,
1386,
13,
9078,
198,
2,
198,
2,
220,
220,
13789,
25,
3740,
1378,
12567,
13,
785,
14,
16345,
11056,
11930,
322,
14,
9654,
12,
10823,
11973,
14,
2436,
672,
14,
986... | 2.011676 | 1,199 |
import numpy as np
x = np.random.randn(20) | [
11748,
299,
32152,
355,
45941,
198,
198,
87,
796,
45941,
13,
25120,
13,
25192,
77,
7,
1238,
8
] | 2.388889 | 18 |
from .input import Input
from .switch import Switch
from .delayed_switch import DelayedSwitch
from .joystick import Joystick, Directions
from .linear_actuator import LinearActuator
| [
6738,
764,
15414,
1330,
23412,
198,
6738,
764,
31943,
1330,
14645,
198,
6738,
764,
12381,
16548,
62,
31943,
1330,
4216,
16548,
38978,
198,
6738,
764,
2633,
13915,
1330,
14087,
13915,
11,
47426,
198,
6738,
764,
29127,
62,
529,
84,
1352,
... | 3.934783 | 46 |
import numpy as np
import math
from .util import add_column
from . import global_val as gv
| [
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
198,
6738,
764,
22602,
1330,
751,
62,
28665,
198,
6738,
764,
1330,
3298,
62,
2100,
355,
308,
85,
628
] | 3.321429 | 28 |
# Third Party Module Import
from flask_restplus import Api
api = Api()
| [
2,
10467,
3615,
19937,
17267,
198,
6738,
42903,
62,
2118,
9541,
1330,
5949,
72,
628,
198,
15042,
796,
5949,
72,
3419,
198
] | 3.318182 | 22 |
""" Collection of functions to transform popular datasets into torch_dataset Datasets """
import os
import tqdm
import json
from scipy.io import loadmat
from .detection_dataset import DetectionDataset
from .siamese_dataset import SiameseDataset
def convert_coco_to_detection_dataset(coco_ann_file, root_image_dir, no_crowd=False):
""" Converts a coco annotation file to a detection dataset (which can be saved with save_dataset)
Args
coco_ann_file : The annotation file eg. 'XXX/instances_train2017.json'
root_image_dir : The folder storing all images eg. 'XXX/train2017/'
no_crowd : Flag to switch if crowd object should be included
Returns
DetectionDataset object containing coco data
"""
# Load coco data
with open(coco_ann_file, 'r') as f:
print('Loading coco annotation file')
coco_data = json.load(f)
# Create empty dataset object
dataset = DetectionDataset(root_dir=root_image_dir)
# Set classes
# Also create link for original class id to class name
print('Setting classes')
orig_label_to_name = {}
all_class_names = []
for category in coco_data['categories']:
orig_label_to_name[category['id']] = category['name']
all_class_names.append(category['name'])
dataset.set_classes(all_class_names)
# Set images
for image in tqdm.tqdm(coco_data['images'], desc='Setting images'):
dataset.set_image(
image_path=image['file_name'],
image_url=image['coco_url'],
image_id=image['id'],
height=image['height'],
width=image['width']
)
# Set annotations
for ann in tqdm.tqdm(coco_data['annotations'], desc='Setting annotations'):
# Convert bbox to x1, y1, x2, y2
bbox = ann['bbox']
bbox[2] += bbox[0]
bbox[3] += bbox[1]
if no_crowd and ann['iscrowd'] == 1:
continue
import warnings
warnings.filterwarnings('error')
try:
dataset.set_ann(
image_id=ann['image_id'],
bbox=bbox,
class_name=orig_label_to_name[ann['category_id']],
segmentation=ann['segmentation']
)
except:
continue
return dataset
def convert_wider_to_detection_dataset(
wider_mat_file,
root_image_dir,
allowed_blur_labels=[0,1,2],
allowed_expression_labels=[0,1],
allowed_illumination_labels=[0,1],
allowed_occlusion_labels=[0,1,2],
allowed_pose_labels=[0,1]
):
""" Converts a wider annotation file to a detection dataset (which can be saved with save_dataset)
Args
wider_mat_file : The annotation file eg. 'XXX/wider_face_val.mat'
root_image_dir : The folder storing all image folders, 'The directory with 0--Prade ...'
Returns
DetectionDataset object containing wider data
"""
# Load wider data
wider_data = loadmat(wider_mat_file)
# Create empty dataset object
dataset = DetectionDataset(root_dir=root_image_dir)
# Set classes even though there are only 1 class
dataset.set_classes(['person_face'])
# Set image and annotations one image at a time
# Loop through events
for event_id, event in enumerate(wider_data['event_list']):
event_name = str(event[0][0])
event_files = wider_data['file_list'][event_id,0]
event_bboxes = wider_data['face_bbx_list'][event_id,0]
event_blur_labels = wider_data['blur_label_list'][event_id,0]
event_expression_labels = wider_data['expression_label_list'][event_id,0]
event_illumination_labels = wider_data['illumination_label_list'][event_id,0]
event_occlusion_labels = wider_data['occlusion_label_list'][event_id,0]
event_pose_labels = wider_data['pose_label_list'][event_id,0]
event_invalid_labels = wider_data['invalid_label_list'][event_id,0]
pbar = tqdm.tqdm(total=len(event_files), desc='Setting {}'.format(event_name))
# Loop through each image
for file_id, file_name in enumerate(event_files):
pbar.update(1)
# Save image
file_name = str(file_name[0][0])
image_info = dataset.set_image(image_path=os.path.join(event_name, file_name) + '.jpg')
image_id = image_info['id']
bboxes = event_bboxes[file_id,0]
blur_labels = event_blur_labels[file_id,0]
expression_labels = event_expression_labels[file_id,0]
illumination_labels = event_illumination_labels[file_id,0]
occlusion_labels = event_occlusion_labels[file_id,0]
pose_labels = event_pose_labels[file_id,0]
invalid_labels = event_invalid_labels[file_id,0]
# Loop though each annotation
for i in range(len(bboxes)):
# do checks
if not (
(blur_labels[i,0] in allowed_blur_labels) and
(expression_labels[i,0] in allowed_expression_labels) and
(illumination_labels[i,0] in allowed_illumination_labels) and
(occlusion_labels[i,0] in allowed_occlusion_labels) and
(pose_labels[i,0] in allowed_pose_labels) and
(invalid_labels[i,0] == 0)
):
continue
bbox = bboxes[i].copy().tolist()
bbox[3] += bbox[1]
bbox[2] += bbox[0]
import warnings
warnings.filterwarnings('error')
try:
dataset.set_ann(
image_id=image_id,
bbox=bbox,
class_name='person_face'
)
except:
continue
pbar.close()
return dataset
def convert_lfw_to_siamese_dataset(people_txt_file, root_image_dir):
""" Converts a lfw people file to a siamese dataset (which can be saved with save_dataset)
Args
people_txt_file : The annotation file eg. 'XXX/peopleDevTrain.txt'
root_image_dir : The folder storing all image folders, 'The directory with Aaron_Eckhart ...'
Returns
SiameseDataset object containing lfw data
"""
# Load lfw data
with open(people_txt_file, 'r') as f:
lfw_data = f.readlines()[1:]
# Create empty dataset object
dataset = SiameseDataset(root_dir=root_image_dir)
# Set images one person at a time
for person_info in tqdm.tqdm(lfw_data, desc='Setting each person'):
person_name = person_info.split('\t')[0]
person_folder = os.path.join(root_image_dir, person_name)
for image_file in os.listdir(person_folder):
dataset.set_image(
image_path=os.path.join(person_name, image_file),
class_name=person_name
)
return dataset
def convert_webface_to_siamese_dataset(root_image_dir):
""" Converts a CASIA-WebFace image set to a siamese dataset (which can be saved with save_dataset)
Args
root_image_dir : The folder storing all image folders, 'The directory with 0000045/ ...'
Returns
SiameseDataset object containing CASIA-WebFace data
"""
# Create empty dataset object
dataset = SiameseDataset(root_dir=root_image_dir)
# Set images one person at a time
all_person_folders = os.listdir(root_image_dir)
for person_folder in tqdm.tqdm(all_person_folders, desc='Setting each person'):
person_name = str(person_folder)
for image_file in os.listdir(os.path.join(root_image_dir, person_folder)):
dataset.set_image(
image_path=os.path.join(person_folder, image_file),
class_name=person_name
)
return dataset
| [
37811,
12251,
286,
5499,
284,
6121,
2968,
40522,
656,
28034,
62,
19608,
292,
316,
16092,
292,
1039,
37227,
198,
11748,
28686,
198,
11748,
256,
80,
36020,
198,
198,
11748,
33918,
198,
6738,
629,
541,
88,
13,
952,
1330,
3440,
6759,
198,
... | 2.163304 | 3,668 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 15:49:12 2019
@author: jgoldstein
inspace toy problem
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import periodogram
from inspace.interpolation import get_target_times, sinc_interpolation
from inspace.gp_george import gp_estimate, plot_gp_stuff
# using time units of days
YEAR = 365.25
# fig2 = plt.figure()
# ax = fig2.add_subplot(121)
# ax.plot(freqs, ft_true, 'b-', label='true signal')
# ax.plot(freqs, ft_gp_nonoise, 'm-.', label='gp no noise')
# ax.plot(freqs, ft_gp_wnoise, 'k--', label='gp w/ noise')
# ymin, ymax = ax.get_ylim()
# ax.vlines(fGW, ymin, ymax, color='g', zorder=0, label='fGW')
# ax.set_ylim(ymin, ymax)
# ax.set_xlabel('frequency')
# ax.legend(loc='best')
# ax.set_title('rfft')
#
# ax2 = fig2.add_subplot(122)
# ax2.set_yscale('log')
# ft_true_sq = ft_true * np.conj(ft_true)
# ft_gp_nonoise_sq = ft_gp_nonoise * np.conj(ft_gp_nonoise)
# ft_gp_wnoise_sq = ft_gp_wnoise * np.conj(ft_gp_wnoise)
# ax2.plot(freqs, ft_true_sq, 'b-', label='true signal')
# ax2.plot(freqs, ft_gp_nonoise_sq, 'm-.', label='gp no noise')
# ax2.plot(freqs, ft_gp_wnoise_sq, 'k--', label='gp w/ noise')
# ymin, ymax = ax2.get_ylim()
# ax2.vlines(fGW, ymin, ymax, color='g', zorder=0, label='fGW')
# new_ymin = min(ft_true_sq[1:]) / 10
# ax2.set_ylim(new_ymin, ymax)
# ax2.set_xlabel('frequency')
# ax2.legend(loc='best')
# ax2.set_title('$|\mathrm{rfft}|^2$')
#
# fig2.tight_layout()
#
# fig1.savefig('/home/jgoldstein/Documents/GP_fig1.pdf')
# fig2.savefig('/home/jgoldstein/Documents/GP_fig2.pdf')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
2365,
1367,
1315,
25,
2920,
25,
1065,
13130,
198,
198,
31,
9800,
25,
474,
24267,
5... | 2.05119 | 840 |
import sys
import logging
import gym
import h5py
from es_distributed import tf_util
from es_distributed.policies import policies
from es_distributed.atari_wrappers import wrap_deepmind
from .common import RunningStat, SharedNoiseTable
log = logging.getLogger(__name__)
| [
11748,
25064,
198,
11748,
18931,
198,
198,
11748,
11550,
198,
11748,
289,
20,
9078,
198,
198,
6738,
1658,
62,
17080,
6169,
1330,
48700,
62,
22602,
198,
6738,
1658,
62,
17080,
6169,
13,
79,
4160,
444,
1330,
4788,
198,
6738,
1658,
62,
1... | 3.4 | 80 |
import torch.nn as nn
import ltr.models.backbone as backbones
import ltr.models.bbreg as bbmodels
from ltr import model_constructor
class ATOMnet(nn.Module):
""" ATOM network module"""
def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):
"""
args:
feature_extractor - backbone feature extractor
bb_regressor - IoU prediction module
bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to
bb_regressor
extractor_grad - Bool indicating whether backbone feature extractor requires gradients
"""
super(ATOMnet, self).__init__()
self.feature_extractor = feature_extractor
self.bb_regressor = bb_regressor
self.bb_regressor_layer = bb_regressor_layer
if not extractor_grad:
for p in self.feature_extractor.parameters():
p.requires_grad_(False)
def forward(self, train_imgs, test_imgs, train_bb, test_proposals):
""" Forward pass
Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension
corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]
"""
num_sequences = train_imgs.shape[-4]
num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1
num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1
# Extract backbone features
train_feat = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:]))
test_feat = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]))
train_feat_iou = [feat for feat in train_feat.values()]
test_feat_iou = [feat for feat in test_feat.values()]
# Obtain iou prediction
iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou,
train_bb.view(num_train_images, num_sequences, 4),
test_proposals.view(num_train_images, num_sequences, -1, 4))
return iou_pred
@model_constructor
@model_constructor
| [
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
300,
2213,
13,
27530,
13,
1891,
15992,
355,
736,
35095,
198,
11748,
300,
2213,
13,
27530,
13,
11848,
2301,
355,
275,
65,
27530,
198,
6738,
300,
2213,
1330,
2746,
62,
41571,
273,
628,
... | 2.300714 | 981 |
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(100)
p = 0.98 # スパース度
n = 100 # 行列のサイズ
l = [0]*int(p*100) + [1]*int((1-p)*100)
if __name__ == "__main__":
main()
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
198,
37659,
13,
25120,
13,
28826,
7,
3064,
8,
198,
79,
796,
657,
13,
4089,
220,
220,
220,
1303,
220,
8943,
32546,
6312,
8943,
41753,
9... | 1.819048 | 105 |
"""XManager/XCloud launcher for both GPU and TPU jobs.
The launcher works with any Python binary with the following flags:
* `output_dir` is the directory for saving summaries and logs;
* `use_gpu` determines whether to run on GPU or otherwise TPU;
* `num_cores` is the number of TPU cores or GPUs;
* `tpu` is the TPU main address (flag not required if launching with GPU);
* `seed` is the experiment's random seed.
For binaries that support only certain accelerator settings, we recommend still
using these flags. Raise errors outside its support or rely on runtime errors.
"""
import collections
import functools
import importlib.util
import inspect
import json
import operator
import os
import random
import time
import asyncio
from typing import Any, Dict, List, Optional, Text
from absl import app
from absl import flags
from absl import logging
from ml_collections.config_dict import config_dict
from xmanager import xm
from xmanager import xm_local
from xmanager.cloud import vertex
# Binary flags
flags.DEFINE_string(
'binary', None,
'Filepath to Python script to run. For external GCS experiments, it can be '
'an absolute path to the binary, or a relative one with respect to the '
'current folder.')
flags.mark_flag_as_required('binary')
flags.DEFINE_list(
'args', [], 'Flag arguments to pass to binary. Follow the format '
'--args=batch_size=64,train_epochs=300.')
flags.DEFINE_string(
'config', None, 'Filepath to Python file with a function '
'get_sweep(hyper) returning a hyperparameter sweep and/or '
'a function get_config() returning a ConfigDict.')
flags.DEFINE_bool('launch_on_gcp', True, 'Whether or not to launch on GCS.')
# Accelerator flags
flags.DEFINE_string('platform', None, 'Platform (e.g., tpu-v2, tpu-v3, gpu).')
flags.DEFINE_string(
'tpu_topology', '2x2',
'TPU topology. Only used if platform is TPU. {x}x{y} means x*x **chips**, '
'and because the number of devices is the number of cores, we further '
'multiply by 2 because there are 2 cores per chip. For example, 2x2 is '
'equivalent to an 8 core TPU slice, 8x8 = 128 cores, etc.')
flags.DEFINE_string('gpu_type', 'p100',
'GPU type. Only used if platform is GPU.')
flags.DEFINE_integer('num_gpus', None,
'Number of GPUs. Only used if platform is GPU.')
flags.DEFINE_integer('num_cpus', None, 'Number of CPUs.')
flags.DEFINE_integer('num_workers', 1, 'Number of workers (including chief)'
'in cluster.')
flags.DEFINE_integer(
'memory', None, 'Amount of CPU memory in GB. Only used if launching on '
'GCP.')
flags.DEFINE_string('experiment_name', None,
'Experiment name; defaults to timestamp.')
flags.DEFINE_integer('num_runs', 1,
'Number of runs each with a different seed.')
FLAGS = flags.FLAGS
_JobMetadata = collections.namedtuple('_JobMetadata', [
'platform_str',
'num_workers',
'gpu_type',
'num_gpus',
'tpu_topology',
'num_cpus',
'experiment_name',
'memory',
])
def _get_attr(config, name: str) -> Optional[Any]:
"""Get a given attribute from the passed FLAGS or ConfigDict."""
# Note that if a flag is passed with its default value, this will not override
# a conflicting config value.
has_flag_value = name in FLAGS and FLAGS[name].value != FLAGS[name].default
if has_flag_value:
return FLAGS[name].value
elif config and name in config:
return config[name]
elif name in FLAGS:
return FLAGS[name].default
return None
def _build_binary_metadata(config):
"""Extracts job metadata and args from the given ConfigDict and/or FLAGS."""
if config:
flag_args = config.args
experiment_name = _get_attr(config, 'experiment_name')
else:
flag_args = dict(arg.split('=', 1) for arg in FLAGS.args)
experiment_name = FLAGS.experiment_name
if not experiment_name: # default experiment name
experiment_name = time.strftime('%m%d_%H%M%S')
metadata = _JobMetadata(
platform_str=_get_attr(config, 'platform'),
num_workers=_get_attr(config, 'num_workers'),
gpu_type=_get_attr(config, 'gpu_type'),
num_gpus=_get_attr(config, 'num_gpus'),
tpu_topology=_get_attr(config, 'tpu_topology'),
num_cpus=_get_attr(config, 'num_cpus'),
memory=_get_attr(config, 'memory'),
experiment_name=experiment_name,
)
use_gpu = 'gpu' in metadata.platform_str or metadata.platform_str == 'cpu'
if metadata.platform_str == 'cpu':
num_cores = 1
elif 'gpu' in metadata.platform_str:
num_cores = metadata.num_gpus
else:
num_cores = 2 * functools.reduce(
operator.mul, [int(i) for i in metadata.tpu_topology.split('x')])
if 'num_cores' in flag_args and flag_args['num_cores'] != num_cores:
raise ValueError(
'"num_cores" requested in binary incompatible with inferred number of '
'cores based on tpu_topology and platform_str ({}!={} respectively)'
.format(flag_args['num_cores'], num_cores))
args = dict(num_cores=num_cores, use_gpu=use_gpu)
args.update(flag_args)
return args, metadata
def _split_path_to_ub(filepath):
"""For a path '/a/b/c/baselines/...', return '/a/b/c', 'baselines/...'."""
filepath = os.path.abspath(filepath)
pieces = filepath.split('/')
library_index = None
for pi, piece in enumerate(pieces):
if piece == 'qhbm-library':
library_index = pi + 1
break
if library_index is None:
raise ValueError(
'Unable to parse FLAGS.binary ({}) to find the location of the qhbm-library.'.format(filepath))
library_dir = '/'.join(pieces[:library_index])
project_dir = '/'.join(pieces[library_index:-1])
binary_path = '/'.join(pieces[-1:])
return library_dir, project_dir, binary_path
def _launch_gcp_experiment(library_dir, project_dir, binary_path, sweep, args, metadata):
  """Launch a job on GCP using the Cloud AI Platform.

  Args:
    library_dir: Root of the qhbm-library checkout packaged into the image.
    project_dir: Directory inside the library that becomes the WORKDIR.
    binary_path: Path (relative to project_dir) of the python binary to run.
    sweep: Sequence of dicts with per-job argument overrides.
    args: Dict of arguments shared by all jobs in the sweep.
    metadata: _JobMetadata with hardware and experiment settings.
  """
  with xm_local.create_experiment(metadata.experiment_name) as experiment:
    # Note that we normally would need to append a "$@" in order to properly
    # forward the args passed to the job into the python command, but the XM
    # library already does this for us.
    run_cmd = f'python {binary_path} $@'
    # These images are necessary to get tf-nightly pre-installed.
    # Our lazy loading `__getattr__ = _lazy_import` in `__init__.py` requires
    # at least Python 3.7, so we use a base image that has Python 3.7.
    if metadata.platform_str == 'gpu':
      # base_image = 'tensorflow/tensorflow:nightly-gpu'
      base_image = 'gcr.io/deeplearning-platform-release/tf2-gpu.2-7'
    else:
      # base_image = 'tensorflow/tensorflow:nightly'
      base_image = 'gcr.io/deeplearning-platform-release/tf2-cpu.2-7'
    pip_cmd = 'pip --no-cache-dir install'
    # spec = xm.Dockerfile(path=library_dir)
    spec = xm.PythonContainer(
        path=library_dir,
        base_image=base_image,
        entrypoint=xm.CommandList([run_cmd]),
        docker_instructions=[
            f'COPY {os.path.basename(library_dir)} qhbm-library',
            f'WORKDIR qhbm-library',
            'RUN curl -sSL https://install.python-poetry.org | python - --preview',
            'RUN export PATH="/root/.local/bin:$PATH"',
            'RUN /root/.local/bin/poetry config virtualenvs.create false && /root/.local/bin/poetry install --no-interaction --no-ansi',
            f'RUN {pip_cmd} ml_collections',
            f'WORKDIR {project_dir}',
        ],
    )
    [executable] = experiment.package([
        xm.Packageable(
            executable_spec=spec,
            executor_spec=xm_local.Vertex.Spec(),
        ),
    ])
    platform = {}
    if 'tpu' in metadata.platform_str:
      # To run on a tpu-v2-8, tpu_topology should be 2x2.
      # BUGFIX: map() returns a one-shot iterator in Python 3 and does not
      # support indexing; materialize the dimensions into a list first.
      pieces = [int(piece) for piece in metadata.tpu_topology.split('x')]
      num_tpus = pieces[0] * pieces[1] * 2  # 2 cores per TPU chip.
      platform = {metadata.platform_str.split('-')[-1]: num_tpus}
    elif metadata.platform_str == 'gpu':
      platform = {metadata.gpu_type: metadata.num_gpus}
    if metadata.num_cpus is not None:
      platform['cpu'] = metadata.num_cpus * xm.vCPU
    if metadata.memory is not None:
      platform['memory'] = metadata.memory * xm.GiB
    bucket = os.environ.get('GOOGLE_CLOUD_BUCKET_NAME')
    tensorboard = vertex.get_default_client().get_or_create_tensorboard(metadata.experiment_name)
    tensorboard = asyncio.get_event_loop().run_until_complete(tensorboard)
    # Create one job per setting in the hyperparameter sweep. The default case
    # is a length 1 sweep with a single argument name "seed".
    for ji, sweep_args in enumerate(sweep):
      job_args = args.copy()
      if 'output_dir' in job_args:
        job_args['output_dir'] = os.path.join(bucket, job_args['output_dir'], str(experiment.experiment_id), str(ji))
      if 'data_dir' in job_args and job_args.get('download_data', False):
        job_args['data_dir'] = os.path.join(bucket, job_args['data_dir'], str(experiment.experiment_id), str(ji))
      # Overwrite any values in `args` with the `sweep_args`.
      job_args.update(sweep_args)
      tensorboard_capability = xm_local.TensorboardCapability(name=tensorboard, base_output_directory=job_args['output_dir'])
      job_requirements = xm.JobRequirements(**platform)
      executor = xm_local.Vertex(requirements=job_requirements, tensorboard=tensorboard_capability)
      logging.info('Launching job %d/%d with args %s.\n', ji + 1, len(sweep),
                   json.dumps(job_args, indent=4, sort_keys=True))
      job = xm.Job(
          executable=executable,
          executor=executor,
          args=job_args,
      )
      experiment.add(job)
def _generate_hyperparameter_sweep(
    config_module) -> List[Dict[Text, Any]]:
  """Generate the hyperparameter sweep."""
  if FLAGS.config and hasattr(config_module, 'get_sweep'):
    # A config-defined sweep replaces the default seed sweep entirely.
    if FLAGS.num_runs != 1:
      raise ValueError('FLAGS.num_runs not supported with config.get_sweep().')
    return config_module.get_sweep()
  # Default sweep: one job per run, each with a randomly offset seed.
  return [{'seed': run_index + random.randint(0, 1e10)}
          for run_index in range(FLAGS.num_runs)]
def _load_config_helper(config_path, launch_on_gcp):
"""Get the ConfigDict from config_path:get_config()."""
config_module_spec = importlib.util.spec_from_file_location(
'', os.path.abspath(config_path))
config_module = importlib.util.module_from_spec(config_module_spec)
config_module_spec.loader.exec_module(config_module)
config = None
if 'get_config' in dir(config_module):
# Check if get_config takes a parameter called launch_on_gcp, and if so then
# pass in FLAGS.launch_on_gcp.
get_config_inspect = inspect.getfullargspec(config_module.get_config)
get_config_params = get_config_inspect.args
if 'launch_on_gcp' in get_config_params:
config = config_module.get_config(launch_on_gcp=launch_on_gcp)
else:
config = config_module.get_config()
return config_module, config
def _load_config(config_path, launch_on_gcp):
  """Load the ConfigDict if one was passed in as FLAGS.config.

  Returns:
    A (config_module, config) pair; both are None when no path was given.
  """
  # The previous version set config_module = None and immediately re-tested
  # `if not config_module:` -- that branch was always taken, so the dead
  # wrapper is removed.
  if config_path:
    config_module, config = _load_config_helper(config_path, launch_on_gcp)
  else:
    config_module = None
    config = None
  return config_module, config
# Script entry point: absl parses flags, then dispatches to main().
if __name__ == '__main__':
  app.run(main)
| [
37811,
55,
13511,
14,
55,
18839,
24008,
329,
1111,
11362,
290,
309,
5105,
3946,
13,
198,
198,
464,
24008,
2499,
351,
597,
11361,
13934,
351,
262,
1708,
9701,
25,
198,
198,
9,
4600,
22915,
62,
15908,
63,
318,
262,
8619,
329,
8914,
30... | 2.600595 | 4,369 |
import os
import platform
try:
    from importlib.metadata import version as _version_func  # type: ignore
except ImportError:
    # Python 3.7 and lower
    from importlib_metadata import version as _version_func  # type: ignore
# Public API of this metadata module.
__all__ = ("name", "version", "node", "environment")
# Distribution name of the enclosing package.
name = __package__
# Installed version, resolved from the package metadata.
version = _version_func(__package__)
# Network name of the host this process runs on.
node = platform.node()
# Deployment environment; defaults to "dev" when APP_ENVIRONMENT is unset.
environment = os.environ.get("APP_ENVIRONMENT", "dev")
| [
11748,
28686,
198,
11748,
3859,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
1330,
8019,
13,
38993,
1330,
2196,
355,
4808,
9641,
62,
20786,
220,
1303,
2099,
25,
8856,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
1303,
11361,
513... | 3.157895 | 133 |
from . import parsed_email
import mailbox
from email.utils import parseaddr
# Script entry point: iterate over the fetched emails and dump their parts.
if __name__ == '__main__':
	for mail in check_emails():
		print(mail.getall())
| [
6738,
764,
1330,
44267,
62,
12888,
198,
11748,
37282,
198,
6738,
3053,
13,
26791,
1330,
21136,
29851,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
1640,
6920,
287,
2198,
62,
368,
1768,
33529,
198,
... | 3.098039 | 51 |
#!/usr/bin/env python
#coding=utf8
from scipy.io import wavfile
import argparse
import numpy as np
import pygame
import sys
import warnings
import Queue
# Queue of note numbers for the tune to be played.
myQueue = Queue.Queue()
# Alternative (commented-out) tunes kept for experimentation:
#music=[0,0,0,0,0,0]
#music=[1,2,3,4,5,6,7,8,8,7,6,5,4,3,2,1]
#music=[-2,1,3,5,1,0,3,5,5,6,7,8,6,6,5,5,3,2,1,1,1,3,2,1,1,1,2,3,2,1,-1,2,3,2]
music=[1,1,5,5,6,6,5,4,4,3,3,2,2,1,5,5,4,4,3,3,2,5,5,4,4,3,3,2,1,1,5,5,6,6,5,4,4,3,3,2,2,1]# Twinkle Twinkle Little Star
#music=[5,3,3,4,2,2,1,2,3,4,5,5,5,5,3,3,4,2,2,1,3,5,3,3,3,3,3,3,3,4,5,4,4,4,4,4,5,6,5,3,3,4,2,2,1,3,5,3,1]
for i in music:
    myQueue.put(i*2+24)# scale degree -> note number; one position off is half a tone off
'''
后续:长音处理,长音不能拼接(基准是开头音强),需要拉长
增加乐谱,用queue
增加提示?提示长音短音
声音的音质
'''
def speedx(snd_array, factor):
    """ Speeds up / slows down a sound, by some factor. """
    # Resample by stepping through the signal at `factor`-sized strides:
    # dropping/repeating samples changes both pitch and duration.
    sample_positions = np.round(np.arange(0, len(snd_array), factor))
    # Keep only in-range positions, then index with them as integers.
    in_range = sample_positions < len(snd_array)
    return snd_array[sample_positions[in_range].astype(int)]
def stretch(snd_array, factor, window_size, h):
    """ Stretches/shortens a sound, by some factor.

    Phase-vocoder style time stretch: the signal is processed in
    Hann-windowed frames, each frame is rephased against its predecessor,
    and the frames are overlap-added at a hop scaled by `factor`.  The
    duration changes, the pitch does not.

    Args:
        snd_array: 1-D numpy array of samples.
        factor: > 1 shortens the sound, < 1 stretches it.
        window_size: FFT frame length in samples.
        h: hop size (overlap step) in samples.

    Returns:
        int16 numpy array with the rescaled duration.
    """
    phase = np.zeros(window_size)
    hanning_window = np.hanning(window_size)
    result = np.zeros(int(len(snd_array)/ factor) + window_size)
    for i in np.arange(0, len(snd_array) - (window_size + h), h*factor):
        # Two potentially overlapping subarrays
        i = int(i)
        a1 = snd_array[i: i + window_size]
        a2 = snd_array[i + h: i + window_size + h]
        # The spectra of these arrays
        s1 = np.fft.fft(hanning_window * a1)
        s2 = np.fft.fft(hanning_window * a2)
        # Rephase all frequencies.
        # BUGFIX: the original wrote `% 2*np.pi`, which parses as
        # `(x % 2) * np.pi` and never wraps the phase into [0, 2*pi).
        phase = (phase + np.angle(s2/s1)) % (2 * np.pi)
        a2_rephased = np.fft.ifft(np.abs(s2)*np.exp(1j*phase))
        i2 = int(i/factor)
        result[i2: i2 + window_size] += hanning_window*a2_rephased.real
    # normalize (16bit)
    result = ((2**(16-4)) * result/result.max())
    return result.astype('int16')
def pitchshift(snd_array, n, window_size=2**13, h=2**11):
    """ Changes the pitch of a sound by ``n`` semitones. """
    # Raising the pitch by n semitones multiplies the frequency by 2**(n/12).
    # First time-stretch by the inverse ratio (keeps pitch, changes length),
    # then resample by the ratio (restores the length, shifts the pitch).
    ratio = 2**(1.0 * n / 12.0)
    slowed = stretch(snd_array, 1.0/ratio, window_size, h)
    return speedx(slowed[window_size:], ratio)
# Script entry point: run the player (main() is defined elsewhere in this
# module) until the user interrupts with Ctrl-C.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Goodbye')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
66,
7656,
28,
40477,
23,
198,
6738,
629,
541,
88,
13,
952,
1330,
266,
615,
7753,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
6057,
198,
11748,
... | 1.510129 | 1,629 |
import pytest
from mock import Mock, patch
from service import poll_lobby
# Query-string payload for a player that is present in the lobbies below.
valid_query_string_params = {
    "queryStringParameters": {
        "PlayerID": "Player2",
        "LobbyID": "Lobby1"}
}
# Lobby where the requesting player (Player2) is a member; game not started
# (GameID "0").
valid_get_lobby = {
    "GameID": "0",
    "Players": [
        {"PlayerID": "Player1", "PlayerName": "Bob", "PlayerSideSelection": 0, "PlayerState": 0},
        {"PlayerID": "Player2", "PlayerName": "Joe", "PlayerSideSelection": 1, "PlayerState": 0}
    ]
}
# Lobby where the requesting player is NOT a member (Player3 instead).
not_in_get_lobby = {
    "GameID": "0",
    "Players": [
        {"PlayerID": "Player1", "PlayerName": "Bob", "PlayerSideSelection": 0, "PlayerState": 0},
        {"PlayerID": "Player3", "PlayerName": "Tom", "PlayerSideSelection": 1, "PlayerState": 0}
    ]
}
# Lobby whose game has already started (non-zero GameID).
game_started_get_lobby = {
    "GameID": "Game1",
    "Players": [
        {"PlayerID": "Player1", "PlayerName": "Bob", "PlayerSideSelection": 0, "PlayerState": 0},
        {"PlayerID": "Player2", "PlayerName": "Joe", "PlayerSideSelection": 1, "PlayerState": 0}
    ]
}
@patch('service.lobby_service_common.get_lobby')
@patch('service.lobby_service_common.get_lobby')
@patch('service.lobby_service_common.get_lobby')
@patch('service.lobby_service_common.get_lobby')
| [
11748,
12972,
9288,
198,
6738,
15290,
1330,
44123,
11,
8529,
198,
198,
6738,
2139,
1330,
3278,
62,
75,
11369,
198,
198,
12102,
62,
22766,
62,
8841,
62,
37266,
796,
1391,
198,
220,
220,
220,
366,
22766,
10100,
48944,
1298,
1391,
198,
2... | 2.44877 | 488 |
import pytest
import aiopoke
@pytest.fixture
| [
11748,
12972,
9288,
198,
198,
11748,
257,
14922,
2088,
628,
198,
31,
9078,
9288,
13,
69,
9602,
198
] | 2.666667 | 18 |
from django import forms
from django.contrib.formtools.wizard.storage import BaseStorage
from hyperadmin.resources.wizard import Wizard, FormStep, MultiPartStep
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
687,
31391,
13,
86,
8669,
13,
35350,
1330,
7308,
31425,
198,
198,
6738,
8718,
28482,
13,
37540,
13,
86,
8669,
1330,
16884,
11,
5178,
8600,
11,
15237,
7841,
86... | 3.790698 | 43 |
import json
import hashlib
import re
import os
import random
import math
from urllib.parse import urlencode
from functools import wraps
from django.db.models import Avg, Q, Count, F
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404
from django.contrib import messages
from django.urls import reverse
from rest_framework.renderers import JSONRenderer
from soso.models import *
from SJTUsoso.utils import *
from SJTUsoso import settings
from soso import models as sosomodels
from blog.models import Wechat, Video, User, Rate, VideoComments, MessageBoard, CollectBoard, BoardComment
from . import models
from .forms import *
from .icf import ItemBasedCF
from .recom_friend import *
# Create your views here.
# def reset(req):
# return render(req, 'reset.html')
| [
11748,
33918,
198,
11748,
12234,
8019,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
10688,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
2956,
11925,
8189,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
198,
6738,
42625... | 3.409594 | 271 |
import unittest
from nmj.cleaners import MovieCleaner, TVShowCleaner
# Run this module's test suite when executed directly.
if __name__ == "__main__":
	unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
28642,
73,
13,
27773,
364,
1330,
15875,
32657,
263,
11,
3195,
15307,
32657,
263,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
403,
715,
395,
13,
12417,
3419,
198
] | 2.761905 | 42 |
#!/usr/bin/python
# login.cgi
import cgi
if __name__ == '__main__':
    try:
        # Read the optional "message" query/form parameter that selects the
        # status text this login page displays.
        formData = cgi.FieldStorage()
        message = formData.getvalue('message','')
        htmlTop()
        if message == "wrong_password":
            htmlMid("No such account or the password is wrong, please try again.")
        elif message == "register_success":
            htmlMid("Registration success, please login.")
        else:
            # Unknown (or empty) message: echo it as-is.
            htmlMid(message)
        htmlTail()
    except Exception:
        # Render the traceback as an HTML page instead of a blank 500.
        # (Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.)
        cgi.print_exception()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
17594,
13,
37157,
198,
11748,
269,
12397,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1949,
25,
198,
220,
220,
220,
220,
220,
220,
220,
1296,
... | 2.274336 | 226 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ZoneLockdownArgs', 'ZoneLockdown']
@pulumi.input_type
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.408759 | 137 |
from flask import Flask, request
from flask_basicauth import BasicAuth
from werkzeug.wrappers import Response
from intents import controller
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
import json
import os
# Configuration information for the application
app = Flask(__name__)
#app.debug = True
# NOTE(review): credentials are hard-coded in source control; move them to
# environment variables or a secrets store before deploying.
app.config['BASIC_AUTH_USERNAME'] = 'cisconlp'
app.config['BASIC_AUTH_PASSWORD'] = 'thisisanamazingpasswordfortheserverthatwehave52394023409u-,,-,-.-'
basic_auth = BasicAuth(app)
# Listen for events
@app.route("/", methods=["POST"])
@basic_auth.required
def listen():
    '''Main route that receives all requests from API.AI/Spark for processing'''
    received = json.loads(request.data)
    # Ask the intent controller for the response matching this utterance.
    resp_item = controller.fetch_response(
        sessionId=received['sessionId'],
        parameters=received['result']['parameters'],
        contexts=received['result']['contexts'],
        resolvedQuery=received['result']['resolvedQuery'],
        intentId=received['result']['metadata']['intentId'],
        intentName=received['result']['metadata']['intentName']
    )
    resp_text = resp_item['text']
    resp_markdown = resp_item['markdown']
    file = resp_item['file']
    # Build the Spark message payload; attach a file only when one exists.
    # BUGFIX: use identity comparison with None instead of `file == None`.
    if file is None:
        post = {
            "toPersonId": None,
            "roomId": None,
            "text": resp_text,
            "markdown": resp_markdown,
        }
    else:
        post = {
            "toPersonId": None,
            "roomId": None,
            "text": resp_text,
            "markdown": resp_markdown,
            "files": ('My Result', file, 'image/jpg')
        }
    #Checks if the communication was with a single user or with a room
    if received['originalRequest']['data']['data']['roomType'] == 'direct':
        post['toPersonId'] = received['originalRequest']['data']['data']['personId']
        del post['roomId']
    else:
        post['roomId'] = received['originalRequest']['data']['data']['roomId']
        del post['toPersonId']
    m = MultipartEncoder(post)
    headers = {
        'content-type': m.content_type,
        # NOTE(review): hard-coded bearer token checked into source; load it
        # from configuration/secrets instead.
        'authorization': 'Bearer YWYzNDVkY2MtMDg5MC00MjRlLTk2MzYtMzFkZjllYTBjODBiYWFlYTc2OTItYmM3'
    }
    # NOTE(review): verify=False disables TLS certificate validation.
    requests.post('https://api.ciscospark.com/v1/messages', headers=headers, verify=False, data=m).json()
    # API.AI expects a JSON body; reply with empty speech/display text.
    resp = Response(json.dumps({
        'speech': '',
        'displayText': ''
    }))
    resp.headers['Content-Type'] = 'application/json'
    return resp
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(port=5000)
6738,
42903,
1330,
46947,
11,
2581,
198,
6738,
42903,
62,
12093,
3970,
1071,
1330,
14392,
30515,
198,
6738,
266,
9587,
2736,
1018,
13,
29988,
11799,
1330,
18261,
198,
6738,
493,
658,
1330,
10444,
198,
11748,
7007,
198,
6738,
7007,
62,
2... | 2.398849 | 1,043 |
"""
Given n non-negative integers representing the histogram's bar heights, where the width of each bar is 1, find the area of the largest rectangle in the histogram.
https://leetcode.com/static/images/problemset/histogram.png
Above is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].
https://leetcode.com/static/images/problemset/histogram_area.png
The largest rectangle is shown in the shaded area, which has area = 10 unit.
Example:
Input: [2,1,5,6,2,3]
Output: 10
"""
| [
37811,
198,
15056,
299,
1729,
12,
31591,
37014,
10200,
262,
1554,
21857,
338,
2318,
6001,
810,
262,
9647,
286,
1123,
2318,
318,
352,
11,
1064,
262,
1989,
286,
4387,
35991,
287,
262,
1554,
21857,
13,
198,
198,
5450,
1378,
293,
316,
818... | 3.10625 | 160 |
# -*- coding: utf-8 -*-
import os
import numpy as np
import urllib.request
from flask import Flask, render_template, request, redirect, url_for, send_from_directory, session
from werkzeug.utils import secure_filename
from PIL import Image
import cv2
import io
from keras import models
from keras.models import load_model
# 初回はVGG16 訓練済みモデル(540MB)をダウンロードするために50分ほど時間がかかる
# 訓練済みモデルの保存場所 カレントディレクトリの中の /.keras に作られる.
# /.keras/model/vgg16_weights_tf_dim_ordering_tf_kernels.h5
#from keras.applications.vgg16 import VGG16
#from keras.applications.vgg16 import preprocess_input, decode_predictions
#from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
#import keras.preprocessing.image as Image
from keras.applications.mobilenet import MobileNet, preprocess_input, decode_predictions
from keras.layers import Input
app = Flask(__name__)
# Directory where uploaded images are stored on disk.
UPLOAD_FOLDER = './uploads'
# Accepted upload file extensions (case variants listed explicitly).
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'PNG', 'JPG'])
# Target image width in pixels — presumably used when displaying/resizing
# uploads; the consuming code is outside this chunk, so confirm before relying
# on it.
IMAGE_WIDTH = 640
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Random per-process secret used for session signing.
app.config['SECRET_KEY'] = os.urandom(24)
@app.route('/')
@app.route('/send', methods=['GET', 'POST'])
@app.route('/uploads/<filename>')
if __name__ == '__main__':
    # Debug mode enables the reloader and interactive tracebacks; disable in
    # production.
    app.debug = True
    app.run()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
11,
18941,
11,
190... | 2.34542 | 524 |
# -*- coding: UTF-8 -*-
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    "Find a file in a search path"
    # Adapted from
    # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    candidates = (pjoin(directory, name) for directory in path.split(os.pathsep))
    for candidate in candidates:
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def locate_cuda():
    """Locate the CUDA environment on the system

    Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
    and values giving the absolute path to each directory.

    Starts by looking for the CUDAHOME env variable. If not found, everything
    is based on finding 'nvcc' in the PATH.
    """
    home = os.environ.get('CUDAHOME')
    if home is not None:
        nvcc = pjoin(home, 'bin', 'nvcc.exe')
    else:
        # No CUDAHOME set: fall back to searching the PATH for the compiler.
        nvcc = find_in_path('nvcc.exe', os.environ['PATH'])
        if nvcc is None:
            raise EnvironmentError('The nvcc binary could not be '
                'located in your $PATH. Either add it to your path, or set $CUDAHOME')
        home = os.path.dirname(os.path.dirname(nvcc))

    cudaconfig = {'home': home, 'nvcc': nvcc,
                  'include': pjoin(home, 'include'),
                  'lib64': pjoin(home, 'lib', 'x64')}
    # Sanity-check that every expected path actually exists.
    for key, value in cudaconfig.items():
        if not os.path.exists(value):
            raise EnvironmentError('The CUDA %s path could not be located in %s' % (key, value))
    return cudaconfig
# Resolve the CUDA toolchain once at import time; raises if CUDA is missing.
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
    numpy_include = np.get_include()
except AttributeError:
    # Very old numpy releases exposed get_numpy_include() instead.
    numpy_include = np.get_numpy_include()
# run the customize_compiler
ext_modules = [
    # Cython CPU extensions: all share the numpy include dir and gcc flags.
    Extension(
        "utils.cython_bbox",
        ["utils/bbox.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include],
    ),
    Extension(
        "utils.cython_nms",
        ["utils/nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include],
    ),
    Extension(
        "nms.cpu_nms",
        ["nms/cpu_nms.pyx"],
        extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
        include_dirs=[numpy_include],
    ),
    # GPU NMS: a CUDA kernel plus a C++ wrapper, linked against cudart.
    Extension(
        'nms.gpu_nms',
        ['nms/nms_kernel.cu', 'nms/gpu_nms.cpp'],
        library_dirs=[CUDA['lib64']],
        libraries=['cudart'],
        language='c++',
        # this syntax is specific to this build system
        # we're only going to use certain compiler args with nvcc and not with gcc
        # the implementation of this trick is in customize_compiler() below
        extra_compile_args={
            'gcc': ["-Wno-unused-function"],
            'nvcc': [
                '-arch=sm_35',
                '--ptxas-options=-v',
                '-c',
                '--compiler-options',
                '-fPIC',
            ],
        },
        include_dirs=[numpy_include, CUDA['include']],
    ),
]
# Build entry point: compiles the Cython/CUDA extensions declared above.
# `custom_build_ext` is defined outside this chunk — presumably it customizes
# the compiler so nvcc handles the .cu sources; confirm against the full file.
setup(
    name='tf_faster_rcnn',
    ext_modules=ext_modules,
    # inject our custom trigger
    cmdclass={'build_ext': custom_build_ext},
)
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
20368,
22369,
198,
2,
12549,
371,
12,
18474,
198,
2,
15069,
357,
66,
8,
1853,
5413,
198,
2,
49962,
739,
383,
17168,
13789,
685,
3826,
38559,
24290,
329,
3307,
60,
198,... | 2.253341 | 1,646 |
#!/usr/bin/env python3
"""
Monitor GPIO pins ("lines") using the "new" way (libgpiod).
Thus it depends on python3-gpiod being installed:
https://git.kernel.org/pub/scm/libs/libgpiod/libgpiod.git
"""
import contextlib
import logging
import sys
import time
from typing import Dict, List, Callable, Tuple, Optional, Iterator
# pylint: disable=import-error
import gpiod # type: ignore
# Module-level logger; handlers/levels are configured by the caller.
logger: logging.Logger = logging.getLogger(__name__)
# configure time [ms] over which a new signal has to be stable before
# a change in state is assumed
# after which time to check the state [ms]
DEBOUNCE_CHECK_INTERVAL: int = 5
# how long has a change to "active" to be stable [ms]
DEBOUNCE_ACTIVE_INTERVAL: int = 10
# how long has a change to "inactive" to be stable [ms]
DEBOUNCE_INACTIVE_INTERVAL: int = 100
# while receiving a stable "active" signal send `on_active` in regular intervals
ACTIVE_PULSES: bool = False
# interval for pulses [ms]
ACTIVE_PULSE_INTERVAL: int = 500
class GPIOPin:
    # pylint: disable=too-many-instance-attributes
    """Class to hold data associated with each registered pin.

    Holds:
    * the current state (this will only change after debouncing the
      signal)
    * the state of the countdown
    * a list of callback functions to be called on a change to active /
      inactive

    Attributes:
        _num: The number of the pin.
        _active: Is the pin in active state?
        _countdown: This is activated on raw pin state change and
            decreases with every tick. If it reaches zero the state is
            assumed to be stable.
        _countup: This counts up as soon as an active signal is stable.
            This is used to trigger callbacks in `on_long_active`.
        on_active: Functions to call on state change to active.
        on_inactive: Functions to call on state change to inactive.
        on_long_active: Functions to call if the state has been
            active for X ms.
        _stack: A working copy of `on_long_active` where all called
            callbacks are popped off.
    """
    # save some space by using slots
    __slots__ = ('_num', '_active', '_countdown', '_countup', 'on_active',
                 'on_inactive', 'on_long_active', '_stack')

    active_interval: int = DEBOUNCE_ACTIVE_INTERVAL
    inactive_interval: int = DEBOUNCE_INACTIVE_INTERVAL
    check_interval: int = DEBOUNCE_CHECK_INTERVAL
    active_pulses: bool = ACTIVE_PULSES
    active_pulse_interval: int = ACTIVE_PULSE_INTERVAL

    def __init__(self, num: int) -> None:
        """Initialise the accessible variables.

        Arguments:
            num: The number of the pin.
        """
        self._num: int = num
        # key is initially assumed to be not pressed
        self._active: bool = False
        # the countdown to accept a signal as "pressed"
        self._countdown: int = GPIOPin.active_interval
        # the countup to accept a signal as "long_pressed"
        self._countup: int = 0
        self.on_active: List[Callable[[int], None]] = []
        self.on_inactive: List[Callable[[int], None]] = []
        # list of tuples: (milliseconds, callback)
        self.on_long_active: List[Tuple[int, Callable[[int], None]]] = []
        # working copy of on_long_active
        self._stack: List[Tuple[int, Callable[[int], None]]] = []

    def set_state(self, active: bool) -> None:
        """This function is called once the signal has stably changed.

        Attributes:
            active: Is the state "active"?
        """
        logger.debug('pin: %s, state: %s', self._num, active)
        self._active = active
        if active:
            for callback in self.on_active:
                callback(self._num)
        else:
            for callback in self.on_inactive:
                callback(self._num)

    def is_active(self) -> bool:
        """Is the pin active?

        Returns:
            Is the stable state of the pin "active"?
        """
        return self._active

    def reset_countdown(self) -> None:
        """Reset the countdown for a signal to be stable.

        The length of the interval before a signal is considered stable
        depends on the state. React faster for changes to "active", the
        user might not be patient.
        """
        if self._active:
            self._countdown = GPIOPin.inactive_interval
        else:
            self._countdown = GPIOPin.active_interval

    def tick(self, raw_active: bool) -> None:
        """Debounce a change to active / inactive.

        This function is called every DEBOUNCE_CHECK_INTERVAL
        milliseconds.

        If the raw state of a pin differs from its known state this
        function tries to determine if it's a real change or just
        noise:
        A countdown is started and with every check that holds the new
        state the count is decreased.
        If the count reaches 0 the new state is accepted. If a the old
        state is detected inbetween the countdown is reset and starts
        again if a new state is detected.

        Adaption of: https://my.eng.utah.edu/~cs5780/debouncing.pdf

        Arguments:
            raw_active: The state as read from the pin ("line").
        """
        if raw_active == self._active:
            # state does not differ from the last accepted state
            # so reset the countdown
            self.reset_countdown()
            # if the state is active
            if self._active:
                # count up
                self._countup += GPIOPin.check_interval
                to_pop: List[int] = []
                for i, (fire_after, callback) in enumerate(self._stack):
                    if self._countup >= fire_after:
                        # fire callback
                        callback(self._num)
                        # remove it from the list of available callbacks
                        to_pop.append(i)
                    else:
                        # break loop
                        # the list is sorted by the length
                        # all following items will need an even larger value of
                        # countup
                        break
                # remove fired callbacks
                # BUGFIX: pop from the highest index down — popping ascending
                # indices from a shrinking list removes the wrong entries.
                for i in reversed(to_pop):
                    self._stack.pop(i)
                # if we are on a multiple of `active_pulse_interval`
                # BUGFIX: the original condition was truthy on NON-multiples,
                # which pulsed on almost every tick instead of every
                # `active_pulse_interval` milliseconds.
                if (self.active_pulses
                        and self._countup % self.active_pulse_interval == 0):
                    # send a pulse
                    for callback in self.on_active:
                        callback(self._num)
        else:
            # state is not the last accepted state
            # so decrease the count by DEBOUNCE_CHECK_INTERVAL
            self._countdown -= GPIOPin.check_interval
            if self._countdown == 0:
                # signal seems stable
                # accept the new state
                self.set_state(raw_active)
                # and prepare the countdown for the next change
                self.reset_countdown()
                # if the new state is active
                if self._active:
                    # create a working copy
                    self._stack = self.on_long_active.copy()
                    # and reset countup
                    self._countup = 0
class GPIODMonitor:
"""Eventemitter using libgpiod and debouncing the raw signal.
For the debouncing algorithm see:
See: https://my.eng.utah.edu/~cs5780/debouncing.pdf
Attributes:
_chip_number: The number of the chip with the pins.
_chip: The gpiod.Chip.
_pins: The pins by their number.
check_interval: The interval with which to check the pins'
state in milliseconds.
"""
def __init__(self,
chip_number: int = 0,
check_interval: int = DEBOUNCE_CHECK_INTERVAL,
active_interval: int = DEBOUNCE_ACTIVE_INTERVAL,
inactive_interval: int = DEBOUNCE_INACTIVE_INTERVAL,
active_pulses: bool = ACTIVE_PULSES,
active_pulse_interval: int = ACTIVE_PULSE_INTERVAL):
# pylint: disable=too-many-arguments
"""Set default values.
Arguments:
chip_number: The number of the gpio chip; 0 if in doubt.
check_interval: The interval with which to check the pins'
state in milliseconds.
active_interval: The interval it takes for a stable active
signal to trigger a change in state in milliseconds.
inactive_interval: The interval it takes for a stable
inactive signal to trigger a change in state in
milliseconds.
active_pulses: While recieving a stable "active" signal
send `on_active` in regular intervals.
active_pulse_interval: Interval for pulses in milliseconds.
"""
logger.debug('creating monitor on chip %s', chip_number)
self._chip_number = chip_number
self._chip: Optional[gpiod.Chip] = None
self._pins: Dict[int, GPIOPin] = {}
self.check_interval: int = check_interval
GPIOPin.check_interval = check_interval
GPIOPin.active_interval = active_interval
GPIOPin.inactive_interval = inactive_interval
GPIOPin.active_pulses = active_pulses
GPIOPin.active_pulse_interval = active_pulse_interval
def get_pins(self) -> Dict[int, GPIOPin]:
"""Return the pins.
Returns:
The pins mapped to their number.
"""
return self._pins
def is_raw_pin_active(self, pin: int) -> bool:
"""Is the raw state of the pin "active".
Arguments:
pin: Number of the pin.
Returns:
Is the pin ("line") active?
"""
if not self._chip:
raise IOError('Chip not opened.')
return bool(self._chip.get_line(pin).get_value())
def register(self,
pin,
on_active: Optional[Callable[[int], None]] = None,
on_inactive: Optional[Callable[[int], None]] = None) -> None:
"""Register a callback for a stable signal change on a pin.
If you want to have multiple callbacks for one event call this
function often as you like but don't hand it a list.
Arguments:
pin: The BCM-number of the pin.
on_active: Function to call if the state changes to active.
on_inactive: Function to call if the state changes to
inctive.
"""
if not pin in self._pins:
logger.debug('registering new pin %s', pin)
self._pins[pin] = GPIOPin(pin)
if on_active:
self._pins[pin].on_active.append(on_active)
if on_inactive:
self._pins[pin].on_inactive.append(on_inactive)
def register_long_active(self, pin: int, callback: Callable[[int], None],
seconds: int) -> None:
"""Register a callback for a long change to active.
Arguments:
pin: The BCM-number of the pin.
callback: Function to call if the state changes to active.
seconds: The time button needs to be pressed before
callback is fired.
"""
if not pin in self._pins:
logger.debug('registering new pin %s', pin)
self._pins[pin] = GPIOPin(pin)
self._pins[pin].on_long_active.append((seconds * 1000, callback))
# sort callbacks by the time the button needs to be pressed
self._pins[pin].on_long_active.sort(key=lambda x: x[0])
@contextlib.contextmanager
def open_chip(self) -> Iterator[gpiod.Chip]:
"""Opens the chip and requests the registered lines.
Yields:
The handle of the chip.
"""
self._chip = gpiod.Chip(f'gpiochip{self._chip_number}')
logger.debug('opened chip: %s', self._chip)
# pylint: disable=consider-iterating-dictionary
for i in self._pins.keys():
logger.debug('requesting line: %s',
self._chip.get_line(i).offset())
self._chip.get_line(i).request(
consumer="GPIODMonitor",
type=gpiod.LINE_REQ_DIR_IN,
flags=gpiod.LINE_REQ_FLAG_BIAS_PULL_UP
| gpiod.LINE_REQ_FLAG_ACTIVE_LOW)
yield self._chip
self._chip.close()
self._chip = None
def tick(self) -> None:
"""Check the state of all registered pins."""
if self._chip is None:
raise IOError('Chip not opened.')
for number, pin in self.get_pins().items():
pin.tick(self.is_raw_pin_active(number))
    def monitor(self):
        """Monitor all registered pins ("lines") for a change in state.

        Blocks forever, polling every ``check_interval`` milliseconds;
        exits the process with status 130 on Ctrl-C.
        """
        if not self._chip is None:
            logger.error(
                'chip has already been opend using the context manager')
            return
        with self.open_chip() as chip:
            # NOTE(review): redundant — open_chip() already set _chip.
            self._chip = chip
            try:
                logger.debug('starting the loop')
                while True:
                    # check according to interval
                    time.sleep(self.check_interval / 1000)
                    self.tick()
            except KeyboardInterrupt:
                sys.exit(130)
        # NOTE(review): also redundant — open_chip() clears _chip on exit,
        # and the loop above only terminates via sys.exit().
        self._chip = None
if __name__ == '__main__':
    # Demo / manual test: monitor the given pins on the given chip and
    # print every state change.
    import argparse
    parser: argparse.ArgumentParser = argparse.ArgumentParser()
    parser.add_argument("chip", help="the number of the chip", type=int)
    parser.add_argument("pins",
                        help="the numbers of the pins to monitor",
                        type=int,
                        nargs='+')
    parser.add_argument('-v',
                        '--verbosity',
                        help='increase verbosity',
                        action='count',
                        default=0)
    args: argparse.Namespace = parser.parse_args()

    # Map the number of -v flags to a log level; clamp the index so
    # passing more than three -v flags no longer raises an IndexError.
    verbosity = ['ERROR', 'WARNING', 'INFO', 'DEBUG']
    root_logger: logging.Logger = logging.getLogger()
    root_logger.setLevel(verbosity[min(args.verbosity, len(verbosity) - 1)])

    def dummy_active(pin: int):
        """Dummy function."""
        print(f'{pin} is active')

    def dummy_inactive(pin: int):
        """Dummy function."""
        print(f'{pin} is inactive')

    def dummy_long_active(pin: int):
        """Dummy function."""
        print(f'{pin} has been active for a long time')

    monitor = GPIODMonitor(args.chip)
    for gpio_pin in args.pins:
        monitor.register(int(gpio_pin),
                         on_active=dummy_active,
                         on_inactive=dummy_inactive)
        monitor.register_long_active(int(gpio_pin),
                                     callback=dummy_long_active,
                                     seconds=3)
    with monitor.open_chip():
        try:
            while True:
                # check according to interval
                time.sleep(monitor.check_interval / 1000)
                monitor.tick()
        except KeyboardInterrupt:
            sys.exit(130)
    # or use (equivalent but you don't have controll over the loop):
    # chip.monitor()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
35479,
50143,
20567,
5855,
6615,
4943,
1262,
262,
366,
3605,
1,
835,
357,
8019,
31197,
2101,
737,
198,
198,
19093,
340,
8338,
319,
21015,
18,
12,
31197,
2101,
852,
6589... | 2.164032 | 7,401 |
#!/usr/bin/env python3
"""
gen-attributions.py: a tool for generating various OSS attributions docs.
See tools/scripts/README.md for instructions on how to format license files.
usage:
gen-attributions.py BUILD_TYPE [OUTPUT_FILE]
where BUILD_TYPE is one of:
    sphinx -- generate a ReST file for the sphinx documentation
master -- generate a debian copyright file for determined-master
agent -- generate a debian copyright file for determined-agent
"""
import email
import os
import sys
from typing import IO, List, Optional
# Maps the short license identifier used in our license files to the
# human-readable license name shown in generated attribution documents.
known_licenses = {
    "apache2": "Apache-2.0",
    "bsd2": "BSD 2-clause",
    "bsd3": "BSD 3-clause",
    "mit": "MIT",
    "mozilla": "Mozilla Public License",
    "unlicense": "Unlicense",
}
def sphinx_format_header(text: str, char: str) -> str:
    """Render *text* as an over/underlined ReST section header.

    Example output::

        *******
         WebUI
        *******
    """
    rule = char * (len(text) + 2)
    return f"{rule}\n {text}\n{rule}"
def gen_sphinx_table(licenses: List[License]) -> str:
    """Render the given licenses as a two-column ReST list-table.

    Column one is the package name, column two a cross-reference to the
    license text section, e.g.::

        .. list-table::
           :header-rows: 1

           * - Package
             - License
           * - gopkg.in/tomb.v1
             - :ref:`BSD 3-clause <tomb>`
    """
    header = [
        ".. list-table::",
        "   :header-rows: 1",
        "",
        "   * - Package",
        "     - License",
    ]
    rows = []
    for entry in licenses:
        rows.append(f"   * - {entry.name}")
        rows.append(f"     - {entry.sphinx_ref()}")
    return "\n".join(header + rows)
sphinx_preamble = """
######################
Open Source Licenses
######################
The following sets forth attribution notices for third-party software
that may be contained in Determined. We thank the open-source community
for all of their contributions.
""".strip()
def build_sphinx(licenses: List[License]) -> str:
    """Build the sphinx-format attributions.txt with all attributions.

    Emits the preamble, one header+table per shipped component, and then
    the full license text of every entry.
    """
    sections = [sphinx_preamble]
    # One table per component, filtered by the license's membership flag.
    for title, flag in (("WebUI", "webui"),
                        ("Determined Master", "master"),
                        ("Determined Agent", "agent")):
        sections.append(sphinx_format_header(title, "*"))
        sections.append(
            gen_sphinx_table([lic for lic in licenses if getattr(lic, flag)]))
    # Full license texts follow the tables.
    sections.extend(lic.sphinx_entry() for lic in licenses)
    return "\n\n".join(sections)
if __name__ == "__main__":
    # Usage: gen-attributions.py BUILD_TYPE [OUTPUT_FILE]
    if len(sys.argv) not in (2, 3):
        print(__doc__, file=sys.stderr)
        sys.exit(1)
    build_type = sys.argv[1]
    # Optional second argument: write to this path instead of stdout.
    path_out = sys.argv[2] if len(sys.argv) == 3 else None
    # main() is defined elsewhere in this module (not shown in this excerpt).
    sys.exit(main(build_type, path_out))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
37811,
198,
5235,
12,
1078,
2455,
507,
13,
9078,
25,
257,
2891,
329,
15453,
2972,
440,
5432,
24548,
507,
34165,
13,
198,
198,
6214,
4899,
14,
46521,
14,
15675,
11682,
13,
91... | 2.368598 | 1,191 |
import json
from ..ext.database import db
| [
11748,
33918,
201,
198,
6738,
11485,
2302,
13,
48806,
1330,
20613,
201,
198,
201,
198
] | 3.066667 | 15 |
import weakref
from re import sub
from typing import Any
from sqlalchemy import MetaData, Integer, Column, Sequence
from sqlalchemy.ext.declarative import declarative_base, declared_attr, DeclarativeMeta
def gen_tablenames(name: str) -> str:
    """Convert a CamelCase class name into a snake_case table name.

    An underscore is inserted before every uppercase letter except the
    first character, then the result is lowercased (so a run of capitals
    such as "HTTP" is split letter by letter).
    """
    with_separators = sub(r"(?<!^)(?=[A-Z])", "_", name)
    return with_separators.lower()
# Declarative base shared by all model classes.  The naming_convention
# gives every index/constraint a deterministic name (useful e.g. for
# schema migrations).
# NOTE(review): ``_declared_Base`` is defined elsewhere in this module.
Base = declarative_base(
    cls=_declared_Base,
    metadata=MetaData(
        naming_convention={
            "ix": "ix_%(column_0_label)s",  # index
            "uq": "uq_%(table_name)s_%(column_0_name)s",  # unique constraint
            "ck": "ck_%(table_name)s_%(constraint_name)s",  # check constraint
            "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",  # foreign key
            "pk": "pk_%(table_name)s",  # primary key
        }
    ),
)
# noinspection PyMethodMayBeStatic
# Public API of this module (``Registry`` is defined elsewhere).
__all__ = ["gen_tablenames", "Base", "Registry"]
| [
11748,
4939,
5420,
198,
6738,
302,
1330,
850,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
44161,
282,
26599,
1330,
30277,
6601,
11,
34142,
11,
29201,
11,
45835,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
... | 2.295122 | 410 |
"""All Tosurnament module mock objects."""
import json
import os
from unittest import mock
from mysqldb_wrapper import crypt
def query_side_effect_wrapper(session_mock):
    """Side effect for the query function, used to return the stubs in the storage."""
    # NOTE(review): ``query_side_effect`` is not defined in this excerpt —
    # presumably a nested closure over ``session_mock``; verify upstream.
    return query_side_effect
def add_side_effect_wrapper(session_mock):
    """Side effect for the add function, used to add an id to the input table."""
    # NOTE(review): ``add_side_effect`` is not defined in this excerpt —
    # presumably a nested closure over ``session_mock``; verify upstream.
    return add_side_effect
def delete_side_effect_wrapper(session_mock):
    """Side effect for the delete function, used to remove a stub from the storage."""
    # NOTE(review): docstring previously said "add" (copy-paste); corrected.
    # ``delete_side_effect`` is not defined in this excerpt; verify upstream.
    return delete_side_effect
class SessionMock:
    """A mock for the session. Includes utility functions to simulate a storage."""

    def add_stub(self, stub):
        """Adds a stub in the mock. The added stubs will be used when retrieving an object."""
        stub._session = self
        add_side_effect_wrapper(self)(stub)

    def reset_stub(self, table):
        """Resets all the stubs of the table."""
        self.tables[table.__tablename__] = []

    def compare_table_objects(self, other):
        """Compares 2 table class objects.

        Only fields that are encrypted, or declared as ``crypt.Id`` (other
        than the primary key ``id``), take part in the comparison; private
        fields and the created/updated timestamps are ignored.
        """
        if type(self) is not type(other):
            return False
        for key in vars(self):
            if (
                not key.startswith("_")
                and key not in ["created_at", "updated_at"]
                and (crypt.is_encrypted(self, key) or (isinstance(getattr(type(self)(), key), crypt.Id) and key != "id"))
            ):
                # (A leftover debug print of every compared field was removed.)
                if getattr(self, key) != getattr(other, key):
                    return False
        return True
class Matcher:
"""Comparator of table class objects. To use with mock.call and to check against a call_args_list."""
| [
37811,
3237,
40195,
700,
3263,
8265,
15290,
5563,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
616,
31166,
335,
65,
62,
48553,
1330,
8194,
628,
198,
4299,
12405,
62,
1589,... | 2.706605 | 651 |
import pytest
import numpy as np
from bmtk.simulator.utils import nwb
import os
import h5py
@pytest.mark.skip(reason='Ability to add 0-lenght datasetset has been removed in newer version of h5py')
if __name__ == "__main__":
    # Manual test runner: exercises each nwb helper in sequence when this
    # module is executed directly (pytest normally collects these).
    test_create_blank_file()  # pragma: no cover
    test_create_blank_file_force()  # pragma: no cover
    test_set_data_file_handle()  # pragma: no cover
    test_set_data_force()  # pragma: no cover
    test_get_data()  # pragma: no cover
    test_metadata()  # pragma: no cover
    test_add_shared_scale()  # pragma: no cover
    test_firing_rate()  # pragma: no cover
    test_processing()  # pragma: no cover
    test_analysis()  # pragma: no cover
    test_spike_train()  # pragma: no cover
    test_grayscale_movie()  # pragma: no cover
    # test_get_stimulus() # pragma: no cover
    test_different_scales()
    test_writable()
    #test_nullscale()
    test_timeseries()
    test_external_link()
| [
11748,
12972,
9288,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
275,
16762,
74,
13,
14323,
8927,
13,
26791,
1330,
299,
39346,
198,
11748,
28686,
198,
11748,
289,
20,
9078,
628,
220,
220,
220,
220,
198,
220,
220,
220,
220,
628,
628,... | 2.145669 | 508 |
from fuzzywuzzy import process
import codecs#, re, sys
from residents import RESIDENTS, NAMES
# Ask the user where to write the matching report (Python 2 script).
filename = raw_input('Enter a name for the new file: ')

# Context manager guarantees the file is closed even if matching fails;
# also avoids shadowing the ``file`` builtin.
with codecs.open(filename, 'w', encoding='utf-8', errors='ignore') as report:
    for resident in RESIDENTS:
        # Best fuzzy match of the resident string against the known names.
        match = process.extractOne(resident, NAMES)
        name, accuracy = match[0], match[1]
        s = "'%s': '%s', %s\n" % (resident, name, accuracy)
        report.write(unicode(s))
6738,
34669,
86,
4715,
88,
1330,
1429,
198,
11748,
40481,
82,
2,
11,
302,
11,
25064,
198,
6738,
5085,
1330,
15731,
2389,
15365,
11,
399,
29559,
198,
198,
34345,
796,
8246,
62,
15414,
10786,
17469,
257,
1438,
329,
262,
649,
2393,
25,
... | 2.881356 | 177 |
import argparse
import os
import errno
import io
import importlib
import sys
from datetime import datetime, timedelta
try:
from urllib.parse import urlparse # Python 3
except ImportError:
from urlparse import urlparse # Python 2
# import requests
# Base URL of the CKAN v3 API for the IOOS Catalog (default of --catalog_api_url).
IOOS_CATALOG_URL = "https://data.ioos.us/api/3"
# Names of the Action modules that may be selected via --action.
VALID_QUERY_ACTIONS = ['resource_cc_check', 'dataset_list', 'dataset_list_by_filter']
def main():
    """
    Command line interface

    Parses the CLI arguments, validates the catalog URL and the requested
    action, dynamically imports the matching Action module from
    ``catalog_query.action`` and runs it with the collected options.
    """

    kwargs = {
        'description': 'Query the CKAN API from IOOS Catalog (or other) to get stuff.',
        'formatter_class': argparse.RawDescriptionHelpFormatter,
    }
    parser = argparse.ArgumentParser(**kwargs)
    parser.add_argument('-c', '--catalog_api_url', type=str, default=IOOS_CATALOG_URL,
                        help='URL of CKAN Catalog to query. Default: {cat_url}'.format(cat_url=IOOS_CATALOG_URL))
    parser.add_argument('-a', '--action', type=str, required=True,
                        help='Name of a defined Action (CKAN query plus any subsequent analysis) to run. Current provided actions: {valid}'.format(valid=", ".join(VALID_QUERY_ACTIONS)))
    parser.add_argument('-o', '--output', type=str, required=False,
                        help='Output filename (path to a file to output results to). Will default to a randomized output file name with the Action prefix.')
    parser.add_argument('-e', '--error_output', type=str, required=False,
                        help='Error output filename (path to a file to output results to). Will default to a randomized error output file name with the Action prefix.')
    parser.add_argument('-q', '--query_params', type=str, required=False,
                        help='Query parameter value(s) to pass to the query action. Multiple query parameters needed for actions that expect multiple parameters can be passed as a comma separated string (eg. \'-q=name:AOOS,format:OPeNDAP or -q=name:NANOOS,resource_format:ERDDAP,resource_name:OPeNDAP)\' to run AOOS OPeNDAP services through the Compliance Checker test) ')
    parser.add_argument('-op', '--operator', type=str, required=False, default='AND',
                        help='The operator to use when concatenating query parameters together. Appropriate values are: AND, OR.')
    parser.add_argument('-t', '--cc_tests', type=str, required=False,
                        help='Compliance checker tests to run (by name, comma-separated) (eg \'-t=acdd:1.3,cf:1.6,ioos\')')
    args = parser.parse_args()

    # Reject URLs without scheme/host, or with query-string components.
    catalog_api_url = urlparse(args.catalog_api_url)
    if not catalog_api_url.scheme or not catalog_api_url.netloc:
        sys.exit("Error: '--catalog_api_url' parameter value must contain a valid URL. Value passed: {param}".format(param=args.catalog_api_url))
    if catalog_api_url.params or catalog_api_url.query:
        sys.exit("Error: '--catalog_api_url' parameter should not contain query parameters ('{query}'). Please include only the service endpoint URL. Value passed: {param}".format(query=catalog_api_url.query, param=args.catalog_api_url))

    # check to make sure the 'action' argument passed matches an expected query action type:
    if args.action not in VALID_QUERY_ACTIONS:
        sys.exit("Error: '--action' parameter value must contain a known query action. Valid query actions: {valid}. Value passed: {param}".format(valid=", ".join(VALID_QUERY_ACTIONS), param=args.action))

    # perform the query action (if value passed is known):
    for query_action in VALID_QUERY_ACTIONS:
        if args.action == query_action:
            print("query action: " + query_action)
            try:
                # try relative importlib import of action_module (Python 2.7?)
                action_module = importlib.import_module(".{module}".format(module=query_action), package="catalog_query.action")
                # for a same-level import (no submodule):
                # action_module = importlib.import_module(".%s" % query_action, package="catalog_query")
            # handle ImportError and instead try absolute module import (catalog_query.action.*) (Python 3?):
            except (SystemError, ImportError, ModuleNotFoundError) as e:
                action_module = importlib.import_module("catalog_query.action.{module}".format(module=query_action))
            Action = action_module.Action
            # import failure attempts:
            # from .action.query_action import Action
            # module = "action." + query_action + ".Action"
            # __import__(module)
            # Action = importlib.import_module(".action." + query_action + ".Action")
            # Action = importlib.import_module("..Action.", "action." + query_action)
            # module = __import__(".action." + query_action, globals(), locals(), ['Action'])

            # Collect only the options the user actually supplied.
            spec = {}
            if args.catalog_api_url:
                spec['catalog_api_url'] = args.catalog_api_url
            if args.output:
                spec['output'] = args.output
            if args.error_output:
                spec['error_output'] = args.error_output
            if args.query_params:
                spec['query'] = args.query_params
            if args.operator:
                spec['operator'] = args.operator
            if args.cc_tests:
                spec['cc_tests'] = args.cc_tests
            try:
                action = Action(**spec)
                action.run()
            # Best-effort: report the failure without a traceback.
            except Exception as e:
                print(e)
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
11454,
3919,
198,
11748,
33245,
198,
11748,
1330,
8019,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
28311,
25,
198,
220,
220,
220,
422,
2956,
297,
... | 2.516352 | 2,171 |
"""
Utilities and helpers for writing tests.
"""
import torch
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.common.testing.model_test_case import ModelTestCase
_available_devices = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])
def multi_device(test_method):
    """
    Decorator that provides an argument `device` of type `str` for each available PyTorch device.
    """
    gpu_marked = pytest.mark.gpu(test_method)
    parametrizer = pytest.mark.parametrize("device", _available_devices)
    return parametrizer(gpu_marked)
def requires_gpu(test_method):
    """
    Decorator to indicate that a test requires a GPU device.
    """
    skip_without_cuda = pytest.mark.skipif(
        not torch.cuda.is_available(), reason="No CUDA device registered."
    )
    return pytest.mark.gpu(skip_without_cuda(test_method))
def requires_multi_gpu(test_method):
    """
    Decorator to indicate that a test requires multiple GPU devices.
    """
    skip_without_two = pytest.mark.skipif(
        torch.cuda.device_count() < 2, reason="2 or more GPUs required."
    )
    return pytest.mark.gpu(skip_without_two(test_method))
| [
37811,
198,
18274,
2410,
290,
49385,
329,
3597,
5254,
13,
198,
37811,
198,
11748,
28034,
198,
11748,
12972,
9288,
198,
198,
6738,
477,
1697,
34431,
13,
11321,
13,
33407,
13,
9288,
62,
7442,
1330,
9659,
45,
34431,
14402,
20448,
198,
6738... | 2.708955 | 402 |
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from app import app
import config
| [
11748,
895,
83,
489,
571,
198,
6738,
3053,
13,
44,
3955,
3620,
586,
541,
433,
1330,
337,
3955,
3620,
586,
541,
433,
198,
6738,
3053,
13,
44,
3955,
2767,
2302,
1330,
337,
3955,
2767,
2302,
198,
6738,
598,
1330,
598,
198,
11748,
4566,... | 2.977273 | 44 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Versão: 2.0
Script responsavel por realizar o undeploy e deploy do aplicativo no WAS, alem de compilar atraves do Maven.
Execução:
$ cd C:\<CAMINHO_WAS>\WebSphere\AppServer\bin
$ wsadmin.bat -lang jython -f <CAMINHO_SCRIPT> -username <USUARIO> -password <SENHA>
Autor: Filipe Manuel
'''
import time
import os
import sys
import string
import subprocess
# List of accepted command-line arguments
ARGUMENTOS = [
    'start'
]
# Project base path
# Ex: C:\\Desenvolvimento\\Projetos\\AppCorporativo\\
PROJETO_BASE_LOCAL = '<CAMINHO_DO_PROJETO>'
# Application (EAR) name
# Ex: app-corporativo
APP_NOME = '<NOME_DO_APLICATIVO_EAR>'
# Path to the built EAR
# Ex: \\AppCorporativoEAR\\target\\
APP_LOCAL = PROJETO_BASE_LOCAL + '<CAMINHO_DA_PASTA_TARGET>'
# Projects/folders to compile, in order; the EAR and WAR projects must
# therefore come last.
PROJETOS = [
    'ModuloEJB',
    'ModuloWAR',
    'ModuloEAR'
]
if __name__ == '__main__':
    # Wall-clock start, for the total-time report at the end.
    inicio = time.time()
    gerenciarApp = GerenciarApp()
    compilarApp = CompilarApp()
    try:
        # Report status, build via Maven, then redeploy on WAS.
        gerenciarApp.app_status_info()
        compilarApp.compilar()
        gerenciarApp.undeploy()
        # NOTE(review): sys.argv[0] is the script path — confirm this is
        # really what deploy() expects as its argument.
        gerenciarApp.deploy(sys.argv[0])
    except Exception as ex:
        exibir_mensagem('Erro: {}'.format(ex))
    # Elapsed time in minutes.
    minutos = ((time.time() - inicio) / 60)
    exibir_mensagem('Tempo total: {} min'.format(minutos))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
34947,
28749,
25,
362,
13,
15,
198,
7391,
2424,
64,
626,
16964,
1103,
528,
283,
267,
44192,
1420,
304,
6061,
... | 2.194099 | 644 |
import sys
import numpy as np
from .config.pDeep_config import Common_Config
# Instrument names known to the shared pDeep configuration.
instrument_list = Common_Config().instrument_list
# Maximum number of instruments supported.
instrument_num = Common_Config().max_instrument_num
# instrument name -> integer index
instrument_dict = dict(zip(instrument_list, range(len(instrument_list))))
# Order of the per-sample feature tuple used by the bucket helpers below.
_feature_name_list = ['x', 'mod_x', 'charge', 'nce', 'instrument', 'y', 'pepinfo']
# feature name -> position within that tuple
_feature_name_dict = dict(zip(_feature_name_list, range(len(_feature_name_list))))
# Alias kept for external callers.
bucket_item_dict = _feature_name_dict
# def write_buckets_mgf(outfile, buckets, predict_buckets, fconfig, ioncalc, iontypes=['b{}', 'y{}']):
# def write_one(f, pepinfo, pred):
# peptide, mod, charge = pepinfo.split("|")
# f.write('BEGIN IONS\n')
# f.write('TITLE=' + pepinfo + '\n')
# f.write('CHARGE=' + charge + '+\n')
# pre_charge = int(charge)
# f.write('pepinfo=' + pepinfo + '\n')
# ions = {}
# modmass, lossmass, modname = ioncalc.calc_mod_mass_list(peptide, mod)
# bions = ioncalc.calc_b_ions(peptide, modmass)
# pepmass = ioncalc.calc_pepmass_from_b(peptide, modmass, bions)
# yions = ioncalc.calc_y_from_b(bions, pepmass)
# if 'b{}' in fconfig.ion_types and 'b{}' in iontypes: ions['b{}'] = bions
# if 'y{}' in fconfig.ion_types and 'y{}' in iontypes: ions['y{}'] = yions
# if 'c{}' in fconfig.ion_types and 'c{}' in iontypes: ions['c{}'] = ioncalc.calc_c_from_b(bions)
# if 'z{}' in fconfig.ion_types and 'z{}' in iontypes: ions['z{}'] = ioncalc.calc_z_from_b(bions, pepmass)
# if 'b{}-ModLoss' in fconfig.ion_types and 'b{}-ModLoss' in iontypes: ions[
# 'b{}-ModLoss'] = ioncalc.calc_Nterm_modloss(bions, lossmass, modname)
# if 'y{}-ModLoss' in fconfig.ion_types and 'y{}-ModLoss' in iontypes: ions[
# 'y{}-ModLoss'] = ioncalc.calc_Cterm_modloss(yions, lossmass, modname)
# max_charge = fconfig.max_ion_charge if pre_charge >= fconfig.max_ion_charge else pre_charge
# peak_list = []
# for ion_type in ions.keys():
# x_ions = np.array(ions[ion_type])
# for charge in range(1, max_charge + 1):
# intens = pred[:, fconfig.GetIonIndexByIonType(ion_type, charge)]
# f.write('{}={}\n'.format(ion_type.format("+" + str(charge)),
# ','.join(['%.5f' % inten for inten in intens])))
# peak_list.extend(zip(x_ions / charge + ioncalc.base_mass.mass_proton, intens))
# pepmass = pepmass / pre_charge + ioncalc.base_mass.mass_proton
# f.write("PEPMASS=%.5f\n" % pepmass)
# peak_list.sort()
# for mz, inten in peak_list:
# if inten > 1e-8: f.write("%f %.8f\n" % (mz, inten))
# f.write('END IONS\n')
# with open(outfile, 'w') as f:
# for key, value in buckets.items():
# preds = predict_buckets[key][-1]
# for i in range(value[-1].shape[0]):
# write_one(f, value[-1][i], preds[i])
# def write_buckets(outfile, buckets, predict_buckets, iontypes=['b+1', 'b+2', 'y+1', 'y+2']):
# def write_one(f, pepinfo, pred):
# f.write('BEGIN IONS\n')
# f.write('pepinfo=' + pepinfo + '\n')
# for i in range(len(iontypes)):
# f.write('{}={}\n'.format(iontypes[i], ','.join(['%.5f' % inten for inten in pred[:, i]])))
# f.write('END IONS\n')
# with open(outfile, 'w') as f:
# for key, value in buckets.items():
# preds = predict_buckets[key][-1]
# for i in range(value[-1].shape[0]):
# write_one(f, value[-1][i], preds[i])
# write_predict = write_buckets
| [
11748,
25064,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
764,
11250,
13,
79,
29744,
62,
11250,
1330,
8070,
62,
16934,
198,
198,
259,
43872,
62,
4868,
796,
8070,
62,
16934,
22446,
259,
43872,
62,
4868,
198,
259,
43872,
62,
225... | 2.041597 | 1,803 |
import sys
if __name__ == "__main__":
    # Expect exactly one CLI argument: the path of the input file.
    if len(sys.argv) == 2:
        with open(sys.argv[1], 'r') as source:
            contents = source.read()
        print(rna(contents))
    else:
        print("Invoke the program passing a path to a file as an argument")
| [
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
611,
18896,
7,
17597,
13,
853,
85,
8,
14512,
362,
25,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7203,
19904,
2088,
262,
... | 2.196262 | 107 |
from copy import copy
from typing import Optional
from hdlConvertorAst.hdlAst._defs import HdlIdDef
from hdlConvertorAst.hdlAst._expr import HdlValueId, HdlOp, HdlOpType
from hdlConvertorAst.hdlAst._structural import HdlModuleDec, HdlCompInst
from hdlConvertorAst.to.systemc.keywords import SYSTEMC_KEYWORDS
from hdlConvertorAst.translate.common.name_scope import LanguageKeyword, NameScope
from hwt.hdl.portItem import HdlPortItem
from hwt.interfaces.std import Clk
from hwt.serializer.generic.to_hdl_ast import ToHdlAst, \
HWT_TO_HDLCONVEROTR_DIRECTION
from hwt.serializer.simModel.serializer import ToHdlAstSimModel
from hwt.serializer.systemC.expr import ToHdlAstSystemC_expr
from hwt.serializer.systemC.statements import ToHdlAstSystemC_statements
from hwt.serializer.systemC.type import ToHdlAstSystemC_type
from ipCorePackager.constants import DIRECTION
class ToHdlAstSystemC(ToHdlAstSystemC_expr, ToHdlAstSystemC_type,
                      ToHdlAstSystemC_statements,
                      ToHdlAst):
    """
    Serializer used to convert a HWT design to SystemC code
    """
    # Reserved SystemC words that must not be used as generated identifiers.
    _keywords_dict = {kw: LanguageKeyword() for kw in SYSTEMC_KEYWORDS}
    # Identifiers of the SystemC port types emitted for clock and ordinary
    # ports (obj=LanguageKeyword() marks them as reserved for name scoping).
    sc_in_clk = HdlValueId("sc_in_clk", obj=LanguageKeyword())
    sc_out_clk = HdlValueId("sc_out_clk", obj=LanguageKeyword())
    sc_inout_clk = HdlValueId("sc_inout_clk", obj=LanguageKeyword())
    sc_in = HdlValueId("sc_in", obj=LanguageKeyword())
    sc_out = HdlValueId("sc_out", obj=LanguageKeyword())
    sc_inout = HdlValueId("sc_inout", obj=LanguageKeyword())
| [
6738,
4866,
1330,
4866,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
289,
25404,
3103,
1851,
273,
33751,
13,
71,
25404,
33751,
13557,
4299,
82,
1330,
367,
25404,
7390,
7469,
198,
6738,
289,
25404,
3103,
1851,
273,
33751,
13,
71,
25404... | 2.563545 | 598 |
import sys
import os
import io
import re
# Make the parent directory and its lib/ subdirectory importable so
# sibling modules (e.g. ``misc``) resolve when this file runs as a script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
from misc import printdbg
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
33245,
198,
11748,
302,
198,
17597,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
492,
6,
4008,
198,
17597,
13,
6978,
... | 2.576923 | 78 |
# https://stackoverflow.com/a/33453124/6210398
import threading
import sys
from requests import get as requests_get
from zipfile import ZipFile
import os
from shutil import copyfile
from bs4 import BeautifulSoup
from packaging.version import Version, parse
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
import time
import subprocess
from setenv import manage_registry_env_vars
from win32file import GetLongPathName
# from win32api import GetShortPathName
if getattr(sys, 'frozen', False):
    # frozen: running from a bundled executable; __file__ is unreliable
    # there, so anchor on the executable's location instead.
    dir_ = os.path.dirname(sys.executable)
else:
    # unfrozen: running from source; anchor on this file's real location.
    dir_ = os.path.dirname(os.path.realpath(__file__))
| [
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
64,
14,
2091,
36625,
17464,
14,
5237,
940,
31952,
198,
11748,
4704,
278,
198,
11748,
25064,
198,
6738,
7007,
1330,
651,
355,
7007,
62,
1136,
198,
6738,
19974,
7753,
1330,
38636,
8979,
19... | 3.028708 | 209 |
# -*- coding: UTF-8 -*-
# Copyright 2016-2017 Luc Saffre
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from lino.api import rt, _
from lino.utils.cycler import Cycler
from lino.utils.instantiator import create_row
from lino_xl.lib.tickets.choicelists import TicketStates
from lino.api.dd import str2kw
from lino.api import dd
TICKET_STATES = Cycler(TicketStates.objects())
from lino.modlib.users.utils import create_user
# def S(name, **kw):
# kw.update(name=name)
# # return rt.models.tickets.Site(**kw)
# return dd.plugins.tickets.site_model(**kw)
# def ticket(username, summary, en, skill=None, **kw):
# ar = rt.login(username)
# u = ar.get_user() # rt.models.users.User.objects.get(username=user)
# if en and u.language != 'de':
# summary = en
# kw.update(summary=summary, user=u)
# # if no manual state is specified, take a random one:
# if not 'state' in kw:
# kw.update(state=TICKET_STATES.pop())
# t = create_row(rt.models.tickets.Ticket, **kw)
# t.after_ui_create(ar) # create author's vote
# yield t
# if skill is not None:
# yield rt.models.skills.Demand(demander=t, skill=skill)
# def competence(username, first_name, skill, **kw):
# Person = rt.models.contacts.Person
# kw.update(
# end_user=Person.objects.get(
# name=first_name))
# kw.update(skill=skill)
# kw.update(user=rt.models.users.User.objects.get(username=username))
# return rt.models.skills.Competence(**kw)
# def vote(user, ticket, state, **kw):
# u = rt.models.users.User.objects.get(username=user)
# t = rt.models.tickets.Ticket.objects.get(pk=ticket)
# s = rt.models.votes.VoteStates.get_by_name(state)
# return rt.models.votes.Vote(user=u, votable=t, state=s, **kw)
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
12,
5539,
7598,
311,
2001,
260,
198,
2,
13789,
25,
347,
10305,
357,
3826,
2393,
27975,
45761,
329,
3307,
8,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
... | 2.352564 | 780 |
#!/usr/bin/env python
#
# Test cases for tournament.py
from tournament import *
if __name__ == '__main__':
    # Runs the whole (Python 2) tournament test suite in order; each
    # helper raises on failure, so reaching the final print means success.
    test_delete_matches()
    test_delete()
    test_count()
    test_register()
    test_register_count_delete()
    test_standings_before_matches()
    test_report_matches()
    test_pairings()
    # Custom tests
    test_new_database()
    print "Success!  All tests pass!"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
6208,
2663,
329,
7756,
13,
9078,
198,
198,
6738,
7756,
1330,
1635,
628,
628,
628,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
... | 2.594771 | 153 |
# Name associated with a set of parameters and covariance matrix. Will be used
# to retrieve it
name = 'test_cov'
# Interpretation of the uncertainties below — presumably 'relative' vs.
# 'absolute'; confirm against the consumer of this config.
mode = 'relative'
uncorr_uncs = [0.5, 0.1]
# 'keep' stands for keeping uncorrelated uncertainties of parameters and
# 'override' stands for substituting their values from uncorr_uncs
policy = 'keep'
params = ['extra1', 'extra2', 'extra3']
cov_mat = [[1, 0.1, 0.1], [0.1, 1, 0.1], [0.1, 0.1, 1]]
| [
2,
6530,
840,
418,
12931,
351,
257,
900,
286,
10007,
290,
44829,
590,
17593,
13,
2561,
307,
973,
198,
2,
284,
19818,
340,
220,
198,
3672,
796,
705,
9288,
62,
66,
709,
6,
198,
14171,
796,
705,
43762,
6,
198,
403,
10215,
81,
62,
4... | 2.703947 | 152 |
"""Atakama sdk."""
# only import stuff here that has no co-deps, external-lib deps, etc.
# otherwise, the user should import the from a specific submodule
from .plugin_base import *
| [
37811,
2953,
461,
1689,
264,
34388,
526,
15931,
198,
198,
2,
691,
1330,
3404,
994,
326,
468,
645,
763,
12,
10378,
82,
11,
7097,
12,
8019,
390,
862,
11,
3503,
13,
198,
2,
4306,
11,
262,
2836,
815,
1330,
262,
422,
257,
2176,
850,
... | 3.345455 | 55 |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/04_callbacks.ipynb
from exp.nb_03 import *
import re
_camel_re1 = re.compile('(.)([A-Z][a-z]+)')
_camel_re2 = re.compile('([a-z0-9])([A-Z])')
from typing import *
from functools import partial | [
198,
29113,
14468,
2,
198,
21017,
12680,
45811,
21725,
47044,
7730,
1677,
1137,
11617,
0,
8410,
5626,
48483,
0,
44386,
198,
29113,
14468,
2,
198,
2,
2393,
284,
4370,
25,
1614,
62,
46803,
14,
3023,
62,
13345,
10146,
13,
541,
2047,
65,
... | 2.968 | 125 |
# -*- coding: utf-8 -*-
import os
import sys
import requests
from definitions import ROOT_DIR
# Public S3 location of the sudoku puzzle dataset to download.
ADDRESS = "https://s3.ap-south-1.amazonaws.com/shreyas-gopal-personal/sudoku.csv"
# Local file name the downloaded dataset is saved under.
FILE_NAME = "sudoku.csv"
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
7007,
198,
198,
6738,
17336,
1330,
15107,
2394,
62,
34720,
198,
198,
2885,
7707,
7597,
796,
366,
5450,
1378,
82,
18,
13,
499... | 2.47561 | 82 |
import pytest
from valid8 import ValidationError
from core.menu.menu import Key
| [
11748,
12972,
9288,
198,
6738,
4938,
23,
1330,
3254,
24765,
12331,
198,
198,
6738,
4755,
13,
26272,
13,
26272,
1330,
7383,
628,
628,
628,
198
] | 3.48 | 25 |
"""Base class for GLU callback-caching structures"""
import ctypes
import weakref
from OpenGL._bytes import long, integer_types
class GLUStruct( object ):
    """Mix-in class for GLU Structures that want to retain references to callbacks

    Also provides original-object-return for the "datapointer" style parameters

    Each sub-class must override:
        CALLBACK_TYPES -- maps a "which" constant to a function type
        CALLBACK_FUNCTION_REGISTRARS -- maps a "which" constant to the
            registration function for functions of that type
        WRAPPER_METHODS -- maps a "which" constant to a method of the structure
            that produces a callable around the function which takes care of
            input/output arguments, data conversions, error handling and the
            like.

    Creates a dictionary member dataPointers if original-object-return is used
    Creates a dictionary member callbacks if callback registration is used
    """
    def getAsParam( self ):
        """Gets as a ctypes pointer to the underlying structure"""
        return ctypes.pointer( self )
    _as_parameter_ = property( getAsParam )
    CALLBACK_TYPES = None
    CALLBACK_FUNCTION_REGISTRARS = None
    WRAPPER_METHODS = None
    def noteObject( self, object ):
        """Note object for later retrieval as a Python object pointer

        This is the registration point for "original object return", returns
        a void pointer to the Python object, though this is, effectively, an
        opaque value.
        """
        identity = id(object)
        try:
            self.dataPointers[ identity ] = object
        except AttributeError as err:
            # First registration: lazily create the mapping.
            self.dataPointers = { identity: object }
        return identity
    def originalObject( self, voidPointer ):
        """Given a void-pointer, try to find our original Python object"""
        if isinstance( voidPointer, integer_types):
            identity = voidPointer
        elif voidPointer is None:
            return None
        else:
            # ctypes void pointer: either a .value attribute or index 0
            # yields the raw integer.
            try:
                identity = voidPointer.value
            except AttributeError as err:
                identity = voidPointer[0]
        # Unknown identities fall back to returning the raw pointer.
        try:
            return self.dataPointers[ identity ]
        except (KeyError,AttributeError) as err:
            return voidPointer
    def addCallback( self, which, function ):
        """Register a callback for this structure object"""
        callbackType = self.CALLBACK_TYPES.get( which )
        if not callbackType:
            raise ValueError(
                """Don't have a registered callback type for %r"""%(
                    which,
                )
            )
        # Optionally wrap the user function (argument conversion, error
        # handling) before creating the ctypes function pointer.
        wrapperMethod = self.WRAPPER_METHODS.get( which )
        if wrapperMethod is not None:
            function = getattr(self,wrapperMethod)( function )
        cCallback = callbackType( function )
        # XXX this is ugly, query to ctypes list on how to fix it...
        try:
            self.CALLBACK_FUNCTION_REGISTRARS[which]( self, which, cCallback )
        except ctypes.ArgumentError as err:
            err.args += (which,cCallback)
            raise
        #gluTessCallbackBase( self, which, cCallback)
        # XXX catch errors!
        # Retain the ctypes callback so it is not garbage collected while
        # the GLU structure is alive.
        if getattr( self, 'callbacks', None ) is None:
            self.callbacks = {}
        self.callbacks[ which ] = cCallback
        return cCallback
    def ptrAsArray( self, ptr, length, type ):
        """Copy length values from ptr into new array of given type"""
        result = type.zeros( (length,) )
        for i in range(length):
            result[i] = ptr[i]
        return result
| [
37811,
14881,
1398,
329,
10188,
52,
23838,
12,
66,
8103,
8573,
37811,
198,
11748,
269,
19199,
198,
11748,
4939,
5420,
198,
6738,
30672,
13557,
33661,
1330,
890,
11,
18253,
62,
19199,
198,
198,
4871,
10188,
52,
44909,
7,
2134,
15179,
198... | 2.488356 | 1,460 |
import argparse
import logging
import os
from pathlib import Path
import deepspeed
import pandas as pd
import torch
from tqdm import tqdm
import transformers
os.environ["TOKENIZERS_PARALLELISM"] = "false"
LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
if __name__ == "__main__":
main()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
2769,
12287,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
1... | 2.867257 | 113 |
##
# Sabertooth.py: Class implementing packetized serial control of
# Sabertooth 2x32 motor driver (Dimension Engineering).
#
# This code was adapted from MIT licensed
# Copyright 2015, Egan McComb
# copywrite 2017 Kevin J. Walchko
#
##
import serial
import logging
import time
class Sabertooth(object):
"""
Sabertooth: A class to control a Sabertooth 2x60 using the packetized
serial mode (DIP switches 1,2 low).
https://www.dimensionengineering.com/datasheets/Sabertooth2x60.pdf
"""
FORWARD_1 = 0x00
REVERSE_1 = 0x01
FORWARD_2 = 0x04
REVERSE_2 = 0x05
FORWARD_MIXED = 0x08
REVERSE_MIXED = 0x09
RIGHT_MIXED = 0x0A
LEFT_MIXED = 0x0B
RAMP = 0x10
def __init__(self, port, baudrate=9600, address=128, timeout=0.1):
"""
baudrate - 2400, 9600, 19200, 38400, 115200
address - motor controller address
timeout - serial read time out
"""
self.port = port
self.address = address
if 128 > self.address > 135:
raise Exception('PySabertooth, invalid address: {}'.format(address))
# if baudrate in [9600, 19200, 38400, 115200]:
# pass
# else:
# raise Exception('PySabertooth, invalid baudrate {}'.format(baudrate))
# Initialize serial port.
self.saber = serial.Serial()
self.saber.baudrate = baudrate
self.saber.port = port
self.saber.timeout = timeout
self.open()
self.setBaudrate(baudrate)
def __del__(self):
"""
Destructor, stops motors and closes serial port
"""
self.stop()
self.close()
return
def info(self):
"""
Prints out connection info
"""
print('')
print('='*40)
print('Sabertooth Motor Controller')
print(' port: {}'.format(self.saber.port))
print(' baudrate: {} bps'.format(self.saber.baudrate))
print(' address: {}'.format(self.address))
print('-'*40)
print('')
def close(self):
"""
Closes serial port
"""
self.saber.close()
def setBaudrate(self, baudrate):
"""
Sets the baudrate to: 2400, 9600, 19200, 38400, 115200
"""
valid = {
2400: 1,
9600: 2,
19200: 3,
38400: 4,
115200: 5
}
if baudrate in valid:
baud = valid[baudrate]
else:
raise Exception('PySabertooth, invalid baudrate {}'.format(baudrate))
# command = 15
# checksum = (self.address + command + baudrate) & 127
self.sendCommand(15, baud)
self.saber.write(b'\xaa')
time.sleep(0.2)
def open(self):
"""
Opens serial port
"""
if not self.saber.is_open:
self.saber.open()
self.saber.write(b'\xaa')
self.saber.write(b'\xaa')
time.sleep(0.2)
def sendCommand(self, command, message):
"""
sendCommand: Sends a packetized serial command to the Sabertooth
command: Command to send.
FORWARD_1 = 0x00
REVERSE_1 = 0x01
FORWARD_2 = 0x04
REVERSE_2 = 0x05
FORWARD_MIXED = 0x08
REVERSE_MIXED = 0x09
RIGHT_MIXED = 0x0A
LEFT_MIXED = 0x0B
RAMP = 0x10
message: Command
"""
# Calculate checksum termination (page 23 of the documentation).
checksum = (self.address + command + message) & 127
# Write data packet.
msg = [self.address, command, message, checksum]
msg = bytes(bytearray(msg))
self.saber.write(msg)
# Flush UART.
self.saber.flush()
def stop(self):
"""
Stops both motors
"""
sentBytes = 0
self.driveBoth(0,0)
return sentBytes
def drive(self, num, speed):
"""Drive 1 or 2 motor"""
# reverse commands are equal to forward+1
cmds = [self.FORWARD_1, self.FORWARD_2]
try:
cmd = cmds[num-1]
except:
raise Exception('PySabertooth, invalid motor number: {}'.format(num))
if speed < 0:
speed = -speed
cmd += 1
if speed > 100:
raise Exception('PySabertooth, invalid speed: {}'.format(speed))
self.sendCommand(cmd, int(127*speed/100))
def driveBoth(self, speed1, speed2):
"""Drive both 1 and 2 motors at once"""
self.drive(1, speed1)
self.drive(2, speed2)
def text(self, cmds):
"""Send the simple ASCII commands"""
self.saber.write(cmds + b'\r\n')
def textGet(self, cmds):
"""Send the simple ASCII commands"""
self.text(cmds)
ans = self.saber.read(100)
return ans
| [
2235,
198,
2,
9910,
861,
5226,
13,
9078,
25,
5016,
15427,
19638,
1143,
11389,
1630,
286,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
9910,
861,
5226,
362,
87,
2624,
5584,
4639,
357,
29271,
30... | 2.024908 | 2,449 |
import frappe
from datetime import datetime
# bench execute mfi_customization.mfi.patch.set_closing_date_in_issue.execute
| [
11748,
5306,
27768,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
2,
7624,
12260,
285,
12463,
62,
23144,
1634,
13,
76,
12463,
13,
17147,
13,
2617,
62,
565,
2752,
62,
4475,
62,
259,
62,
21949,
13,
41049,
198,
197,
197,
197,
198
... | 2.953488 | 43 |
import os
import pathlib
import json
import flask
import flask_cors
from mnlite import mnode
import opersist.utils
| [
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
33918,
198,
11748,
42903,
198,
11748,
42903,
62,
66,
669,
198,
6738,
285,
21283,
578,
1330,
285,
17440,
198,
11748,
1034,
364,
396,
13,
26791,
628,
198
] | 3.342857 | 35 |
from django import forms
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, Platform, Region, Site, SiteGroup
from extras.forms import CustomFieldModelFilterForm, LocalConfigContextFilterForm
from tenancy.forms import TenancyFilterForm, ContactModelFilterForm
from utilities.forms import (
DynamicModelMultipleChoiceField, StaticSelect, StaticSelectMultiple, TagFilterField, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.choices import *
from virtualization.models import *
__all__ = (
'ClusterFilterForm',
'ClusterGroupFilterForm',
'ClusterTypeFilterForm',
'VirtualMachineFilterForm',
'VMInterfaceFilterForm',
)
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
355,
4808,
198,
198,
6738,
30736,
320,
13,
27530,
1330,
16232,
47445,
11,
19193,
11,
17718,
11,
14413,
11,
14413,
13247,
198,
6738,
33849,
1... | 3.520619 | 194 |
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Note: running this test requires installing the package python-mock.
# pylint: disable=C0103
# pylint: disable=F0401
import PRESUBMIT
import os.path
import subprocess
import sys
import unittest
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'pymock'))
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
import mock
from PRESUBMIT_test_mocks import MockInputApi
from PRESUBMIT_test_mocks import MockOutputApi
from PRESUBMIT_test_mocks import MockAffectedFile
class Capture(object):
"""Class to capture a call argument that can be tested later on."""
if __name__ == '__main__':
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
2177,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,... | 2.906667 | 300 |
from ..exceptions import ArgumentNullException
def has_params(data, *args):
"""
Validates required parameters against an object.
:param data:
:param args: required parameters
:return:
"""
if not data:
return False
for a in args:
if not a in data:
return False
v = data[a]
if not v or v.isspace():
return False
return True
| [
6738,
11485,
1069,
11755,
1330,
45751,
35067,
16922,
628,
198,
4299,
468,
62,
37266,
7,
7890,
11,
1635,
22046,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3254,
37051,
2672,
10007,
1028,
281,
2134,
13,
628,
220,
220,
220,
105... | 2.394286 | 175 |
import os
import io
import yaml
import stringcase
# from pprint import pprint
from pathlib import Path
from korapp import kordir
from korapp import utils
| [
11748,
28686,
198,
11748,
33245,
198,
11748,
331,
43695,
198,
11748,
4731,
7442,
198,
2,
422,
279,
4798,
1330,
279,
4798,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
479,
273,
1324,
1330,
479,
585,
343,
198,
6738,
479,
273,
13... | 3.361702 | 47 |
"""
Define the unit tests for the
:mod:`colour.models.rgb.transfer_functions.arri_alexa_log_c` module.
"""
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
log_encoding_ALEXALogC,
log_decoding_ALEXALogC,
)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestLogEncoding_ALEXALogC",
"TestLogDecoding_ALEXALogC",
]
class TestLogEncoding_ALEXALogC(unittest.TestCase):
"""
Define :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_encoding_ALEXALogC` definition unit tests methods.
"""
def test_log_encoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_encoding_ALEXALogC` definition.
"""
self.assertAlmostEqual(
log_encoding_ALEXALogC(0.0), 0.092809000000000, places=7
)
self.assertAlmostEqual(
log_encoding_ALEXALogC(0.18), 0.391006832034084, places=7
)
self.assertAlmostEqual(
log_encoding_ALEXALogC(1.0), 0.570631558120417, places=7
)
def test_n_dimensional_log_encoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_encoding_ALEXALogC` definition n-dimensional arrays support.
"""
x = 0.18
t = log_encoding_ALEXALogC(x)
x = np.tile(x, 6)
t = np.tile(t, 6)
np.testing.assert_almost_equal(log_encoding_ALEXALogC(x), t, decimal=7)
x = np.reshape(x, (2, 3))
t = np.reshape(t, (2, 3))
np.testing.assert_almost_equal(log_encoding_ALEXALogC(x), t, decimal=7)
x = np.reshape(x, (2, 3, 1))
t = np.reshape(t, (2, 3, 1))
np.testing.assert_almost_equal(log_encoding_ALEXALogC(x), t, decimal=7)
def test_domain_range_scale_log_encoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_encoding_ALEXALogC` definition domain and range scale support.
"""
x = 0.18
t = log_encoding_ALEXALogC(x)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_encoding_ALEXALogC(x * factor), t * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_log_encoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_encoding_ALEXALogC` definition nan support.
"""
log_encoding_ALEXALogC(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
)
class TestLogDecoding_ALEXALogC(unittest.TestCase):
"""
Define :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_decoding_ALEXALogC` definition unit tests methods.
"""
def test_log_decoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_decoding_ALEXALogC` definition.
"""
self.assertAlmostEqual(log_decoding_ALEXALogC(0.092809), 0.0, places=7)
self.assertAlmostEqual(
log_decoding_ALEXALogC(0.391006832034084), 0.18, places=7
)
self.assertAlmostEqual(
log_decoding_ALEXALogC(0.570631558120417), 1.0, places=7
)
def test_n_dimensional_log_decoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_decoding_ALEXALogC` definition n-dimensional arrays support.
"""
t = 0.391006832034084
x = log_decoding_ALEXALogC(t)
t = np.tile(t, 6)
x = np.tile(x, 6)
np.testing.assert_almost_equal(log_decoding_ALEXALogC(t), x, decimal=7)
t = np.reshape(t, (2, 3))
x = np.reshape(x, (2, 3))
np.testing.assert_almost_equal(log_decoding_ALEXALogC(t), x, decimal=7)
t = np.reshape(t, (2, 3, 1))
x = np.reshape(x, (2, 3, 1))
np.testing.assert_almost_equal(log_decoding_ALEXALogC(t), x, decimal=7)
def test_domain_range_scale_log_decoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_decoding_ALEXALogC` definition domain and range scale support.
"""
t = 0.391006832034084
x = log_decoding_ALEXALogC(t)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
log_decoding_ALEXALogC(t * factor), x * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_log_decoding_ALEXALogC(self):
"""
Test :func:`colour.models.rgb.transfer_functions.arri_alexa_log_c.\
log_decoding_ALEXALogC` definition nan support.
"""
log_decoding_ALEXALogC(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])
)
if __name__ == "__main__":
unittest.main()
| [
37811,
198,
7469,
500,
262,
4326,
5254,
329,
262,
198,
25,
4666,
25,
63,
49903,
13,
27530,
13,
81,
22296,
13,
39437,
62,
12543,
2733,
13,
283,
380,
62,
1000,
27865,
62,
6404,
62,
66,
63,
8265,
13,
198,
37811,
198,
198,
11748,
299,... | 2.03906 | 2,637 |
"""
Meteostat JSON API Server
The code is licensed under the MIT license.
"""
from datetime import datetime
import json
from flask import abort
from meteostat import Normals, units
from server import app, utils
"""
Meteostat configuration
"""
cache_time = 60 * 60 * 24 * 30
Normals.max_age = cache_time
Normals.autoclean = False
"""
Endpoint configuration
"""
# Query parameters
parameters = [
('station', str, None),
('start', int, None),
('end', int, None),
('units', str, None)
]
@app.route('/stations/normals')
def stations_normals():
"""
Return station normals data in JSON format
"""
# Get query parameters
args = utils.get_parameters(parameters)
# Check if required parameters are set
if args['station']:
try:
# Get data
if args['start'] and args['end']:
# Get number of years between start and end year
year_diff = args['end'] - args['start']
# Check date range
if year_diff < 0:
# Bad request
abort(400)
data = Normals(args['station'], args['start'], args['end'])
else:
data = Normals(args['station'])
# Check if any data
if data.count() > 0:
# Normalize data
data = data.normalize()
# Unit conversion
if args['units'] == 'imperial':
data = data.convert(units.imperial)
elif args['units'] == 'scientific':
data = data.convert(units.scientific)
# Fetch DataFrame
data = data.fetch()
# To JSON
data = data.reset_index().to_json(orient="records")
else:
# No data
data = '[]'
# Inject meta data
meta = {}
meta['generated'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Generate output string
output = f'''{{"meta":{json.dumps(meta)},"data":{data}}}'''
# Return
return utils.send_response(output, cache_time)
except BaseException:
# Bad request
abort(400)
else:
# Bad request
abort(400)
| [
37811,
198,
9171,
68,
455,
265,
19449,
7824,
9652,
198,
198,
464,
2438,
318,
11971,
739,
262,
17168,
5964,
13,
198,
37811,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
33918,
198,
6738,
42903,
1330,
15614,
198,
6738,
47091,... | 2.0746 | 1,126 |
TITLE = "Xor - топ"
STATEMENT_TEMPLATE = '''
Дан код, шифрующий флаг, и результат его работы. Получите флаг.
`
with open("output.txt", "w") as f:
key = 0 # some x 0<x<256
flag = "some string"
encrypted_flag = []
for i in range(len(flag)):
encrypted_flag.append(ord(flag[i]) ^ key)
encrypted_flag.reverse()
print(" ".join(str(e) for e in encrypted_flag), file=f)
`
stdout:
`{0}`
'''
tokens = ['147 175 166 128 219 221 158 223 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 221 167 150 150 159 139 159 158 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 223 132 172 166 215 222 156 180 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 129 189 135 183 143 219 167 218 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 152 187 220 136 162 162 170 223 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 172 157 185 166 165 137 135 185 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 186 137 136 171 186 139 218 188 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 187 187 131 220 216 223 159 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 183 165 170 172 159 183 219 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 159 135 139 151 215 162 134 148 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 216 150 216 137 183 155 220 141 
177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 173 129 128 141 150 150 165 215 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 184 220 169 219 129 160 155 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 191 221 153 191 214 214 166 169 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 180 167 138 143 190 169 150 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 186 163 143 191 166 222 173 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 221 175 221 164 190 136 151 135 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 221 216 140 143 187 131 185 154 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 138 162 133 141 222 184 170 219 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 185 165 214 131 150 161 216 161 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 155 141 136 131 180 219 182 165 177 138 130 156 222 153 177 218 138 177 128 223 
177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 183 171 219 165 214 190 160 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 162 165 188 186 214 157 160 131 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 132 135 162 217 130 183 158 156 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 217 155 214 151 166 182 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 186 136 190 166 130 158 160 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 138 156 134 128 137 191 216 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 135 216 217 221 220 191 220 158 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 173 154 184 148 168 143 175 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 218 221 152 184 132 128 220 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 172 163 130 191 217 166 161 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 
156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 220 157 221 222 129 131 132 143 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 175 168 143 191 158 157 128 170 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 186 139 137 156 148 185 153 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 216 161 134 143 221 148 137 186 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 166 180 157 164 161 140 133 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 158 182 183 141 154 153 165 141 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 188 221 131 187 175 153 130 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 169 168 137 153 161 218 167 152 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 218 139 166 158 173 141 157 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 214 162 138 219 139 152 219 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 
162 165 162', '147 153 134 132 167 169 216 172 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 171 161 132 138 129 130 155 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 151 137 170 218 190 143 165 185 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 130 143 170 128 161 175 157 154 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 221 191 157 136 187 185 169 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 131 148 139 165 219 187 132 215 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 137 161 215 159 182 172 167 165 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 191 187 131 132 180 171 173 133 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 189 143 156 189 151 219 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 171 138 169 170 175 156 132 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 165 135 152 133 182 170 
139 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 141 138 180 165 134 222 136 148 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 215 150 165 160 156 163 164 221 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 219 134 190 183 150 219 130 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 141 151 159 167 154 191 140 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 219 173 138 167 160 184 140 168 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 137 155 180 185 158 152 148 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 216 138 220 216 154 128 163 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 143 222 159 129 180 218 162 141 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 183 134 151 166 169 218 165 219 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 155 189 136 133 182 171 156 132 177 138 130 156 222 153 177 218 138 177 128 
223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 185 214 161 220 190 172 166 167 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 129 140 223 133 130 180 139 156 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 218 221 169 223 219 165 157 161 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 166 180 191 218 171 152 223 161 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 223 135 152 155 182 172 215 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 167 148 184 186 128 165 218 131 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 157 218 164 172 156 150 128 162 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 172 169 175 175 158 133 168 155 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 162 128 161 161 150 163 164 214 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 150 223 214 216 214 171 162 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 
151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 139 170 187 161 159 185 155 183 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 214 183 188 155 171 163 155 219 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 166 129 182 165 183 175 222 158 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 163 165 129 162 163 158 220 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 168 215 129 182 218 134 217 222 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 137 159 132 187 130 137 217 163 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 139 135 148 148 218 191 221 171 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 216 137 191 135 167 220 152 188 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 164 162 172 218 186 156 154 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 217 171 220 161 180 188 152 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 
149 162 165 162', '147 180 154 175 148 135 190 180 180 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 167 152 190 222 171 135 172 215 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 171 167 222 140 130 152 161 191 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 151 137 133 164 164 139 159 158 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 173 166 189 215 161 152 184 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 140 187 187 172 170 140 167 129 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 214 171 130 130 221 130 183 143 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 214 151 133 137 163 220 214 162 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 186 166 182 221 150 168 128 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 150 183 157 170 143 151 170 153 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 187 157 190 216 160 136 
130 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 143 165 151 222 222 131 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 172 190 180 214 133 184 186 158 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 138 131 171 152 168 190 182 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 160 139 153 220 168 141 219 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 186 148 130 156 153 129 169 160 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 161 138 135 153 220 190 152 159 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 153 219 141 156 215 169 189 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 166 156 186 188 171 131 136 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 150 222 220 163 157 156 148 141 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 152 218 153 215 175 215 215 159 177 138 130 156 222 153 177 218 138 177 
128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 160 188 219 214 151 131 217 182 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 167 182 138 168 221 151 152 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 185 217 153 162 130 165 130 180 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 157 151 139 143 148 180 151 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 173 215 128 189 222 191 186 214 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 164 157 182 191 191 140 222 215 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 161 154 217 217 188 150 148 220 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 215 218 137 185 131 218 189 137 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 214 191 180 218 215 132 130 188 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 189 150 216 159 130 214 157 185 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 
158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 129 157 143 155 152 130 153 153 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 168 130 158 180 187 130 214 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 171 158 172 164 152 191 135 139 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 183 150 152 148 154 188 151 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 159 148 138 152 182 166 161 132 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 132 135 131 160 162 219 166 187 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 180 134 186 153 182 219 173 186 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 183 139 217 217 172 157 165 157 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 160 166 139 186 190 175 221 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 188 187 164 151 218 134 187 140 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 
186 149 162 165 162', '147 129 162 143 156 221 184 182 161 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 222 182 171 169 148 129 223 152 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 168 155 170 155 191 135 143 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 166 185 173 180 215 220 187 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 214 168 215 132 182 158 168 170 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 185 158 154 128 190 157 169 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 130 222 219 140 133 219 223 156 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 218 154 186 215 151 172 170 184 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 160 180 173 140 168 131 189 218 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 148 143 175 184 140 220 133 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 152 154 139 130 189 
216 219 141 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 132 132 222 173 160 190 171 171 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 152 143 221 170 155 158 163 136 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 164 170 182 162 219 128 137 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 218 128 138 150 128 221 131 128 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 169 137 150 153 143 220 173 143 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 129 183 130 159 158 219 173 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 133 162 136 129 141 221 189 140 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 180 156 164 150 188 162 222 170 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 222 184 168 168 151 161 216 167 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 164 130 136 143 154 215 151 136 177 138 130 156 222 153 177 218 138 
177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 159 219 216 172 150 223 167 173 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 159 148 171 157 175 173 154 143 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 188 131 162 157 183 167 156 140 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 219 134 128 128 136 214 170 163 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 217 170 182 182 188 132 223 155 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 161 169 218 162 128 188 134 162 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 135 185 189 133 219 161 159 222 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 151 217 171 220 184 153 191 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 159 143 168 162 187 191 155 164 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 134 220 168 219 219 170 217 222 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 
154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 129 189 161 161 129 135 223 220 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 157 160 135 185 161 214 187 161 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 216 136 137 220 180 222 171 134 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 218 133 162 159 175 172 141 173 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 131 221 167 157 161 182 169 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 128 221 187 162 171 153 169 139 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 220 135 155 158 163 138 148 216 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 171 130 148 167 175 169 157 172 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 190 138 139 184 219 220 138 220 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 131 129 190 168 169 216 161 170 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 
134 186 149 162 165 162', '147 162 216 217 134 183 158 169 151 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 173 187 141 221 135 182 185 163 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 220 180 143 172 162 130 217 170 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 216 132 168 152 141 143 221 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 134 222 151 153 133 158 185 138 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 137 143 183 185 148 161 171 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 160 164 136 150 155 188 182 190 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 141 150 169 171 143 216 165 217 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 140 150 216 167 132 152 184 169 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 140 165 182 133 160 140 187 129 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 180 173 136 138 
129 150 159 218 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 166 162 167 167 173 189 168 129 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 136 172 180 135 133 182 215 163 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 167 128 131 140 138 152 182 191 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 189 182 156 169 128 175 186 183 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 133 128 129 164 216 214 216 182 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 152 132 132 162 159 218 171 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 168 170 165 130 219 130 173 184 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 157 175 130 220 221 129 132 173 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 172 139 163 214 158 139 188 182 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 220 148 150 185 191 183 188 158 177 138 130 156 222 153 177 218 
138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 219 165 157 158 141 133 180 166 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 157 150 140 131 168 187 216 155 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 191 165 218 190 132 223 136 137 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 134 137 218 184 190 171 165 216 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 129 191 189 129 157 152 167 150 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 221 183 140 154 223 182 175 220 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 184 152 129 190 191 172 148 138 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 220 188 140 170 161 183 168 131 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 189 187 137 131 154 156 151 169 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 182 150 188 154 159 168 171 148 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 
222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 189 137 148 148 159 188 129 222 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 191 214 161 163 184 158 141 171 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 164 184 184 138 141 150 159 139 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 162 141 191 131 186 161 170 136 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162', '147 153 132 216 163 135 157 156 187 177 138 130 156 222 153 177 218 138 177 128 223 177 156 221 134 158 223 141 177 222 154 158 151 156 141 177 154 219 221 140 177 221 134 186 149 162 165 162'] | [
49560,
2538,
796,
366,
55,
273,
532,
220,
20375,
25443,
123,
1,
201,
198,
35744,
12529,
62,
51,
3620,
6489,
6158,
796,
705,
7061,
201,
198,
140,
242,
16142,
22177,
12466,
118,
25443,
112,
11,
220,
141,
230,
18849,
141,
226,
21169,
3... | 3.84249 | 10,266 |
from sys import argv, exit
import torch
import torchgeometry as tgm
import cv2
import matplotlib.pyplot as plt
import torch.nn as nn


def _read_image_batch(path):
	"""Load *path* with OpenCV, flip BGR->RGB, and return a 1xCxHxW float tensor."""
	rgb = cv2.imread(path)[..., (2,1,0)]
	chw_tensor = tgm.utils.image_to_tensor(rgb)
	return torch.unsqueeze(chw_tensor.float(), dim=0)


if __name__ == '__main__':
	first_path = argv[1]
	second_path = argv[2]
	# rotate(imgFile)
	# warp(imgFile2)
	# Prepare both inputs as float batch tensors, then run the aligner
	# and report the resulting shapes / grad status.
	batch1 = _read_image_batch(first_path)
	batch2 = _read_image_batch(second_path)
	alignment = Align()
	im1, im2 = alignment(batch1, batch2)
	print(im1.shape, im2.shape, type(im1), im1.requires_grad)
6738,
25064,
1330,
1822,
85,
11,
8420,
198,
11748,
28034,
198,
11748,
28034,
469,
15748,
355,
256,
39870,
198,
11748,
269,
85,
17,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
13,
20471,
355,
299,
7... | 2.259928 | 277 |
# Train-and-validate driver for a single-class (face) detector with a
# three-level anchor pyramid.  Each epoch: train on train1024, validate on
# test1024, NMS the raw predictions, score precision/recall at
# IoU >= iou_thresholds[1], and checkpoint weights whenever mean loss /
# precision / recall improves on the best value seen so far.
import tensorflow as tf
import numpy as np
from models import build_model as build_model
from datagen import genxy
from utils import genanchors, nsm, comiou
# Dataset annotation files and image directories, plus the weights directory.
train_anno_file_path = '../datasets/widerface/train1024.txt'
train_image_dir = '../datasets/widerface/train1024'
test_anno_file_path = '../datasets/widerface/test1024.txt'
test_image_dir = '../datasets/widerface/test1024'
output_path = 'output'
# Network input shape (H, W, C); per-pyramid-level feature-map sizes and
# the anchor sizes attached to each level.
ishape = [256, 256, 3]
ssizes = [
	[64, 64],
	[32, 32],
	[16, 16],
]
asizes = [
	[[32, 32]],
	[[64, 64]],
	[[128, 128]],
]
total_classes = 1
# Backbone / FPN hyper-parameters, passed straight through to build_model.
resnet_settings = [[16, 16, 64], [4, [2, 2]], [8, [2, 2]], [16, [2, 2]]]
top_down_pyramid_size = 64
total_epoches = 1000
# iou_thresholds[1] is also reused below as the detection-match threshold
# when counting true positives during validation.
iou_thresholds = [0.3, 0.5]
nsm_iou_threshold = 0.5
nsm_score_threshold = 0.8
nsm_max_output_size = 10
anchor_sampling = 164
total_train_examples = 1
total_test_examples = 1
# Anchors for each pyramid level, concatenated into one (N, 4) tensor that
# is shared by the data generator and the NMS decoder.
a1box2d = genanchors(isize=ishape[:2], ssize=ssizes[0], asizes=asizes[0]) # (h1 * w1 * k1, 4)
a2box2d = genanchors(isize=ishape[:2], ssize=ssizes[1], asizes=asizes[1]) # (h2 * w2 * k2, 4)
a3box2d = genanchors(isize=ishape[:2], ssize=ssizes[2], asizes=asizes[2]) # (h3 * w3 * k3, 4)
abox2d = np.concatenate([a1box2d, a2box2d, a3box2d], axis=0) # (h1*w1*k1 + h2*w2*k2 + h3*w3*k3, 4)
abox_2dtensor = tf.constant(value=abox2d, dtype='float32') # (h1*w1*k1 + h2*w2*k2 + h3*w3*k3, 4)
model = build_model(
	ishape=ishape,
	resnet_settings=resnet_settings,
	top_down_pyramid_size=top_down_pyramid_size,
	k=[len(asizes[0]), len(asizes[1]), len(asizes[2])],
	total_classes=total_classes)
# model.summary()
# model.load_weights('{}/_weights.h5'.format(output_path), by_name=True)
# Best-so-far trackers; 2**32 is simply a "larger than any real loss" sentinel.
min_loss = 2**32
max_precision = 0
max_recall = 0
for epoch in range(total_epoches):
	# ---- Training pass ----
	gen = genxy(
		anno_file_path=train_anno_file_path,
		image_dir=train_image_dir,
		ishape=ishape,
		abox_2dtensor=abox_2dtensor,
		iou_thresholds=iou_thresholds,
		total_classes=total_classes,
		anchor_sampling=anchor_sampling,
		mode='train')
	print('\nTrain epoch {}'.format(epoch))
	loss = np.zeros(total_train_examples)
	for batch in range(total_train_examples):
		batchx_4dtensor, batchy_2dtensor, _, _ = next(gen)
		batch_loss = model.train_on_batch(batchx_4dtensor, batchy_2dtensor)
		loss[batch] = batch_loss
		# Lightweight progress indicator: a dash every 10 batches and a
		# percentage every 1000 batches.
		if batch%10==9:
			print('-', end='')
		if batch%1000==999:
			print('{:.3f}%'.format((batch+1)*100/total_train_examples), end='\n')
	mean_loss = float(np.mean(loss, axis=-1))
	print('\nLoss: {:.3f}'.format(mean_loss))
	# "_weights.h5" is the rolling checkpoint written after every epoch.
	model.save_weights('{}/_weights.h5'.format(output_path))
	# ---- Validation pass ----
	print('\nValidate')
	gen = genxy(
		anno_file_path=test_anno_file_path,
		image_dir=test_image_dir,
		ishape=ishape,
		abox_2dtensor=abox_2dtensor,
		iou_thresholds=iou_thresholds,
		total_classes=total_classes,
		anchor_sampling=anchor_sampling,
		mode='test')
	# `loss` is re-bound here as the per-example test-loss buffer.
	loss = np.zeros(total_test_examples)
	precision = np.zeros(total_test_examples)
	recall = np.zeros(total_test_examples)
	total_faces = 0
	total_pred_faces = 0
	TP = 0
	FP = 0
	FN = 0
	for batch in range(total_test_examples):
		batchx_4dtensor, batchy_2dtensor, bboxes, _ = next(gen)
		batch_loss = model.test_on_batch(batchx_4dtensor, batchy_2dtensor)
		loss[batch] = batch_loss
		prediction = model.predict_on_batch(batchx_4dtensor) # (h1*w1*k1 + h2*w2*k2 + h3*w3*k3, total_classes+1+4)
		# Decode + non-max-suppress; only the first valid_outputs rows of
		# boxclz_2dtensor are real detections.
		boxclz_2dtensor, valid_outputs = nsm(
			abox_2dtensor=abox_2dtensor,
			prediction=prediction,
			nsm_iou_threshold=nsm_iou_threshold,
			nsm_score_threshold=nsm_score_threshold,
			nsm_max_output_size=nsm_max_output_size,
			total_classes=total_classes)
		boxclz_2dtensor = boxclz_2dtensor[:valid_outputs]
		pred_bboxes = list(boxclz_2dtensor.numpy())
		total_bboxes = len(bboxes)
		total_pred_bboxes = len(pred_bboxes)
		# Greedy matching: a ground-truth box counts as detected if ANY
		# prediction overlaps it with IoU >= iou_thresholds[1].
		# NOTE(review): matched predictions are not removed, so a single
		# prediction can satisfy several ground-truth boxes.
		true_positives = 0
		for i in range(total_bboxes):
			for j in range(total_pred_bboxes):
				iou = comiou(bbox=bboxes[i], pred_bbox=pred_bboxes[j])
				if iou >= iou_thresholds[1]:
					true_positives += 1
					break
		false_negatives = total_bboxes - true_positives
		false_positives = total_pred_bboxes - true_positives
		# The 0.00001 term guards against division by zero when there are
		# no predictions and/or no ground-truth boxes in an example.
		precision[batch] = true_positives / (true_positives + false_positives + 0.00001)
		recall[batch] = true_positives / (true_positives + false_negatives + 0.00001)
		total_faces += total_bboxes
		total_pred_faces += total_pred_bboxes
		TP += true_positives
		FP += false_positives
		FN += false_negatives
		if batch%10==9:
			print('-', end='')
		if batch%1000==999:
			print('{:.2f}%'.format((batch+1)*100/total_test_examples), end='\n')
	mean_loss = float(np.mean(loss, axis=-1))
	mean_precision = float(np.mean(precision, axis=-1))
	mean_recall = float(np.mean(recall, axis=-1))
	# Checkpoint separately on each metric's best-so-far value.
	if mean_loss < min_loss:
		min_loss = mean_loss
		model.save_weights('{}/weights.h5'.format(output_path))
	if mean_precision > max_precision:
		max_precision = mean_precision
		model.save_weights('{}/weights_best_precision.h5'.format(output_path))
	if mean_recall > max_recall:
		max_recall = mean_recall
		model.save_weights('{}/weights_best_recall.h5'.format(output_path))
	print('\nLoss: {:.3f}/{:.3f}, Precision: {:.3f}/{:.3f}, Recall: {:.3f}/{:.3f}, Total bboxes: {}, Total predicted bboxes: {}, TP: {}, FP: {}, FN: {}'.format(mean_loss, min_loss, mean_precision, max_precision, mean_recall, max_recall, total_faces, total_pred_faces, TP, FP, FN))
| [
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4981,
1330,
1382,
62,
19849,
355,
1382,
62,
19849,
198,
6738,
4818,
11286,
1330,
2429,
5431,
198,
6738,
3384,
4487,
1330,
2429,
3702,
669,
11,
299,
579... | 2.242257 | 2,357 |
from itassets.test_api import ApiTestCase, random_dbca_email
from mixer.backend.django import mixer
from tracking.models import FreshdeskTicket, FreshdeskContact
| [
6738,
340,
19668,
13,
9288,
62,
15042,
1330,
5949,
72,
14402,
20448,
11,
4738,
62,
9945,
6888,
62,
12888,
198,
6738,
33938,
13,
1891,
437,
13,
28241,
14208,
1330,
33938,
198,
198,
6738,
9646,
13,
27530,
1330,
20138,
8906,
74,
51,
9715... | 3.416667 | 48 |
import numpy as np

from math_study.numpy_basics.data_types.print_data_type_info import print_info

if __name__ == '__main__':
    # Demo: build int64 / float128 / complex256 arrays and show how each
    # dtype converts to the other two via ndarray.astype.
    # NOTE(review): np.float128 / np.complex256 are extended-precision
    # aliases that only exist on platforms whose C `long double` is wider
    # than 64 bits (e.g. not on Windows).
    int_array = np.array(list(range(10)), dtype=np.int64)
    float_array = np.array(list(range(10)), dtype=np.float128)
    complex_array = np.array(list(range(10)), dtype=np.complex256)

    int_to_float_array = int_array.astype(np.float128)
    int_to_complex_array = int_array.astype(np.complex256)

    # Bug fix: these four conversions previously all started from
    # int_array, so the float->* and complex->* cases were never actually
    # exercised.  Converting complex arrays to real dtypes discards the
    # imaginary part (NumPy emits a ComplexWarning), which is the point
    # being demonstrated.
    float_to_int_array = float_array.astype(np.int64)
    float_to_complex_array = float_array.astype(np.complex256)

    complex_to_int_array = complex_array.astype(np.int64)
    complex_to_float_array = complex_array.astype(np.float128)

    _arrays = [
        int_to_float_array, int_to_complex_array,
        float_to_int_array, float_to_complex_array,
        complex_to_int_array, complex_to_float_array
    ]

    # Print dtype details for every converted array.
    for _array in _arrays:
        print_info(_array)
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
10688,
62,
44517,
13,
77,
32152,
62,
12093,
873,
13,
7890,
62,
19199,
13,
4798,
62,
7890,
62,
4906,
62,
10951,
1330,
3601,
62,
10951,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12... | 2.345646 | 379 |