content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# # USAGE
# # Start the server:
# # python run_keras_server.py
# # Submit a request via cURL:
# # curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# # Submita a request via Python:
# # python simple_request.py
#
# # import the necessary packages
# from flask_cors import CORS, cross_origin
# from keras.applications import ResNet50
# from keras.applications import InceptionV3
# from keras.preprocessing.image import img_to_array
# from keras.applications import imagenet_utils
# from PIL import Image
# from constants import MODELS
# import numpy as np
# import flask
# import io
# import json
#
# # initialize our Flask application and the Keras model
# app = flask.Flask(__name__)
# cors = CORS(app)
#
# resnetModel = ResNet50(weights="imagenet")
# inceptionV3Model = InceptionV3(weights="imagenet")
#
# # def load_model():
# # load the pre-trained Keras model (here we are using a model
# # pre-trained on ImageNet and provided by Keras, but you can
# # substitute in your own networks just as easily)
# # global model
# # model = ResNet50(weights="imagenet")
#
# def prepare_image(image, target):
# # if the image mode is not RGB, convert it
# if image.mode != "RGB":
# image = image.convert("RGB")
#
# # resize the input image and preprocess it
# image = image.resize(target)
# image = img_to_array(image)
# image = np.expand_dims(image, axis=0)
# image = imagenet_utils.preprocess_input(image)
#
# # return the processed image
# return image
#
#
# def build_html_with_layer(layer):
# layer_class = layer['class_name']
# layer_config = layer['config']
# html = ""
#
# if layer_class == 'InputLayer':
# html = "input shape " + str(layer_config['batch_input_shape']) + "<br>"
# elif layer_class == 'ZeroPadding2D':
# html = "padding " + str(layer_config['padding']) + "<br>"
# elif layer_class == 'Conv2D':
# html = "filters " + str(layer_config['filters']) + "<br>" \
# "kernel size " + str(layer_config['kernel_size']) + "<br>" \
# "strides " + str(
# layer_config['strides']) + "<br>"
# elif layer_class == 'BatchNormalization':
# html = ""
# elif layer_class == 'Activation':
# html = "activation func</b> " + str(layer_config['activation'])
# elif layer_class == 'MaxPooling2D':
# html = "pool size " + str(layer_config['pool_size']) + "<br>" \
# "strides " + str(layer_config['strides']) + "<br>"
#
# return html
#
#
# def create_model_graph(layers):
# data = []
# tooltip = {}
# links = []
# for idx in range(1, len(layers)):
# links.append({
# "source": idx - 1,
# "target": idx
# })
#
# for idx, layer in enumerate(layers):
# flag = False
# prior_node = ""
#
# inbound_nodes = layer["inbound_nodes"]
#
# if len(inbound_nodes) != 0:
# for inbound_node in inbound_nodes[0]:
# if inbound_node[0] != data[len(data)-1]["name"]:
# flag = True
# prior_node = inbound_node[0]
# break
# else:
# break
#
# if flag is True:
# for d in data:
# if d["name"] == prior_node:
# data.append({
# "name": layer['name'],
# "x": d["x"] + 1200,
# "y": d["y"],
# "value": layer['class_name']
# })
# else:
# data.append({
# "name": layer['name'],
# "x": 500,
# "y": idx * 200,
# "value": layer['class_name']
# })
#
# tooltip[layer['name']] = build_html_with_layer(layer)
#
#
#
# model_graph = {
# "graph": {
# "data": data,
# "links": links
# },
# "tooltip": tooltip
# }
#
# return model_graph
#
#
# @app.route("/predict", methods=["POST"])
# def predict():
# # initialize the data dictionary that will be returned from the
# # view
# data = {"success": False}
#
# # ensure an image was properly uploaded to our endpoint
# if flask.request.method == "POST":
# if flask.request.files.get("image"):
# # read the image in PIL format
# image = flask.request.files["image"].read()
# image = Image.open(io.BytesIO(image))
#
# # preprocess the image and prepare it for classification
# image = prepare_image(image, target=(224, 224))
#
# # classify the input image and then initialize the list
# # of predictions to return to the client
# preds = resnetModel.predict(image)
# results = imagenet_utils.decode_predictions(preds)
# data["predictions"] = []
#
# # loop over the results and add them to the list of
# # returned predictions
# for (imagenetID, label, prob) in results[0]:
# r = {"label": label, "probability": float(prob)}
# data["predictions"].append(r)
#
# # indicate that the request was a success
# data["success"] = True
#
# # return the data dictionary as a JSON response
# return flask.jsonify(data)
#
#
# @app.route("/layers/<int:model_id>", methods=["GET"])
# @cross_origin()
# def layers(model_id):
#
# if model_id == MODELS['ResNet50']:
# jmodel = json.loads(resnetModel.to_json())
# elif model_id == MODELS['InceptionV3']:
# jmodel = json.loads(inceptionV3Model.to_json())
# else:
# return ('',204) # No Content
#
# layers = jmodel["config"]["layers"]
#
# # print(json.dumps(layers, indent=2, sort_keys=True))
#
# model_graph = create_model_graph(layers)
# # print(json.dumps(model_graph, indent=2, sort_keys=True))
# return flask.jsonify(model_graph)
#
#
# # if this is the main thread of execution first load the model and
# # then start the server
# if __name__ == "__main__":
# print(("* Loading Keras model and Flask starting server..."
# "please wait until server has fully started"))
# app.run()
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# Submita a request via Python:
# python simple_request.py
# import the necessary packages
from flask_cors import CORS, cross_origin
from constants import MODELS
from keras.applications import ResNet50
from keras.applications import InceptionV3
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
from PIL import Image
import numpy as np
import flask
import io
import json
import requests
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
cors = CORS(app)
# PBW: 0505_18
MODEL_ID_RESNET = 'ResNet50'
MODEL_ID_INCEPTIONV3 = 'InceptionV3'
currentModel = 0 # model pointer
resnetModel = ResNet50(weights="imagenet")
inceptionV3Model = InceptionV3(weights="imagenet")
# def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
# global model
# model = ResNet50(weights="imagenet")
@app.route("/predict", methods=["POST"])
@app.route("/layers/<int:model_id>", methods=["GET"])
@cross_origin()
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
app.run() | [
2,
1303,
1294,
11879,
198,
2,
1303,
7253,
262,
4382,
25,
198,
2,
1303,
220,
197,
29412,
1057,
62,
6122,
292,
62,
15388,
13,
9078,
198,
2,
1303,
39900,
257,
2581,
2884,
269,
21886,
25,
198,
2,
1303,
220,
197,
66,
6371,
532,
55,
2... | 2.464432 | 2,924 |
#!/usr/bin/env python3
#
# Copyright 2021, by the California Institute of Technology.
# ALL RIGHTS RESERVED.
# United States Government sponsorship acknowledged.
# Any commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
# This software may be subject to U.S. export control laws and regulations.
# By accepting this document, the user agrees to comply with all applicable
# U.S. export laws and regulations. User has the responsibility to obtain
# export licenses, or other export authority as may be required, before
# exporting such information to foreign countries or providing access to
# foreign persons.
#
"""
================
test_dswx_pge.py
================
Unit tests for the pge/dswx_pge.py module.
"""
import os
import tempfile
import unittest
from io import StringIO
from os.path import abspath, join
from pkg_resources import resource_filename
import yaml
from opera.pge import DSWxExecutor, RunConfig
from opera.util import PgeLogger
class DSWxPgeTestCase(unittest.TestCase):
"""Base test class using unittest"""
starting_dir = None
working_dir = None
test_dir = None
input_file = None
@classmethod
def setUpClass(cls) -> None:
"""Set up directories and files for testing"""
cls.starting_dir = abspath(os.curdir)
cls.test_dir = resource_filename(__name__, "")
cls.data_dir = join(cls.test_dir, "data")
os.chdir(cls.test_dir)
cls.working_dir = tempfile.TemporaryDirectory(
prefix="test_dswx_pge_", suffix='_temp', dir=os.curdir
)
# Create the input dir expected by the test RunConfig and add a dummy
# input file for validation
input_dir = join(cls.working_dir.name, "dswx_pge_test/input_dir")
os.makedirs(input_dir, exist_ok=True)
cls.input_file = tempfile.NamedTemporaryFile(
dir=input_dir, prefix="test_input", suffix=".tif")
@classmethod
def tearDownClass(cls) -> None:
"""At completion re-establish starting directory"""
cls.input_file.close()
cls.working_dir.cleanup()
os.chdir(cls.starting_dir)
def setUp(self) -> None:
"""Use the temporary directory as the working directory"""
os.chdir(self.working_dir.name)
def tearDown(self) -> None:
"""Return to starting directory"""
os.chdir(self.test_dir)
def test_dswx_pge_execution(self):
"""
Test execution of the DSWxExecutor class and its associated mixins using
a test RunConfig that creates a dummy expected output file and logs a
message to be captured by PgeLogger.
"""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=runconfig_path)
# Check that basic attributes were initialized
self.assertEqual(pge.name, "DSWx")
self.assertEqual(pge.pge_name, "DSWxPgeTest")
self.assertEqual(pge.runconfig_path, runconfig_path)
# Check that other objects have not been instantiated yet
self.assertIsNone(pge.runconfig)
self.assertIsNone(pge.logger)
# Kickoff execution of DSWx PGE
pge.run()
# Check that the runconfig and logger were instantiated
self.assertIsInstance(pge.runconfig, RunConfig)
self.assertIsInstance(pge.logger, PgeLogger)
# Check that directories were created according to RunConfig
self.assertTrue(os.path.isdir(pge.runconfig.output_product_path))
self.assertTrue(os.path.isdir(pge.runconfig.scratch_path))
# Check that a in-memory log was created
stream = pge.logger.get_stream_object()
self.assertTrue(isinstance(stream, StringIO))
# Check that a RunConfig for the SAS was isolated within the scratch directory
expected_sas_config_file = join(pge.runconfig.scratch_path, 'test_dswx_hls_config_sas.yaml')
self.assertTrue(os.path.exists(expected_sas_config_file))
# Check that the log file was created and moved into the output directory
expected_log_file = join(pge.runconfig.output_product_path, pge.logger.get_file_name())
self.assertTrue(os.path.exists(expected_log_file))
# Open and read the log
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"DSWx-HLS invoked with RunConfig {expected_sas_config_file}", log_contents)
def test_dswx_pge_input_validation(self):
"""Test the input validation checks made by DSWxPreProcessorMixin."""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
with open(runconfig_path, 'r', encoding='utf-8') as stream:
runconfig_dict = yaml.safe_load(stream)
input_files_group = runconfig_dict['RunConfig']['Groups']['PGE']['InputFilesGroup']
# Test that a non-existent file is detected by pre-processor
input_files_group['InputFilePaths'] = ['non_existent_file.tif']
with open(test_runconfig_path, 'w', encoding='utf-8') as input_path:
yaml.safe_dump(runconfig_dict, input_path, sort_keys=False)
try:
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
# Config validation occurs before the log is fully initialized, but the
# initial log file should still exist and contain details of the validation
# error
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
# Open the log file, and check that the validation error details were captured
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Could not locate specified input file/directory "
f"{abspath('non_existent_file.tif')}", log_contents)
# Test that an input directory with no .tif files is caught
input_files_group['InputFilePaths'] = ['dswx_pge_test/scratch_dir']
with open(test_runconfig_path, 'w', encoding='utf-8') as out_file:
yaml.safe_dump(runconfig_dict, out_file, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Input directory {abspath('dswx_pge_test/scratch_dir')} "
f"does not contain any tif files", log_contents)
# Lastly, check that a file that exists but is not a tif is caught
input_files_group['InputFilePaths'] = [runconfig_path]
with open(test_runconfig_path, 'w', encoding='utf-8') as runconfig_fh:
yaml.safe_dump(runconfig_dict, runconfig_fh, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Input file {abspath(runconfig_path)} does not have "
f".tif extension", log_contents)
finally:
if os.path.exists(test_runconfig_path):
os.unlink(test_runconfig_path)
def test_dswx_pge_output_validation(self):
"""Test the output validation checks made by DSWxPostProcessorMixin."""
runconfig_path = join(self.data_dir, 'test_dswx_hls_config.yaml')
test_runconfig_path = join(self.data_dir, 'invalid_dswx_runconfig.yaml')
with open(runconfig_path, 'r', encoding='utf-8') as stream:
runconfig_dict = yaml.safe_load(stream)
product_path_group = runconfig_dict['RunConfig']['Groups']['PGE']['ProductPathGroup']
primary_executable_group = runconfig_dict['RunConfig']['Groups']['PGE']['PrimaryExecutable']
# Test with a SAS command that does not produce any output file,
# post-processor should detect that expected output is missing
product_path_group['SASOutputFile'] = 'missing_dswx_hls.tif'
primary_executable_group['ProgramPath'] = 'echo'
primary_executable_group['ProgramOptions'] = ['hello world']
with open(test_runconfig_path, 'w', encoding='utf-8') as config_fh:
yaml.safe_dump(runconfig_dict, config_fh, sort_keys=False)
try:
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_output_file = 'dswx_pge_test/output_dir/missing_dswx_hls.tif'
self.assertFalse(os.path.exists(expected_output_file))
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"Expected SAS output file {abspath(expected_output_file)} "
f"does not exist", log_contents)
# Test with a SAS command that produces the expected output file, but
# one that is empty (size 0 bytes). Post-processor should detect this
# and flag an error
product_path_group['SASOutputFile'] = 'empty_dswx_hls.tif'
primary_executable_group['ProgramPath'] = 'touch'
primary_executable_group['ProgramOptions'] = ['dswx_pge_test/output_dir/empty_dswx_hls.tif']
with open(test_runconfig_path, 'w', encoding='utf-8') as outfile:
yaml.safe_dump(runconfig_dict, outfile, sort_keys=False)
pge = DSWxExecutor(pge_name="DSWxPgeTest", runconfig_path=test_runconfig_path)
with self.assertRaises(RuntimeError):
pge.run()
expected_output_file = 'dswx_pge_test/output_dir/empty_dswx_hls.tif'
self.assertTrue(os.path.exists(expected_output_file))
expected_log_file = pge.logger.get_file_name()
self.assertTrue(os.path.exists(expected_log_file))
with open(expected_log_file, 'r', encoding='utf-8') as infile:
log_contents = infile.read()
self.assertIn(f"SAS output file {abspath(expected_output_file)} was "
f"created but is empty", log_contents)
finally:
if os.path.exists(test_runconfig_path):
os.unlink(test_runconfig_path)
if __name__ == "__main__":
unittest.main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
15069,
33448,
11,
416,
262,
3442,
5136,
286,
8987,
13,
198,
2,
11096,
371,
34874,
15731,
1137,
53,
1961,
13,
198,
2,
1578,
1829,
5070,
27418,
10810,
13,
198,
2,
4377... | 2.334706 | 4,855 |
import sys
from typing import List, Callable, Protocol
import pandas as pd
import streamlit as st
import seaborn as sns
import sympy as sp
from sympy.abc import x as x_symbol
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial import Polynomial
sys.path.append('')
sys.path.append('../../..')
from src.common.qf.complex_gauss import calc_complex_gauss
from src.common.utils import integrate, err, rel_err
from src.common.qf.qf_utils import calc_qf, plot_qf
from src.common.polynomial.legendre import legendres
from src.common.qf.gauss import gauss_qf
from src.common.segment import Segment
from src.common.streamlit import function_input, segment_input
st.write("# 6.1 Применение составной КФ Гаусса")
with st.form('main'):
f_expr, f_lambda = function_input("sqrt(1-x)*sin(x)")
a, b = segment_input(default_a=0.0, default_b=1.0)
st.form_submit_button()
N = st.sidebar.number_input(min_value=1, value=2 , label='N: кол-во узлов')
m = st.sidebar.number_input(min_value=1, label='m: кол-во разбиений')
domain = Segment(a, b)
partitions = domain.split(m)
st.write(f_expr)
J = integrate(f_expr, domain)
J_approx = calc_complex_gauss(N, partitions, f_lambda)
with st.expander(label='Более подробно'):
nodes, coefs = gauss_qf(N)
gauss_df = pd.DataFrame({"Узлы": nodes, "Коэф.": coefs})
st.dataframe(gauss_df)
st.latex(f"J = {J}")
st.latex(f"J_{{approx}} = {J_approx}")
st.latex(f"Error = {err(J, J_approx)}")
st.latex(f"Rel.Error = {rel_err(J, J_approx)}\%")
| [
11748,
25064,
198,
6738,
19720,
1330,
7343,
11,
4889,
540,
11,
20497,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
4269,
18250,
355,
336,
198,
11748,
384,
397,
1211,
355,
3013,
82,
198,
11748,
10558,
88,
355,
599,
198,
6738,... | 2.233728 | 676 |
default_values = {
"ranksep": "0.4",
"nodesep": "0.4",
"font": "arial",
"bold_font": "arial bold",
"fontcolor": "grey15",
"edgecolor": "grey15",
"clear": "white"
}
digraph_template = """
digraph cfn_template {{
graph [
rankdir="TB";
fontname="{font}";
]
concentrate=true;
ratio=compress;
ranksep="{ranksep}";
nodesep="{nodesep}";
node [shape=box,fontname="{font}",fontcolor="{fontcolor}"];
edge [arrowhead="vee",color="{edgecolor}"];
subgraph everything_but_resources {{
color="{clear}";
{outputs_cluster}
{conditions_cluster}
{mappings_cluster}
{parameters_cluster}
{non_resource_edges} [style=invis]
}}
{resources_cluster}
}}
"""
subgraph_template = """
subgraph cluster_{label} {{
label="{label}";
fontsize="36";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=record]
{element_type} [
label="{{{elements}}}";
]
}}
"""
resource_subgraph_template = """
subgraph cluster_resources {{
label="Resources";
fontsize="36";
fontname="{font}";
fontcolor="grey35";
color="{clear}";
node [fontsize=14, shape=none]
{resource_nodes}
{resource_edges}
}}
"""
resource_type_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" COLSPAN="2"><FONT POINT-SIZE="10">{type_path}</FONT><BR/>{type}</TD>
</TR>
'''
resource_name_row_view = '''
<TR>
<TD BORDER="0" CELLPADDING="10" COLSPAN="2"><FONT POINT-SIZE="28" FACE="{bold_font}" COLOR="{clear}">{name}</FONT></TD>
</TR>
'''
resource_attribute_row_view = '''
<TR>
<TD BORDER="0" BGCOLOR="{clear}" VALIGN="top" ALIGN="RIGHT">{name}:</TD>
<TD BORDER="0" BGCOLOR="{clear}" ALIGN="LEFT">{values}</TD>
</TR>
'''
resource_node_view = '''
{name} [
label=<
<TABLE BORDER="1" BGCOLOR="grey65" COLOR="grey15" CELLPADDING="3" CELLSPACING="0">
{type_row}
{name_row}
{attribute_rows}
</TABLE>
>
]
'''
| [
198,
12286,
62,
27160,
796,
1391,
198,
220,
220,
220,
366,
43027,
325,
79,
1298,
366,
15,
13,
19,
1600,
198,
220,
220,
220,
366,
77,
4147,
538,
1298,
366,
15,
13,
19,
1600,
198,
220,
220,
220,
366,
10331,
1298,
366,
36098,
1600,
... | 2.061523 | 1,024 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Niccolò Bonacchi
# @Date: Thursday, January 31st 2019, 4:12:19 pm
from iblrig.poop_count import poop
if __name__ == "__main__":
poop()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
8377,
4033,
127,
110,
7979,
330,
11072,
198,
2,
2488,
10430,
25,
3635,
11,
3269,
3261,
301,
13130,
1... | 2.402439 | 82 |
import numpy as np
import pandas as pd
if __name__ == "__main__":
d = pd.read_csv('your.txt')
dt = DecisionTree(d, 'ID3')
print(dt.tree)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
288,
796,
279,
67,
13,
961,
62,
40664,
10786,
14108,
13,
14116,
11537,
... | 2.202899 | 69 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import pickle
from whiten_norm import Whitening1d, whiten_tensor_svd
from decorrelated_batch_norm import DBN
# from norm_tests import get_corrcoef
file = "/Users/tiany/Downloads/input.pkl"
with open(file, 'rb') as f:
data = pickle.load(f)
# wn = Whitening1d(data.shape[1], eps=0)
dbn = DBN(data.shape[1], eps=0, num_channels=1, dim=2, affine=False)
# x =
# data = torch.rand_like(torch.from_numpy(data)).numpy()
# data = torch.rand((64, 512)).numpy()
# print(np.abs(np.corrcoef(data, rowvar=False)).mean())
print(get_corrcoef(data))
# y = whiten_tensor_svd(torch.from_numpy(data)).numpy()
# y = wn(torch.from_numpy(data)).numpy()
breakpoint()
y = dbn(torch.from_numpy(data)).numpy()
# print(np.abs(np.corrcoef(y, rowvar=False)).mean())
print(get_corrcoef(y))
breakpoint()
x = np.array([
[-1, 0],
[1, 0]
])
print(get_corrcoef(x))
print(np.cov(x, rowvar=False))
# class DecorBatchNorm1d(nn.Module):
# def __init__(self, num_features, num_groups=32, num_channels=0, ndim=2, eps=1e-5, momentum=0.1, gamma=True, beta=True):
# super(DecorBatchNorm1d, self).__init__()
# if num_channels > 0:
# num_groups = num_features // num_channels
# self.num_features = num_features
# self.num_groups = num_groups
# assert self.num_features % self.num_groups == 0
# self.dim = dim
# self.eps = eps
# self.mmomentum = momentum
# # self.affine = affine
# self.gamma = gamma
# self.beta = beta
# self.mode = mode
# self.ndim = ndim
# # if self.affine:
# # self.weight = nn.Parameter(torch.Tensor(self.num_features))
# # self.bias = nn.Parameter(torch.Tensor(self.num_features))
# self.register_parameter('weight', nn.Parameter(torch.ones(num_features)) if gamma else None)
# self.register_parameter('bias', nn.Parameter(torch.zeros(num_features)) if beta else None)
# self.register_buffer('running_mean', torch.zeros(num_features))
# self.register_buffer('running_projection', torch.eye(num_features))
# self.reset_parameter()
# def reset_parameter(self):
# if self.gamma: nn.init.ones_(self.weight)
# if self.beta: nn.init.zeros_(self.bias)
# def forward(self, x):
# if self.training:
# mean = x.mean(dim=1, keepdim=True)
# self.running_mean = (1-self.momentum) * self.running_mean + self.mmomentum * mean
# x = x - mean
# cov = x.matmut(x.t()) / x.size(1) + self.eps * torch.eye()
# u, eig, _ = cov.cpu().svd()
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
6738,
20542,
268,
62,
27237,
1330,
13183,
3101,
16,
67,
11,
2... | 2.114486 | 1,284 |
import webrtcvad, os, wave, contextlib, collections, argparse
if __name__=='__main__':
parser = argparse.ArgumentParser(description='training script')
parser.add_argument('--in_dir', type=str, help='type dataset for trimming')
parser.add_argument('--out_dir', type=str, help='type dataset for trimming')
args = parser.parse_args()
if not args.in_dir or not args.out_dir:
parser.error('--in_dir and --out_dir should be given')
in_dir = args.in_dir
out_dir = args.out_dir
# ------ trimming scilence using VAD
os.makedirs(out_dir, exist_ok=True)
trim(in_dir, out_dir)
| [
11748,
356,
1671,
23047,
85,
324,
11,
28686,
11,
6769,
11,
4732,
8019,
11,
17268,
11,
1822,
29572,
628,
628,
198,
361,
11593,
3672,
834,
855,
6,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
4... | 2.584362 | 243 |
#! /usr/bin/env python
"""SetAdmin launcher.
"""
import sys
import pkgloader
pkgloader.require('skytools', '3.0')
import pgq.cascade.admin
if __name__ == '__main__':
script = pgq.cascade.admin.CascadeAdmin('cascade_admin', 'node_db', sys.argv[1:], worker_setup = False)
script.start()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
7248,
46787,
24008,
13,
198,
37811,
198,
198,
11748,
25064,
198,
198,
11748,
279,
10025,
29356,
198,
35339,
29356,
13,
46115,
10786,
15688,
31391,
3256,
705,
18,
13,
15,
... | 2.6 | 115 |
from rest_framework import authentication
from rest_framework import exceptions
from dataloaderinterface.models import SiteRegistration
| [
6738,
1334,
62,
30604,
1330,
18239,
198,
6738,
1334,
62,
30604,
1330,
13269,
198,
198,
6738,
4818,
282,
1170,
263,
39994,
13,
27530,
1330,
14413,
47133,
628
] | 5.111111 | 27 |
import logging
from pprint import pformat
from uuid import uuid4
from rdflib import Graph, parser, plugin, serializer
from rdflib.resource import Resource
from rdflib.namespace import RDF
from lakesuperior.model.ldpr import Ldpr
from lakesuperior.model.ldp_nr import LdpNr
from lakesuperior.model.ldp_rs import LdpRs, Ldpc, LdpDc, LdpIc
from lakesuperior.config_parser import config
from lakesuperior.env import env
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
from lakesuperior.exceptions import (
IncompatibleLdpTypeError, InvalidResourceError, ResourceExistsError,
ResourceNotExistsError, TombstoneError)
LDP_NR_TYPE = nsc['ldp'].NonRDFSource
LDP_RS_TYPE = nsc['ldp'].RDFSource
rdfly = env.app_globals.rdfly
logger = logging.getLogger(__name__)
class LdpFactory:
'''
Generate LDP instances.
The instance classes are based on provided client data or on stored data.
'''
@staticmethod
@staticmethod
def from_stored(uid, repr_opts={}, **kwargs):
'''
Create an instance for retrieval purposes.
This factory method creates and returns an instance of an LDPR subclass
based on information that needs to be queried from the underlying
graph store.
N.B. The resource must exist.
@param uid UID of the instance.
'''
#logger.info('Retrieving stored resource: {}'.format(uid))
imr_urn = nsc['fcres'][uid]
rsrc_meta = rdfly.get_metadata(uid)
#logger.debug('Extracted metadata: {}'.format(
# pformat(set(rsrc_meta.graph))))
rdf_types = set(rsrc_meta.graph[imr_urn : RDF.type])
if LDP_NR_TYPE in rdf_types:
logger.info('Resource is a LDP-NR.')
rsrc = LdpNr(uid, repr_opts, **kwargs)
elif LDP_RS_TYPE in rdf_types:
logger.info('Resource is a LDP-RS.')
rsrc = LdpRs(uid, repr_opts, **kwargs)
else:
raise ResourceNotExistsError(uid)
# Sneak in the already extracted metadata to save a query.
rsrc._metadata = rsrc_meta
return rsrc
@staticmethod
def from_provided(uid, mimetype, stream=None, **kwargs):
'''
Determine LDP type from request content.
@param uid (string) UID of the resource to be created or updated.
@param mimetype (string) The provided content MIME type.
@param stream (IOStream | None) The provided data stream. This can be
RDF or non-RDF content, or None. In the latter case, an empty container
is created.
'''
uri = nsc['fcres'][uid]
if not stream:
# Create empty LDPC.
logger.info('No data received in request. '
'Creating empty container.')
inst = Ldpc(uid, provided_imr=Resource(Graph(), uri), **kwargs)
elif __class__.is_rdf_parsable(mimetype):
# Create container and populate it with provided RDF data.
input_rdf = stream.read()
gr = Graph().parse(data=input_rdf, format=mimetype, publicID=uri)
#logger.debug('Provided graph: {}'.format(
# pformat(set(provided_gr))))
provided_imr = Resource(gr, uri)
# Determine whether it is a basic, direct or indirect container.
if Ldpr.MBR_RSRC_URI in gr.predicates() and \
Ldpr.MBR_REL_URI in gr.predicates():
if Ldpr.INS_CNT_REL_URI in gr.predicates():
cls = LdpIc
else:
cls = LdpDc
else:
cls = Ldpc
inst = cls(uid, provided_imr=provided_imr, **kwargs)
# Make sure we are not updating an LDP-RS with an LDP-NR.
if inst.is_stored and LDP_NR_TYPE in inst.ldp_types:
raise IncompatibleLdpTypeError(uid, mimetype)
if kwargs.get('handling', 'strict') != 'none':
inst._check_mgd_terms(inst.provided_imr.graph)
else:
# Create a LDP-NR and equip it with the binary file provided.
provided_imr = Resource(Graph(), uri)
inst = LdpNr(uid, stream=stream, mimetype=mimetype,
provided_imr=provided_imr, **kwargs)
# Make sure we are not updating an LDP-NR with an LDP-RS.
if inst.is_stored and LDP_RS_TYPE in inst.ldp_types:
raise IncompatibleLdpTypeError(uid, mimetype)
logger.info('Creating resource of type: {}'.format(
inst.__class__.__name__))
try:
types = inst.types
except (TombstoneError, ResourceNotExistsError):
types = set()
return inst
@staticmethod
def is_rdf_parsable(mimetype):
'''
Checks whether a MIME type support RDF parsing by a RDFLib plugin.
@param mimetype (string) MIME type to check.
'''
try:
plugin.get(mimetype, parser.Parser)
except plugin.PluginException:
return False
else:
return True
@staticmethod
def is_rdf_serializable(mimetype):
'''
Checks whether a MIME type support RDF serialization by a RDFLib plugin
@param mimetype (string) MIME type to check.
'''
try:
plugin.get(mimetype, serializer.Serializer)
except plugin.PluginException:
return False
else:
return True
@staticmethod
def mint_uid(parent_uid, path=None):
'''
Mint a new resource UID based on client directives.
This method takes a parent ID and a tentative path and returns an LDP
resource UID.
This may raise an exception resulting in a 404 if the parent is not
found or a 409 if the parent is not a valid container.
@param parent_uid (string) UID of the parent resource. It must be an
existing LDPC.
@param path (string) path to the resource, relative to the parent.
@return string The confirmed resource UID. This may be different from
what has been indicated.
'''
if path and path.startswith('/'):
raise ValueError('Slug cannot start with a slash.')
# Shortcut!
if not path and parent_uid == '/':
return '/' + split_if_legacy(str(uuid4()))
if not parent_uid.startswith('/'):
raise ValueError('Invalid parent UID: {}'.format(parent_uid))
parent = LdpFactory.from_stored(parent_uid)
if nsc['ldp'].Container not in parent.types:
raise InvalidResourceError(parent_uid,
'Parent {} is not a container.')
pfx = parent_uid.rstrip('/') + '/'
if path:
cnd_uid = pfx + path
if not rdfly.ask_rsrc_exists(cnd_uid):
return cnd_uid
return pfx + split_if_legacy(str(uuid4()))
| [
11748,
18931,
198,
198,
6738,
279,
4798,
1330,
279,
18982,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
198,
6738,
374,
67,
2704,
571,
1330,
29681,
11,
30751,
11,
13877,
11,
11389,
7509,
198,
6738,
374,
67,
2704,
571,
13,
31092,
... | 2.223074 | 3,129 |
__version__ = '2.0.2.1cb'
| [
834,
9641,
834,
796,
705,
17,
13,
15,
13,
17,
13,
16,
21101,
6,
198
] | 1.733333 | 15 |
ID = input("Enter your 11 digit account number:") # 04230647978
encry = "*" * 5
start_ID = ID[0:4]
end_ID = ID[9:]
final_ID = start_ID + encry + end_ID
print(final_ID)
| [
198,
2389,
796,
5128,
7203,
17469,
534,
1367,
16839,
1848,
1271,
25,
4943,
220,
1303,
657,
3682,
1270,
2414,
3720,
3695,
198,
198,
12685,
563,
796,
366,
9,
1,
1635,
642,
198,
9688,
62,
2389,
796,
4522,
58,
15,
25,
19,
60,
198,
437... | 2.310811 | 74 |
# to count the number of positive and negative numbers
invoke_function()
| [
2,
284,
954,
262,
1271,
286,
3967,
290,
4633,
3146,
628,
198,
198,
37669,
62,
8818,
3419,
198
] | 4.222222 | 18 |
import torch
import itertools
from pytorch_lightning.callbacks import ModelCheckpoint
from data_preparation import token_padding, characters_padding
| [
11748,
28034,
198,
11748,
340,
861,
10141,
198,
6738,
12972,
13165,
354,
62,
2971,
768,
13,
13345,
10146,
1330,
9104,
9787,
4122,
198,
198,
6738,
1366,
62,
3866,
1845,
341,
1330,
11241,
62,
39231,
11,
3435,
62,
39231,
628,
198
] | 3.8 | 40 |
# Generated by Django 2.2.10 on 2020-04-08 11:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
940,
319,
12131,
12,
3023,
12,
2919,
1367,
25,
1507,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
1... | 3.065574 | 61 |
from sys import stdin
### Fasta Reading Functions ###############
#################################################################################################################
#################################################################################################################
| [
6738,
25064,
1330,
14367,
259,
198,
198,
21017,
12549,
64,
11725,
40480,
220,
220,
1303,
7804,
4242,
2235,
628,
198,
198,
29113,
29113,
29113,
14468,
2,
628,
198,
198,
29113,
29113,
29113,
14468,
2,
198
] | 8.657143 | 35 |
import nltk
from nltk import data
data.path.append(r'D:\NL2SQL\nltk_data')
#from nltk.book import *
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle') | [
11748,
299,
2528,
74,
198,
6738,
299,
2528,
74,
1330,
1366,
198,
7890,
13,
6978,
13,
33295,
7,
81,
6,
35,
7479,
32572,
17,
17861,
59,
77,
2528,
74,
62,
7890,
11537,
198,
198,
2,
6738,
299,
2528,
74,
13,
2070,
1330,
1635,
628,
19... | 2.447761 | 67 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.service import ServiceError
from tests.pytest_plugins.gcc import with_system_gcc, without_system_gcc
from tests.test_main import main
@with_system_gcc
@without_system_gcc
if __name__ == "__main__":
main()
| [
2,
15069,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 3.26506 | 166 |
import DataTranslate
if __name__ == "__main__":
main()
| [
11748,
6060,
8291,
17660,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
201,
198,
220,
220,
220,
1388,
3419,
201,
198
] | 1.948718 | 39 |
#!/usr/bin/env python3
import logging
import sys
import cv2
import time
import os.path
from PIL import Image
import imageUtils
import videoUtils
import numpy
import requests
from mastodon import Mastodon
import config_mastodon as cf
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
dirname = os.path.dirname(__file__)
mastodon = Mastodon(
client_id = cf.credentials['consumer_key'],
client_secret = cf.credentials['consumer_secret'],
access_token = cf.credentials['access_token'],
api_base_url = cf.credentials['base_url']
)
mastodon.log_in(
username = cf.credentials['login'],
password = cf.credentials['password'],
scopes = ['read', 'write']
)
if not os.path.isfile(os.path.join(dirname, 'sinceId_mastodon.txt')):
with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
saveFile.write('1')
while True:
with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'r') as readFile:
sinceId = readFile.read()
sinceId = int(sinceId)
sinceId = checkMentions(mastodon, ['light', 'sparkles'], sinceId)
with open(os.path.join(dirname, 'sinceId_mastodon.txt'), 'w') as saveFile:
saveFile.write(str(sinceId))
logger.info('Waiting...')
time.sleep(120) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
18931,
198,
11748,
25064,
198,
11748,
269,
85,
17,
198,
11748,
640,
198,
11748,
28686,
13,
6978,
198,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
2939,
18274,
4487,
1... | 2.57085 | 494 |
import numpy as np
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Dense, Input, Lambda
from keras.optimizers import Adam
from sklearn.metrics import roc_auc_score
from utility.triplet import l2Norm, euclidean_distance, triplet_loss, accuracy
from utility.model import predict
| [
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
30203,
355,
509,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
11,
2746,
62,
6738,
62,
17752,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
23412,
11,
21114,
6814... | 3.37 | 100 |
# -*- coding: utf-8 -*-
"""
Organization Registry - Controllers
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
item = None
if settings.has_module("cms"):
table = s3db.cms_post
_item = db(table.module == module).select(table.id,
table.body,
limitby=(0, 1)).first()
if _item:
if s3_has_role(ADMIN):
item = DIV(XML(_item.body),
BR(),
A(T("Edit"),
_href=URL(c="cms", f="post",
args=[_item.id, "update"],
vars={"module":module}),
_class="action-btn"))
else:
item = XML(_item.body)
elif s3_has_role(ADMIN):
item = DIV(H2(module_name),
A(T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module":module}),
_class="action-btn"))
if not item:
#item = H2(module_name)
# Just redirect to the Facilities Map
redirect(URL(f="facility", args=["map"]))
# tbc
report = ""
response.view = "index.html"
return dict(item=item, report=report)
# -----------------------------------------------------------------------------
def sector():
""" RESTful CRUD controller """
# Pre-processor
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def subsector():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def site():
"""
RESTful CRUD controller
- used by S3SiteAutocompleteWidget(), which doesn't yet support filtering
to just updateable sites
"""
# Pre-processor
s3.prep = prep
return s3_rest_controller()
# -----------------------------------------------------------------------------
def sites_for_org():
"""
Used to provide the list of Sites for an Organisation
- used in User Registration
"""
try:
org = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Org provided!")
else:
table = s3db.org_site
query = (table.organisation_id == org)
records = db(query).select(table.id,
table.name,
orderby=table.name)
result = records.json()
finally:
response.headers["Content-Type"] = "application/json"
return result
# -----------------------------------------------------------------------------
def site_org_json():
"""
Provide the Org(s) belonging to a Site
- unused?
"""
table = s3db.org_site
otable = s3db.org_organisation
query = (table.site_id == request.args[0]) & \
(table.organisation_id == otable.id)
records = db(query).select(otable.id,
otable.name)
response.headers["Content-Type"] = "application/json"
return records.json()
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
"""
Function to decide which Marker to use for Facilities Map
@ToDo: Use Symbology
"""
table = db.org_facility_type
types = record.facility_type_id
if isinstance(types, list):
rows = db(table.id.belongs(types)).select(table.name)
else:
rows = db(table.id == types).select(table.name)
types = [row.name for row in rows]
# Use Marker in preferential order
if "Hub" in types:
marker = "warehouse"
elif "Medical Clinic" in types:
marker = "hospital"
elif "Food" in types:
marker = "food"
elif "Relief Site" in types:
marker = "asset"
elif "Residential Building" in types:
marker = "residence"
#elif "Shelter" in types:
# marker = "shelter"
else:
# Unknown
marker = "office"
if settings.has_module("req"):
# Colour code by open/priority requests
reqs = record.reqs
if reqs == 3:
# High
marker = "%s_red" % marker
elif reqs == 2:
# Medium
marker = "%s_yellow" % marker
elif reqs == 1:
# Low
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "office").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# -----------------------------------------------------------------------------
def facility():
""" RESTful CRUD controller """
# Pre-processor
s3.prep = prep
s3.postp = postp
output = s3_rest_controller(rheader=s3db.org_rheader)
return output
# -----------------------------------------------------------------------------
def facility_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def office_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation_type():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def organisation():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_organisation_controller()
# -----------------------------------------------------------------------------
def org_search():
"""
Organisation REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller(module, "organisation")
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
def office():
""" RESTful CRUD controller """
# Defined in the Model for use from Multiple Controllers for unified menus
return s3db.org_office_controller()
# -----------------------------------------------------------------------------
def person():
""" Person controller for AddPersonWidget """
s3.prep = prep
return s3_rest_controller("pr", "person")
# -----------------------------------------------------------------------------
def room():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def mailing_list():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
# Only groups with a group_type of 5
response.s3.filter = (table.group_type == 5)
table.group_type.writable = False
table.group_type.readable = False
table.name.label = T("Mailing List Name")
s3.crud_strings[tablename] = s3.pr_mailing_list_crud_strings
# define the list_fields
list_fields = s3db.configure(tablename,
list_fields = ["id",
"name",
"description",
])
# Components
_rheader = s3db.pr_rheader
_tabs = [(T("Organisation"), "organisation/"),
(T("Mailing List Details"), None),
]
if len(request.args) > 0:
_tabs.append((T("Members"), "group_membership"))
if "viewing" in request.vars:
tablename, record_id = request.vars.viewing.rsplit(".", 1)
if tablename == "org_organisation":
table = s3db[tablename]
_rheader = s3db.org_rheader
_tabs = []
s3db.add_component("pr_group_membership", pr_group="group_id")
rheader = lambda r: _rheader(r, tabs = _tabs)
return s3_rest_controller("pr",
"group",
rheader=rheader)
# -----------------------------------------------------------------------------
def donor():
""" RESTful CRUD controller """
tablename = "org_donor"
table = s3db[tablename]
tablename = "org_donor"
s3.crud_strings[tablename] = Storage(
title_create = ADD_DONOR,
title_display = T("Donor Details"),
title_list = T("Donors Report"),
title_update = T("Edit Donor"),
title_search = T("Search Donors"),
subtitle_create = T("Add New Donor"),
label_list_button = T("List Donors"),
label_create_button = ADD_DONOR,
label_delete_button = T("Delete Donor"),
msg_record_created = T("Donor added"),
msg_record_modified = T("Donor updated"),
msg_record_deleted = T("Donor deleted"),
msg_list_empty = T("No Donors currently registered"))
s3db.configure(tablename, listadd=False)
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests for Sites """
return s3db.req_match()
# -----------------------------------------------------------------------------
def incoming():
"""
Incoming Shipments for Sites
@unused
"""
return inv_incoming()
# END =========================================================================
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
220,
220,
220,
12275,
33432,
532,
2345,
36667,
198,
37811,
198,
198,
21412,
796,
2581,
13,
36500,
198,
411,
454,
66,
12453,
796,
2581,
13,
8818,
198,
... | 2.348387 | 4,650 |
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging as log
import os
import pytest
import mender.config.config as config
GLOBAL_TESTDATA = {
"InventoryPollIntervalSeconds": 200,
"RootfsPartA": "/dev/hda2",
"RootfsPartB": "/dev/hda3",
"ServerURL": "https://hosted.mender.io",
"TenantToken": """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJtZW5k
ZXIudGVuYW50IjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4IiwiaXNzIjoiTWVuZ
GVyIiwic3ViIjoiNTllMGIwNzA3ZDZmMGQwMGYwYzFmZTM4In0.uAw2KPrwH6DPT
2ZnDLm4p6lZPlIDbK07QA2I4qcWrLQ7R-WVEuQSx4WmlXYPAgRGU0zeOPiRW-i9_faoY
56tJuLA2-DRMPcoQTn9kieyu8eCB60-gMg10RPa_XCwTAIot8eBjUSPSxjTvFm0pZ3N8
GeBi412EBUw_N2ZVsdto4bhivOZHzJwS5qZoRrCY15_5qa6-9lVbSWVZdzAjoruZKteH
a_KSGtDdg_586QZRzDUXH-kwhItkDJz5LlyiWXpVpk3f4ujX8iwk-u42WBwYbuWN4g
Ti4mNozX4tR_C9OgE-Xf3vmFkIBc_JfJeNUxsp-rPKERDrVxA_sE2l0OVoEZzcquw3c
df2ophsIFIu7scEWavKjZlmEm_VB6vZVfy1NtMkq1xJnrzssJf-eDYti-CJM3E6lSsO
_OmbrDbLa4-bxl8GJjRNH86LX6UOxjgatxaZyKEZhDG-gK6_f57c7MiA0KglOGuA
GNWAxI8A7jyOqKOvY3iemL9TvbKpoIP""",
}
LOCAL_TESTDATA = {
"InventoryPollIntervalSeconds": 100,
"UpdatePollIntervalSeconds": 100,
"RetryPollIntervalSeconds": 100,
}
@pytest.fixture(scope="session", name="local_and_global")
@pytest.fixture(scope="session", name="global_only")
@pytest.fixture(scope="session", name="local_only")
@pytest.fixture(scope="session", name="local_priority")
| [
2,
15069,
33448,
8342,
13,
13670,
7054,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 2.016461 | 972 |
import json
import os
import time
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('instance')
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
198,
198,
9288,
10745,
430,
62,
4774,
82,
796,
1332,
10745,
430,
13,
26791,
13,
504,
856,
62,
16737,
13,
2025,
... | 2.641026 | 78 |
import keyboard
import smtplib
from threading import Timer
from datetime import date, datetime
SEND_REPORT_EVERY = 60
EMAIL_ADDRESS = ''
EMAIL_PASSWORD = ''
if __name__=="__main__":
keylogger = keylogger(interval=SEND_REPORT_EVERY, report_method="file")
keylogger.start() | [
11748,
10586,
201,
198,
11748,
895,
83,
489,
571,
201,
198,
6738,
4704,
278,
1330,
5045,
263,
201,
198,
6738,
4818,
8079,
1330,
3128,
11,
4818,
8079,
201,
198,
201,
198,
50,
10619,
62,
2200,
15490,
62,
36,
5959,
56,
796,
3126,
201,
... | 2.453782 | 119 |
# The MIT License
#
# Copyright (C) 2008-2009 Floris Bruynooghe
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import unittest
import psi
class ProcessAttributeTests(unittest.TestCase):
"""Check the bahaviour of some process attributes
Some process attributes must be present on all processes, these
tests check for this.
"""
if __name__ == '__main__':
unittest.main()
| [
2,
383,
17168,
13789,
198,
2,
198,
2,
15069,
357,
34,
8,
3648,
12,
10531,
4432,
271,
8274,
2047,
78,
519,
258,
198,
2,
198,
2,
15069,
357,
34,
8,
3648,
12,
10531,
2275,
346,
29719,
12052,
13,
198,
2,
198,
2,
198,
2,
2448,
3411... | 3.569343 | 411 |
import os
import tempfile
from contextlib import contextmanager
from typing import Generator, Optional
from unittest.mock import patch, Mock, call
import pytest # type: ignore
from click.testing import CliRunner, Result
import purgeraw.main
from purgeraw.index_extraction import indexer
| [
11748,
28686,
198,
11748,
20218,
7753,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
19720,
1330,
35986,
11,
32233,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
44123,
11,
869,
198,
198,
11748,
12972,
9288,
220,
1303,
... | 3.779221 | 77 |
import pytest
import json
import os
import logging
from importers.common.helpers import clean_html
log = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.realpath(__file__)) + '/'
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
@pytest.mark.unit
| [
11748,
12972,
9288,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
18931,
198,
6738,
848,
3816,
13,
11321,
13,
16794,
364,
1330,
3424,
62,
6494,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
14421,
6... | 2.487047 | 193 |
import setuptools
modules = [
'code_gen',
'coupling',
'graph',
'ir',
'mapping',
'sim',
'transformations',
]
setuptools.setup(name='pairs',
description="A code generator for particle simulations",
version="0.0.1",
long_description=readme(),
long_description_content_type="text/markdown",
author="Rafael Ravedutti Lucio Machado",
license='MIT',
author_email="rafael.r.ravedutti@fau.de",
url="https://github.com/rafaelravedutti/pairs",
install_requires=[],
packages=['pairs'] + [f"pairs.{mod}" for mod in modules],
package_dir={'pairs': 'src/pairs'},
package_data={'pairs': ['runtime/*.hpp']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
project_urls={
"Bug Tracker": "https://github.com/rafaelravedutti/pairs",
"Documentation": "https://github.com/rafaelravedutti/pairs",
"Source Code": "https://github.com/rafaelravedutti/pairs",
},
extras_require={},
tests_require=[],
python_requires=">=3.6",
)
| [
11748,
900,
37623,
10141,
628,
198,
198,
18170,
796,
685,
198,
220,
220,
220,
705,
8189,
62,
5235,
3256,
198,
220,
220,
220,
705,
66,
280,
11347,
3256,
198,
220,
220,
220,
705,
34960,
3256,
198,
220,
220,
220,
705,
343,
3256,
198,
... | 2.423729 | 472 |
#THIS CODE IS AN EXAMPLE HOW TO COMPUTE VARIOUS PROPERTIES
import numpy as np
import wemrr
milestones = [2.45,2.7,3.5,4.5,5.5,7.0,9.0]
#==================================================
#Compute steady state K and mean first passage time
#===================================================
K,t,Nhit = wemrr.compute_kernel(milestones)
#MFPT from r=2.7 to r=7.0
print(wemrr.MFPT(K,t,1,5))
#==================================================
#Compute equilibrium K and free energy profile
#===================================================
G = wemrr.free_energy(K,t,milestones,radial=True)
print(G)
#===================================================
#Compute MFPT with error bars
#===================================================
N_total = 300
interval = 10
K_list = wemrr.Monte_Carlo_bootstrapping(N_total,K,t,Nhit,interval)
print(K_list)
mfpt_list = []
for i in range(len(K_list)):
mfpt_list.append(wemrr.MFPT(K_list[i],t,1,5))
mfpt_list = np.array(mfpt_list)
mfpt_mean = np.mean(mfpt_list)
mfpt_std = np.std(mfpt_list)
mfpt_err = 1.96*mfpt_std #95% confidence interval
print("Mean First Passage Time = ",mfpt_mean," +/- ",mfpt_err)
| [
2,
43559,
42714,
3180,
3537,
7788,
2390,
16437,
29630,
5390,
24301,
37780,
569,
33604,
20958,
4810,
3185,
17395,
11015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
356,
76,
21062,
220,
198,
198,
25433,
30637,
796,
685,
17,
13,
... | 2.725352 | 426 |
from classes.specializedRequesters import CMSReqMD5, CMSReqString, CMSReqRegex
| [
6738,
6097,
13,
20887,
1143,
16844,
8586,
1330,
40773,
3041,
80,
12740,
20,
11,
40773,
3041,
80,
10100,
11,
40773,
3041,
80,
3041,
25636,
628
] | 3.2 | 25 |
import requests
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import csv
data = []
with open('you.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader)
datum = {}
for idx, row in enumerate(reader):
mod = (idx % 6)
if mod < 5:
if mod == 0:
datum['grade'] = str(row[0])
elif mod == 1:
datum['channel'] = str(row[0])
elif mod == 2:
datum['uploads'] = int(row[0].replace(
',', '')) if row[0] != '--' else 0
elif mod == 3:
datum['subscribers'] = int(row[0].replace(
',', '')) if row[0] != '--' else 0
elif mod == 4:
datum['views'] = int(row[0].replace(
',', '')) if row[0] != '--' else 0
else:
data.append(datum)
datum = {}
df = pd.DataFrame(data)
df.head()
df.to_csv('formatted_youtube_data.csv')
#print (idx, row)
| [
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
269,
21370,
198,
7890,
796,
17635,
198,
4480,
1280,
10786,
5832,
13,
40664,
11537,
355,
... | 1.814617 | 561 |
from social_core.backends.eveonline import EVEOnlineOAuth2
| [
6738,
1919,
62,
7295,
13,
1891,
2412,
13,
44655,
25119,
1330,
32356,
14439,
23621,
1071,
17,
198
] | 3.470588 | 17 |
"""rt (i.e. Radio Telescopes) module is for handling real telescope meta-data."""
import os
import glob
import pickle
import dreambeam.telescopes
class TelescopeBndStn(object):
"""Model of one station and one band of a telescope."""
feed_pat = None
def __init__(self, stnPos, stnRot):
"""Set the station's position and attitude."""
self.stnPos = stnPos
self.stnRot = stnRot
def getEJones(self):
"""Create ejones for station based on antenna patterns."""
ejones = None
return ejones
class TelescopesWiz():
"""Database over available telescopes patterns."""
def telbndmdl2dirfile(self, tscopename, band, beammodel):
"""Map tscopename, band, beammodel tuple to file-path. file-path is a tuple
of (absolute_directory, filename), where
absolute_directory=/path-to-telescopes/TELESCOPENAME/data/
and
filename BAND_MODEL.teldat.p"""
metadata_dir = "data/" #subdir within telescope dir with telbnd metadata.
#Currently it only maps requests to filename
tbdata_fname = band+"_"+beammodel+".teldat.p"
tbdata_dir = self.telescopes_dir+"/"+tscopename+"/"+metadata_dir
return tbdata_dir, tbdata_fname
| [
37811,
17034,
357,
72,
13,
68,
13,
8829,
34495,
13920,
8,
8265,
318,
329,
9041,
1103,
24344,
13634,
12,
7890,
526,
15931,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
2298,
293,
198,
11748,
4320,
40045,
13,
37524,
3798,
13920,
628,... | 2.481336 | 509 |
import json
from dataclasses import field, dataclass
from typing import List, Dict, Generator
import requests
from gcp_pilot import exceptions
from gcp_pilot.base import GoogleCloudPilotAPI, DiscoveryMixin, ResourceType
@dataclass
@dataclass
__all__ = (
"Text",
"Section",
"Card",
"ChatsBot",
"ChatsHook",
)
| [
11748,
33918,
198,
6738,
4818,
330,
28958,
1330,
2214,
11,
4818,
330,
31172,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
35986,
198,
198,
11748,
7007,
198,
198,
6738,
308,
13155,
62,
79,
23439,
1330,
13269,
198,
6738,
308,
13155,
... | 2.746032 | 126 |
"""Wrapping R.
This module contains all functionality related to the use of functions from R for testing purposes.
"""
import numpy as np
import rpy2.robjects.packages as rpackages
from rpy2 import robjects
from rpy2.robjects import numpy2ri
r_package_cond_mvnorm = rpackages.importr("condMVNorm")
def r_cond_mvn(mean, cov, dependent_ind, given_ind, given_value):
"""The original function for `cond_mvn`."""
numpy2ri.activate()
r_mean = robjects.FloatVector(mean)
n = cov.shape[0]
r_cov = robjects.r.matrix(cov, n, n)
r_dependent_ind = robjects.IntVector([x + 1 for x in dependent_ind])
r_given_ind = robjects.IntVector([x + 1 for x in given_ind])
r_given_value = robjects.FloatVector(given_value)
args = (r_mean, r_cov, r_dependent_ind, r_given_ind, r_given_value)
r_cond_mean, r_cond_cov = r_package_cond_mvnorm.condMVN(*args)
r_cond_mean, r_cond_cov = np.array(r_cond_mean), np.array(r_cond_cov)
numpy2ri.deactivate()
return r_cond_mean, r_cond_cov
| [
37811,
36918,
2105,
371,
13,
198,
198,
1212,
8265,
4909,
477,
11244,
3519,
284,
262,
779,
286,
5499,
422,
371,
329,
4856,
4959,
13,
198,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
374,
9078,
17,
13,
22609,
752,
82,
... | 2.44686 | 414 |
"""izi/store.py.
A collecton of native stores which can be used with, among others, the session middleware.
Copyright (C) 2018 DiepDT-IZIGlobal
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from izi.exceptions import StoreKeyNotFound
class InMemoryStore:
"""
Naive store class which can be used for the session middleware and unit tests.
It is not thread-safe and no data will survive the lifecycle of the izi process.
Regard this as a blueprint for more useful and probably more complex store implementations, for example stores
which make use of databases like Redis, PostgreSQL or others.
"""
def get(self, key):
"""Get data for given store key. Raise izi.exceptions.StoreKeyNotFound if key does not exist."""
try:
data = self._data[key]
except KeyError:
raise StoreKeyNotFound(key)
return data
def exists(self, key):
"""Return whether key exists or not."""
return key in self._data
def set(self, key, data):
"""Set data object for given store key."""
self._data[key] = data
def delete(self, key):
"""Delete data for given store key."""
if key in self._data:
del self._data[key]
| [
37811,
528,
72,
14,
8095,
13,
9078,
13,
198,
198,
32,
2824,
261,
286,
6868,
7000,
543,
460,
307,
973,
351,
11,
1871,
1854,
11,
262,
6246,
3504,
1574,
13,
198,
198,
15269,
357,
34,
8,
2864,
6733,
79,
24544,
12,
14887,
3528,
75,
2... | 3.239826 | 688 |
__doc__ = """Unit-testing the `.tools` package.
"""
__author__ = "Rui Campos"
import _cmd
import sys
del sys.argv[1]
import numpy as np
import unittest as ut
# Importing
from MontyCarlo.types import PySTATE
from MontyCarlo.particles.photons import python_hooks
Photon = python_hooks.Photon
class input_val:
"""A namespace indicating input values.
"""
pass
class ground_truth:
"""A namespace indicating groundtruth.
"""
pass
class output_val:
"""A namespace indicating calculated values.
"""
pass
class test_Photon(ut.TestCase):
"""Unit testing photons.
"""
# A basic set-up for holding one particle -----------------------
print("SETTING UP")
from MontyCarlo.geometry.CSG import Sphere
from MontyCarlo.geometry.CSG import InfiniteVolume
from MontyCarlo.materials.materials import Mat
from MontyCarlo._init import eax
print("Creating photon...")
photon = Photon()
print("Creating water...")
water = Mat({1:2, 8:1}, 1)
print("Creating geometry...")
with InfiniteVolume() as OUTER:
OUTER.fill(water)
OUTER.configure("no_name", render = False)
with Sphere(1) as sphere:
sphere in OUTER
sphere.fill(water)
sphere.configure("no_name", render = False)
print("Setting current region...")
photon.current_region = sphere
print("UPDATING")
photon.update_references()
photon.update_imfp()
print("DONE. STARTING TESTS")
# ----------------------------------------------------------------
def test_updates(self):
"""Checks for segmentation errors when calling update methods.
"""
print("\n\nTESTING UPDATES")
cls = test_Photon
cls.photon.update_references()
cls.photon.update_imfp()
if __name__ == '__main__':
ut.main()
| [
834,
15390,
834,
796,
37227,
26453,
12,
33407,
262,
4600,
13,
31391,
63,
5301,
13,
198,
37811,
198,
198,
834,
9800,
834,
796,
366,
49,
9019,
5425,
418,
1,
628,
198,
198,
11748,
4808,
28758,
198,
11748,
25064,
198,
12381,
25064,
13,
... | 2.594521 | 730 |
import sys, os.path, logging, time, atexit, glob
logger = None
logfile = sys.stderr
logfile_handler = None
NUM_RECENT_LOGS = 5
try:
PipeError = BrokenPipeError
except NameError: # for py2
PipeError = IOError
class TimestampFilter(logging.Filter):
"""Adds a timestamp attribute to the LogRecord, if enabled"""
time0 = time.time()
enable = False
class ColorizingFormatter(logging.Formatter):
"""This Formatter inserts color codes into the string according to severity"""
_default_format = "%(name)s%(timestamp)s: {<{<%(severity)s%(message)s>}>}"
_default_format_boring = "%(name)s%(timestamp)s: %(severity)s%(message)s"
_boring_formatter = logging.Formatter(_default_format_boring)
_colorful_formatter = ColorizingFormatter(_default_format)
_default_console_handler = MultiplexingHandler()
| [
11748,
25064,
11,
28686,
13,
6978,
11,
18931,
11,
640,
11,
379,
37023,
11,
15095,
198,
198,
6404,
1362,
796,
6045,
198,
6404,
7753,
796,
25064,
13,
301,
1082,
81,
198,
6404,
7753,
62,
30281,
796,
6045,
198,
198,
41359,
62,
38827,
35... | 2.897527 | 283 |
"""Functions to calculate two-point correlations.
"""
import numpy as np
import pandas as pd
from scipy.fftpack import fft, ifft
from scipy.linalg import toeplitz
try:
from progress import getLogger
except ImportError:
from logging import getLogger
from .helpers import is_number_like, is_string_like, get_nfft
# Helpers
# ===========================================================================
def corr_mat(x, maxlag=None):
    """Build the Toeplitz correlation matrix from a correlation array.

    Parameters:
    ===========
    x: array-like
        Correlation array in the uncentered form returned by e.g.
        acorr, xcorr (lag 0 first, negative lags at the end).
        NOT centered!
    maxlag: int
        Maximum lag to consider (should be < len(x) / 2).
    """
    # Resulting layout (c_l = correlation at lag l):
    # | c_0   c_1  ...  c_L |
    # | c_-1  c_0  ...      |
    # | ...                 |
    # | c_-L  ...       c_0 |
    if not maxlag:
        # full matrix: first column walks the negative lags (tail reversed)
        first_column = np.concatenate([[x[0]], x[:0:-1]])
        return toeplitz(first_column, x)
    # truncated matrix: column covers lags 0, -1, ...; row covers lags 0, 1, ...
    first_column = np.concatenate([[x[0]], x[:-maxlag:-1]])
    return toeplitz(first_column, x[:maxlag])
def xcorrshift(x, maxlag=None, as_pandas=False):
    """Return shifted (cross- / auto) correlation to center lag zero.

    Parameters:
    ===========
    x: array-like or pandas object
        Uncentered correlation (lag 0 first, negative lags at the end),
        as returned by e.g. acorr, xcorr.
    maxlag: int [optional]
        Maximum lag to keep on either side. Default: len(x) // 2.
    as_pandas: bool [optional]
        Force pandas output (DataFrame for 2-D input, else Series).

    Returns:
    ========
    Slice of x covering lags -maxlag ... +maxlag; for pandas objects
    the index is set to the lag values.
    """
    if not maxlag:
        maxlag = len(x) // 2
    # force pandas output?
    if as_pandas and not hasattr(x, 'iloc'):
        if len(np.shape(x)) > 1:
            x = pd.DataFrame(x)
        else:
            x = pd.Series(x)
    # lag indices -maxlag ... +maxlag; negatives wrap around to the tail
    ix = np.arange(-maxlag, maxlag + 1, dtype=int)
    if hasattr(x, 'iloc'):
        xs = x.iloc[ix]
        xs.index = ix
    else:
        try:
            xs = x[ix]
        except (TypeError, IndexError, KeyError):
            # plain sequences (e.g. lists) don't support fancy indexing;
            # previously a bare `except:` hid genuine errors here
            xs = np.asanyarray(x)[ix]
    return xs
def fftcrop(x, maxlag):
    """Crop an uncentered fft/correlation array to +-maxlag lags.

    Keeps the first maxlag entries (lags 0 ... maxlag-1) and the last
    maxlag entries (negative lags), preserving the standard uncentered
    ordering that starts at lag 0.
    """
    head, tail = x[:maxlag], x[-maxlag:]
    return np.concatenate([head, tail])
def padded_xcorr_norm(nfft, pad, debias=False):
    """Return a vector of weights to normalise xcorr computed with
    zero-padded ffts.

    Without padding (pad <= 0) every lag shares the single weight nfft.
    With padding but debias=False, the single weight is ndat = nfft - pad.
    With debias=True, a per-lag weight vector is returned that counts the
    overlapping data points at each lag (floored at nmp).

    Parameters:
    ===========
    nfft: int
        Length of the fft segment(s)
    pad: int
        Number of padded zeros
    debias: bool [optional]
        Return per-lag weights that correct the zero-padding bias.
    """
    ndat = nfft - pad
    if pad <= 0:
        # no padding: all lags normalised by the full segment length
        return nfft * np.ones(1)
    if not debias:
        # biased normalisation: constant weight equal to the data length
        return ndat * np.ones(1)
    # per-lag overlap counts, clipped from below at nmp
    nmp = max(1, ndat - pad)
    descending = np.arange(ndat, nmp, -1)                          # lag0, lag1, ...
    plateau = nmp * np.ones(max(0, nfft - 2 * (ndat - nmp) + 1))   # lags > ndat
    ascending = np.arange(nmp + 1, ndat, 1)                        # ...lag-1
    return np.concatenate([descending, plateau, ascending])
# For arrays
# ===========================================================================
def xcorr(
    x, y,
    norm='corr',
    nfft='auto',
    subtract_mean=True,
    debias=False,
    e=0
):
    """Return cross-correlation or covariance calculated using FFT.

    Parameters:
    -----------
    x, y: array-like (1-D)
        Time series to analyse.
    norm: [optional]
        How to normalise the result
        "corr": Return correlation, i.e. r \\in [-1, 1] (default).
        "cov": Return covariance. E.g. the peak of an autocorrelation
            will have the height var(x) = var(y)
        int, float:
            Normalise result by this number.
    nfft: int, str [optional]
        How to set the length of the FFT (default: 'auto').
        'len': Always use len(x), exact for periodic x, y.
        'pad': Pad length to next power of two.
        'demix': Zero-pad to demix causal and anti-causal part, giving
            the exact result for an aperiodic signal.
        'auto': Equal to 'len' for short series and 'pad' for long series
            for better performance. This setting is appropriate when
            the maximum lag of interest is much smaller than half the
            signal length.
        int: Passed through to fft.
    subtract_mean: bool [optional]
        Subtract the signals' means (default: True).
    debias: bool [optional]
        True: Correct the bias from zero-padding if applicable.
            This corresponds to the assumption that x, y are segments
            of two stationary processes.
            The SNR will decrease with |lag| because the number of
            data points decreases.
        False: Don't correct. This corresponds to the assumption that x and y
            are zero outside of the observed range. As a consequence,
            the correlation (or covariance) converges to zero for long lags.
        Default: False because the bias is only significant compared to the
            noise level when many short segments are averaged. It is also
            consistent with similar functions like e.g. numpy.correlate.
    e: float [optional]
        Small epsilon to add to normalisation. This avoids e.g. blowing
        up correlations when the variances of x, y are extremely small.
        Default: 0.

    Notes:
    -----
    The Fourier transform relies on the assumption that x and y are periodic.
    This may create unexpected results for long lags in time series that are
    shorter than the correlation length. To mitigate this effect, consider
    nfft='pad'.
    The output is uncentered, use xcorrshift to center.
    The parameter combination
        nfft='pad', norm=1, subtract_mean=False, debias=False
    corresponds to numpy.correlate with mode='full'.
    """
    lx = len(x)
    assert lx == len(y), "Arrays must have the same length"
    # padding for demixing and higher performance
    # crop_pad records whether the result must be cropped back to the
    # original length after a padded transform
    crop_pad = False
    if nfft == 'auto':
        # heuristic switch-over: padding pays off only for long series
        if lx >= 10**4:
            nfft = 'pad'
        else:
            nfft = 'len'
    if nfft == 'demix':
        # next power of two, then doubled: fully separates causal and
        # anti-causal parts of the circular correlation
        nfft = int(2**(np.ceil(np.log2(len(x))) + 1))
        crop_pad = True
    elif nfft == 'pad':
        nfft = int(2**(np.ceil(np.log2(len(x)))))
        crop_pad = True
    elif nfft == 'len':
        nfft = lx
    else:
        assert nfft == int(nfft), "nfft must be either 'pad', 'len', or an int"
    # flatten arrays to 1 dimension, extracts values from pd.Dataframe too
    x = np.ravel(x)
    y = np.ravel(y)
    # fourier transform of x
    if subtract_mean:
        # normally the mean is subtracted from the signal
        # NOTE: this rebinds x, so the `x is y` fast path below only
        # triggers when subtract_mean is False and ravel returned the
        # same object for both inputs
        x = x-np.mean(x)
    xfft = fft(x, n=nfft)
    # fourier transform of y: reuse xfft when both names refer to the
    # identical array (autocorrelation fast path)
    if x is y:
        yfft = xfft
    else:
        if subtract_mean:
            y = y-np.mean(y)
        yfft = fft(y, n=nfft)
    # inverse transform: circular (cross-)correlation via the
    # correlation theorem; imaginary residue is numerical noise
    r = np.real(ifft(xfft * np.conjugate(yfft)))
    del xfft, yfft
    # normalisation: per-lag weights accounting for zero-padding
    ly = padded_xcorr_norm(nfft, nfft - len(y), debias=debias)
    if norm == "cov":
        n = ly
    elif is_number_like(norm):
        # explicit numeric normalisation supplied by the caller
        n = np.asanyarray(norm, dtype=float)
    else:
        # default "corr": divide covariance by the standard deviations
        n = ly
        if x is y:
            n *= np.var(x)
        else:
            n *= np.std(x) * np.std(y)
    # done; e guards against division by (near-)zero variance
    r = r / (n + e)
    if crop_pad:
        # drop the padded lags, back to the uncentered length-lx layout
        r = fftcrop(r, lx)
    return r
def acorr(y, **kwargs):
    """Return the autocorrelation of y.

    Thin wrapper equivalent to xcorr(y, y, **kwargs); see xcorr for the
    documentation of the keyword arguments.
    """
    return xcorr(y, y, **kwargs)
# For pandas
# ===========================================================================
def xcorr_grouped_df(
    df,
    cols,
    by = 'date',
    nfft = 'pad',
    funcs = (lambda x: x, lambda x: x),
    subtract_mean = 'total',
    norm = 'total',
    return_df = True,
    debias = True,
    **kwargs
):
    """Group dataframe and calc cross correlation for each group separately.

    Returns: mean and std over groups.

    Parameters:
    ===========
    df: pandas.DataFrame
        input time series, must include the columns
        for which we calculate the xcorr and the one by which we group.
    cols: list of str
        columns with the time series' of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        Twice the maximal lag measured. default: 'pad'
        'len': use smallest group size.
        'pad > 100': zero pad to next power of two of smallest group size
            larger than 100. I.e. at least 128.
        ... see get_nfft for more details
    funcs: list of functions [optional]
        functions to apply to cols before calculating the xcorr.
        default: identity (lambda x: x)
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None: subtract nothing
        default: 'total'
    norm: str [optional]
        Normalisation. default: 'total' (normalise each day to cov, then
        the end result by total cov, giving approx. a correlation.)
        Other values are passed to xcorr and used on each day separately.
    return_df: bool
        Return a pandas.DataFrame. Default: True.
    debias: bool [optional]
        True: Correct the bias from zero-padding if applicable (default).
        False: Don't debias.
    **kwargs are passed through. see also: acorr, xcorr, acorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    # we always need columns
    cols = list(cols)
    df = df[np.unique(cols)]
    g = g[cols]
    # determine fft segment size; groups shorter than events_required
    # are skipped below
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: one uncentered correlation column per group
    acd = np.zeros((2*maxlag, len(g)))
    # what to subtract
    fdf0 = None
    fdf1 = None
    if subtract_mean in ('total', 'auto'):
        # global means of the transformed series
        # must match normalisation code below
        fdf0 = funcs[0](df[cols[0]])
        fdf1 = funcs[1](df[cols[1]])
        subtract = [
            fdf0.mean(),
            fdf1.mean(),
        ]
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let xcorr subtract the per-group mean instead
        subtract = [0,0]
        sm = True
    else:
        subtract = [0,0]
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day and later divide by global cov.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(nfft)
            # average over minimally overlapping segments of length nfft
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                x += xcorr(
                    funcs[0](gs[cols[0]][tj[j]:tj[j]+nfft]) - subtract[0],
                    funcs[1](gs[cols[1]][tj[j]:tj[j]+nfft]) - subtract[1],
                    subtract_mean=sm,
                    norm = nd,
                    nfft = nfft,
                    debias = debias,
                    **kwargs
                )
            # segment average, cropped to +-maxlag (uncentered layout)
            acd[:,i] = fftcrop(x / nit, maxlag)
            del x
    # average over groups (skipped groups contribute zero columns;
    # compensated by the discarded_days factor below)
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1.
    if norm in ("total", "auto"):
        if fdf0 is None:
            # maybe we didn't calculate these yet
            # must match subtract code above!
            fdf0 = funcs[0](df[cols[0]])
            fdf1 = funcs[1](df[cols[1]])
        # from cross covariance to cross correlation
        n = 1./(np.std(fdf0) * np.std(fdf1))
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
        # rescale so the zero columns of skipped groups don't bias the mean
        n *= len(g) / float(len(g) - discarded_days)
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(-maxlag,maxlag+1)), name='lag')
        return pd.DataFrame({
            'xcorr': xcorrshift(acdm, maxlag),
            'xcorr_std': xcorrshift(acde, maxlag),
        }, index=lag)
    else:
        return acdm, acde
def acorr_grouped_df(
    df,
    col = None,
    by = 'date',
    nfft = 'pad',
    func = lambda x: x,
    subtract_mean = 'total',
    norm = 'total',
    return_df = True,
    debias = True,
    **kwargs
):
    """Group dataframe and calc autocorrelation for each group separately.

    Returns: mean and std over groups for positive lags only.

    Parameters:
    ===========
    df: pandas.DataFrame, pandas.Series
        input time series. If by is a string, df must include the column
        for which we calculate the autocorr and the one by which we group.
        If by is a series, df can be a series, too.
    col: str, None [optional]
        column with the time series of interest.
    by: str [optional]
        column by which to group. default: 'date'
    nfft: int, str [optional]
        twice the maximal lag measured. default: 'pad'
        'auto': use smallest group size.
        'auto pad > 100': zero pad to segments of length >= 200,
            skip days with fewer events
    func: function [optional]
        function to apply to col before calculating the autocorr.
        default: identity.
    subtract_mean: str [optional]
        what to subtract from the time series before calculating the
        autocorr.
        'total': subtract mean of the whole series from each group
        'group': subtract group mean from each group
        None: subtract nothing
        default: 'total'
    norm: str [optional]
        default: 'total' (normalise mean response to one at lag zero).
        Other values are passed to acorr and used per group.
    debias: bool [optional]
        True: Correct the bias from zero-padding if applicable (default).
        False: Don't debias.
    **kwargs are passed through. see also: acorr, xcorr, xcorr_grouped_df
    """
    # group, allocate, slice
    g = df.groupby(by)
    if not col:
        if (
            is_string_like(by)
            and hasattr(df, 'columns')
            and by in df.columns
        ):
            # we just got two columns, one is group, so it's clear what to do
            col = list(df.columns)
            col.remove(by)
        elif len(df.shape) > 1:
            # unclear what to do
            raise ValueError
    # determine fft segment size; groups shorter than events_required
    # are skipped below
    nfft, events_required = get_nfft(nfft, g)
    maxlag = int(min(nfft//2, events_required))
    # allocate: positive lags only, one column per group
    acd = np.zeros((maxlag + 1, len(g)))
    # what to subtract
    # NOTE(review): fdf is never used below -- apparently a leftover from
    # the analogous caching in xcorr_grouped_df.
    fdf = None
    if subtract_mean in ('total', 'auto'):
        # global mean of the transformed series
        subtract = func(df[col]).mean()
        sm = False
    elif subtract_mean in ('group', 'each', True, by):
        # let acorr subtract the per-group mean instead
        subtract = 0
        sm = True
    else:
        subtract = 0
        sm = False
    # which norm for each day?
    if norm in ("total", "auto"):
        # calculate covariances for each day, later norm to one giving a corr.
        nd = 'cov'
    else:
        nd = norm
    # do it
    discarded_days = 0
    for i, (gk, gs) in enumerate(g):
        if len(gs) < events_required:
            # this day is too short
            discarded_days += 1
            continue
        else:
            x = np.zeros(maxlag+1)
            # average over minimally overlapping segments of length nfft
            nit = int(np.ceil(len(gs) / float(nfft)))
            tj = np.unique(np.linspace(0, len(gs)-nfft, nit, dtype=int))
            for j in range(nit):
                # keep only the non-negative lags
                x += acorr(
                    func(gs[col][tj[j]:tj[j]+nfft]) - subtract,
                    subtract_mean=sm,
                    norm = nd,
                    nfft = nfft,
                    debias = debias,
                    **kwargs
                )[:maxlag+1]
            acd[:,i] = x / nit
            del x
    # average over groups
    acdm = acd.mean(axis=1)
    acde = acd.std(axis=1)
    n = 1
    if norm in ("total", "auto"):
        # norm to one at lag zero
        n = 1./acdm[0]
    elif discarded_days:
        # rescale so the zero columns of skipped groups don't bias the mean
        n = len(g) / float(len(g) - discarded_days)
    if discarded_days:
        getLogger(__name__).info(
            "Discarded %i %ss < %i events" % (
                discarded_days, by, events_required
            )
        )
    acdm *= n
    acde *= n
    # done
    if return_df:
        lag = pd.Index(list(range(maxlag+1)), name='lag')
        return pd.DataFrame({
            'acorr': acdm,
            'acorr_std': acde,
        }, index=lag)
    else:
        return acdm, acde
| [
37811,
24629,
2733,
284,
15284,
734,
12,
4122,
35811,
13,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
629,
541,
88,
13,
487,
83,
8002,
1330,
277,
701,
11,
611,
701,
198,
6738... | 2.06058 | 8,270 |
# Surface matching demo: detect a 3D model in a scene with OpenCV's
# Point Pair Features (PPF) detector, then refine the best poses with ICP.
import cv2 as cv

# number of best matches to refine with ICP
N = 2

# PLY point clouds (with normals) expected under data/
modelname = "parasaurolophus_6700"
scenename = "rs1_normals"

# PPF detector with relative sampling step 0.025 and distance step 0.05
detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05)

print('Loading model...')
pc = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % modelname, 1)

print('Training...')
detector.trainModel(pc)

print('Loading scene...')
pcTest = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % scenename, 1)

# match with relative scene sample step 1/40 and distance step 0.05
print('Matching...')
results = detector.match(pcTest, 1.0/40.0, 0.05)

# ICP refinement (max 100 iterations) of the N best candidate poses
print('Performing ICP...')
icp = cv.ppf_match_3d_ICP(100)
_, results = icp.registerModelToScene(pc, pcTest, results[:N])

print("Poses: ")
for i, result in enumerate(results):
    print("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n" % (result.modelIndex, result.numVotes, result.residual, result.pose))
    if i == 0:
        # write the model transformed by the best pose for inspection
        pct = cv.ppf_match_3d.transformPCPose(pc, result.pose)
        cv.ppf_match_3d.writePLY(pct, "%sPCTrans.ply" % modelname)
| [
11748,
269,
85,
17,
355,
269,
85,
198,
198,
45,
796,
362,
198,
19849,
3672,
796,
366,
1845,
292,
559,
3225,
2522,
385,
62,
3134,
405,
1,
198,
1416,
268,
12453,
796,
366,
3808,
16,
62,
27237,
874,
1,
198,
198,
15255,
9250,
796,
2... | 2.233333 | 420 |
"""
Write a function that finds the number of times a sub-string occurs in a given string and
also the position (index number) at which the sub-string is found.
Example:
main_string = 'Let it be, let it be, let it be'
sub_string = 'let it be'
Expected output:
number of times the sub-string occurs = 2, positions = [11, 22]
""" | [
37811,
198,
16594,
257,
2163,
326,
7228,
262,
1271,
286,
1661,
257,
850,
12,
8841,
8833,
287,
257,
1813,
4731,
290,
220,
198,
14508,
262,
2292,
357,
9630,
1271,
8,
379,
543,
262,
850,
12,
8841,
318,
1043,
13,
198,
198,
16281,
25,
... | 3.27 | 100 |
from flask import Flask, request, render_template, redirect
from flask_cors import CORS, cross_origin
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
import psycopg2
import jinja2
import json, ast
from sendgrid.helpers.mail import *
from flask_mail import Mail, Message
import boto3, botocore
import logistic_reg as model
from werkzeug.utils import secure_filename
from io import BytesIO
import io
import base64
app = Flask(__name__)
api = Api(app)
CORS(app)
#comment
conn, cur = initDB()
import os
@app.route("/")
@app.route("/dbinfo")
api.add_resource(Students, '/students/<id>/<adbool>')
@app.route("/postResponse", methods = ['POST'])
@app.route("/addCollegeQuestions", methods = ['GET'])
@app.route("/removeWatchList", methods = ['GET'])
@app.route("/getWatchList", methods = ['GET'])
@app.route("/addWatchList", methods = ['GET'])
@app.route("/getApplicationPool")
@app.route("/addAdmin", methods = ['POST'])
@app.route("/getQuestions", methods = ['GET'])
@app.route("/getStudentResponse", methods = ['GET'])
@app.route("/getCategories", methods=['GET'])
@app.route("/getData", methods = ['GET'])
@app.route("/getCollegeStatsMajor", methods=['GET'])
@app.route("/getCollegeCountMajor", methods=['GET'])
@app.route("/getCollegeCountSex", methods=['GET'])
@app.route("/getCollegeCountRace", methods=['GET'])
@app.route("/getCollegeStats", methods = ['GET'])
@app.route("/getCollegeInfo", methods = ['GET'])
@app.route("/getColleges", methods = ['GET'])
@app.route("/getrecommendedColleges", methods = ['GET'])
# using SendGrid's Python Library
# https://github.com/sendgrid/sendgrid-python
# import sendgrid
# import os
# from sendgrid.helpers.mail import *
# sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
# from_email = Email("sanatmouli@gmail.com")
# to_email = Email("sanatmouli@gmail.com")
# subject = "Sending with SendGrid is Fun"
# content = Content("text/plain", "and easy to do anywhere, even with Python")
# mail = Mail(from_email, subject, to_email, content)
# response = sg.client.mail.send.post(request_body=mail.get())
# print(response.status_code)
# print(response.body)
# print(response.headers)
@app.route("/sendEmail/<email_id>/<collegename>", methods = ['GET'])
@app.route("/sendEmailtoStudent/<email_id>/<fname>", methods = ['GET'])
#@app.route("/sendEmailAccept/<email_id>/<collegename>/<studentname>", methods = ['GET'])
#@app.route("/sendEmailReject/<email_id>/<collegename>/<studentname>", methods = ['GET'])
@app.route("/sendEmailStatus/<email_id>/<collegename>/<studentid>/<accept_status>", methods = ['GET'])
@app.route("/putStudents", methods = ['POST'])
@app.route("/getStudents/<uid>", methods = ['GET'])
@app.route("/setCollegeDetails/<collegename>", methods = ['POST'])
@app.route("/setCollegeQuestions/<collegename>", methods = ['POST'])
@app.route("/getCollegeName", methods = ['GET'])
@app.route("/getIDType/<sid>", methods=['GET'])
@app.route("/getCollegeNameForUID/<uid>", methods=['GET'])
@app.route("/getStudentsForCollegeName/<collegename>", methods=['GET'])
@app.route("/getListOfAcceptedStudents/<collegename>", methods=['GET'])
@app.route("/getStatsEachStudent", methods=['GET'])
@app.route("/getCollegeStatsEachMajor", methods=['GET'])
@app.route("/postImage", methods=['POST'])
#@cross_origin(origin='http://localhost:4200',headers=['Content-Type','Authorization','Access-Control-Allow-Origin','Access-Control-Allow-Methods'])
if __name__ == '__main__':
conn, cur = initDB()
app.run(debug=True)
| [
6738,
42903,
1330,
46947,
11,
2581,
11,
8543,
62,
28243,
11,
18941,
198,
6738,
42903,
62,
66,
669,
1330,
327,
20673,
11,
3272,
62,
47103,
198,
6738,
42903,
62,
2118,
913,
1330,
20857,
11,
5949,
72,
198,
6738,
33918,
1330,
45514,
198,
... | 2.791894 | 1,283 |
from django.contrib.auth.models import UserManager as BaseUserManager
from django.db.models.query import QuerySet
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
13511,
355,
7308,
12982,
13511,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
22766,
1330,
43301,
7248,
628,
198
] | 3.625 | 32 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.shared_file_system.v2 import availability_zone as az
from openstack.tests.unit import base
# Canonical availability-zone ID reused across these unit tests.
IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c'
# Example availability-zone resource body used to populate the fixture.
EXAMPLE = {
    "id": IDENTIFIER,
    "name": "nova",
    "created_at": "2021-01-21T20:13:55.000000",
    "updated_at": None,
}
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.088889 | 270 |
from selenium import webdriver | [
6738,
384,
11925,
1505,
1330,
3992,
26230
] | 4.285714 | 7 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/TheThingsNetwork/api/router/router.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from github_com.TheThingsNetwork.api import api_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2
from github_com.TheThingsNetwork.api.protocol import protocol_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2
from github_com.TheThingsNetwork.api.gateway import gateway_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2
from github_com.TheThingsNetwork.api.trace import trace_pb2 as github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/TheThingsNetwork/api/router/router.proto',
package='router',
syntax='proto3',
serialized_options=b'\n\037org.thethingsnetwork.api.routerB\013RouterProtoP\001Z&github.com/TheThingsNetwork/api/router\252\002\033TheThingsNetwork.API.Router',
serialized_pb=b'\n3github.com/TheThingsNetwork/api/router/router.proto\x12\x06router\x1a\x1bgoogle/protobuf/empty.proto\x1a-github.com/gogo/protobuf/gogoproto/gogo.proto\x1a)github.com/TheThingsNetwork/api/api.proto\x1a\x37github.com/TheThingsNetwork/api/protocol/protocol.proto\x1a\x35github.com/TheThingsNetwork/api/gateway/gateway.proto\x1a\x31github.com/TheThingsNetwork/api/trace/trace.proto\"\x12\n\x10SubscribeRequest\"\xcd\x01\n\rUplinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12\x35\n\x11protocol_metadata\x18\x0b \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x0c \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xe3\x01\n\x0f\x44ownlinkMessage\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12?\n\x16protocol_configuration\x18\x0b \x01(\x0b\x32\x19.protocol.TxConfigurationB\x04\xc8\xde\x1f\x00\x12=\n\x15gateway_configuration\x18\x0c \x01(\x0b\x32\x18.gateway.TxConfigurationB\x04\xc8\xde\x1f\x00\x12\x1b\n\x05trace\x18\x15 \x01(\x0b\x32\x0c.trace.Trace\"\xbe\x03\n\x17\x44\x65viceActivationRequest\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\"\n\x07message\x18\x02 \x01(\x0b\x32\x11.protocol.Message\x12T\n\x07\x64\x65v_eui\x18\x0b \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x44\x65vEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.DevEUI\x12T\n\x07\x61pp_eui\x18\x0c \x01(\x0c\x42\x43\xe2\xde\x1f\x06\x41ppEUI\xc8\xde\x1f\x00\xda\xde\x1f\x31github.com/TheThingsNetwork/ttn/core/types.AppEUI\x12\x35\n\x11protocol_metadata\x18\x15 \x01(\x0b\x32\x14.protocol.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x33\n\x10gateway_metadata\x18\x16 \x01(\x0b\x32\x13.gateway.RxMetadataB\x04\xc8\xde\x1f\x00\x12\x39\n\x13\x61\x63tivation_metadata\x18\x17 \x01(\x0b\x32\x1c.protocol.ActivationMetadata\x12\x1b\n\x05trace\x18\x1f 
\x01(\x0b\x32\x0c.trace.Trace\"\x1a\n\x18\x44\x65viceActivationResponse\"9\n\x14GatewayStatusRequest\x12!\n\ngateway_id\x18\x01 \x01(\tB\r\xe2\xde\x1f\tGatewayID\"Q\n\x15GatewayStatusResponse\x12\x11\n\tlast_seen\x18\x01 \x01(\x03\x12%\n\x06status\x18\x02 \x01(\x0b\x32\x0f.gateway.StatusB\x04\xc8\xde\x1f\x00\"\x0f\n\rStatusRequest\"\x88\x02\n\x06Status\x12 \n\x06system\x18\x01 \x01(\x0b\x32\x10.api.SystemStats\x12&\n\tcomponent\x18\x02 \x01(\x0b\x32\x13.api.ComponentStats\x12\"\n\x0egateway_status\x18\x0b \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x06uplink\x18\x0c \x01(\x0b\x32\n.api.Rates\x12\x1c\n\x08\x64ownlink\x18\r \x01(\x0b\x32\n.api.Rates\x12\x1f\n\x0b\x61\x63tivations\x18\x0e \x01(\x0b\x32\n.api.Rates\x12\x1a\n\x12\x63onnected_gateways\x18\x15 \x01(\r\x12\x19\n\x11\x63onnected_brokers\x18\x16 \x01(\r2\x90\x02\n\x06Router\x12:\n\rGatewayStatus\x12\x0f.gateway.Status\x1a\x16.google.protobuf.Empty(\x01\x12\x39\n\x06Uplink\x12\x15.router.UplinkMessage\x1a\x16.google.protobuf.Empty(\x01\x12@\n\tSubscribe\x12\x18.router.SubscribeRequest\x1a\x17.router.DownlinkMessage0\x01\x12M\n\x08\x41\x63tivate\x12\x1f.router.DeviceActivationRequest\x1a .router.DeviceActivationResponse2\x91\x01\n\rRouterManager\x12L\n\rGatewayStatus\x12\x1c.router.GatewayStatusRequest\x1a\x1d.router.GatewayStatusResponse\x12\x32\n\tGetStatus\x12\x15.router.StatusRequest\x1a\x0e.router.StatusBv\n\x1forg.thethingsnetwork.api.routerB\x0bRouterProtoP\x01Z&github.com/TheThingsNetwork/api/router\xaa\x02\x1bTheThingsNetwork.API.Routerb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2.DESCRIPTOR,github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2.DESCRIPTOR,])
_SUBSCRIBEREQUEST = _descriptor.Descriptor(
name='SubscribeRequest',
full_name='router.SubscribeRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=363,
)
_UPLINKMESSAGE = _descriptor.Descriptor(
name='UplinkMessage',
full_name='router.UplinkMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.UplinkMessage.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.UplinkMessage.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_metadata', full_name='router.UplinkMessage.protocol_metadata', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_metadata', full_name='router.UplinkMessage.gateway_metadata', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.UplinkMessage.trace', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=366,
serialized_end=571,
)
_DOWNLINKMESSAGE = _descriptor.Descriptor(
name='DownlinkMessage',
full_name='router.DownlinkMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.DownlinkMessage.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.DownlinkMessage.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_configuration', full_name='router.DownlinkMessage.protocol_configuration', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_configuration', full_name='router.DownlinkMessage.gateway_configuration', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.DownlinkMessage.trace', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=574,
serialized_end=801,
)
_DEVICEACTIVATIONREQUEST = _descriptor.Descriptor(
name='DeviceActivationRequest',
full_name='router.DeviceActivationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='payload', full_name='router.DeviceActivationRequest.payload', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='router.DeviceActivationRequest.message', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dev_eui', full_name='router.DeviceActivationRequest.dev_eui', index=2,
number=11, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\006DevEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.DevEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='app_eui', full_name='router.DeviceActivationRequest.app_eui', index=3,
number=12, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\006AppEUI\310\336\037\000\332\336\0371github.com/TheThingsNetwork/ttn/core/types.AppEUI', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='protocol_metadata', full_name='router.DeviceActivationRequest.protocol_metadata', index=4,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_metadata', full_name='router.DeviceActivationRequest.gateway_metadata', index=5,
number=22, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_metadata', full_name='router.DeviceActivationRequest.activation_metadata', index=6,
number=23, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trace', full_name='router.DeviceActivationRequest.trace', index=7,
number=31, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=804,
serialized_end=1250,
)
_DEVICEACTIVATIONRESPONSE = _descriptor.Descriptor(
name='DeviceActivationResponse',
full_name='router.DeviceActivationResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1252,
serialized_end=1278,
)
_GATEWAYSTATUSREQUEST = _descriptor.Descriptor(
name='GatewayStatusRequest',
full_name='router.GatewayStatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gateway_id', full_name='router.GatewayStatusRequest.gateway_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\342\336\037\tGatewayID', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1280,
serialized_end=1337,
)
_GATEWAYSTATUSRESPONSE = _descriptor.Descriptor(
name='GatewayStatusResponse',
full_name='router.GatewayStatusResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='last_seen', full_name='router.GatewayStatusResponse.last_seen', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='router.GatewayStatusResponse.status', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1339,
serialized_end=1420,
)
_STATUSREQUEST = _descriptor.Descriptor(
name='StatusRequest',
full_name='router.StatusRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1422,
serialized_end=1437,
)
_STATUS = _descriptor.Descriptor(
name='Status',
full_name='router.Status',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='system', full_name='router.Status.system', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='component', full_name='router.Status.component', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gateway_status', full_name='router.Status.gateway_status', index=2,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uplink', full_name='router.Status.uplink', index=3,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='downlink', full_name='router.Status.downlink', index=4,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activations', full_name='router.Status.activations', index=5,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connected_gateways', full_name='router.Status.connected_gateways', index=6,
number=21, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='connected_brokers', full_name='router.Status.connected_brokers', index=7,
number=22, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1440,
serialized_end=1704,
)
_UPLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_UPLINKMESSAGE.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_UPLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DOWNLINKMESSAGE.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._TXCONFIGURATION
_DOWNLINKMESSAGE.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_DEVICEACTIVATIONREQUEST.fields_by_name['message'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._MESSAGE
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._RXMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['activation_metadata'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_protocol_dot_protocol__pb2._ACTIVATIONMETADATA
_DEVICEACTIVATIONREQUEST.fields_by_name['trace'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_trace_dot_trace__pb2._TRACE
_GATEWAYSTATUSRESPONSE.fields_by_name['status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS
_STATUS.fields_by_name['system'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._SYSTEMSTATS
_STATUS.fields_by_name['component'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._COMPONENTSTATS
_STATUS.fields_by_name['gateway_status'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['uplink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['downlink'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
_STATUS.fields_by_name['activations'].message_type = github_dot_com_dot_TheThingsNetwork_dot_api_dot_api__pb2._RATES
DESCRIPTOR.message_types_by_name['SubscribeRequest'] = _SUBSCRIBEREQUEST
DESCRIPTOR.message_types_by_name['UplinkMessage'] = _UPLINKMESSAGE
DESCRIPTOR.message_types_by_name['DownlinkMessage'] = _DOWNLINKMESSAGE
DESCRIPTOR.message_types_by_name['DeviceActivationRequest'] = _DEVICEACTIVATIONREQUEST
DESCRIPTOR.message_types_by_name['DeviceActivationResponse'] = _DEVICEACTIVATIONRESPONSE
DESCRIPTOR.message_types_by_name['GatewayStatusRequest'] = _GATEWAYSTATUSREQUEST
DESCRIPTOR.message_types_by_name['GatewayStatusResponse'] = _GATEWAYSTATUSRESPONSE
DESCRIPTOR.message_types_by_name['StatusRequest'] = _STATUSREQUEST
DESCRIPTOR.message_types_by_name['Status'] = _STATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SubscribeRequest = _reflection.GeneratedProtocolMessageType('SubscribeRequest', (_message.Message,), {
'DESCRIPTOR' : _SUBSCRIBEREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.SubscribeRequest)
})
_sym_db.RegisterMessage(SubscribeRequest)
UplinkMessage = _reflection.GeneratedProtocolMessageType('UplinkMessage', (_message.Message,), {
'DESCRIPTOR' : _UPLINKMESSAGE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.UplinkMessage)
})
_sym_db.RegisterMessage(UplinkMessage)
DownlinkMessage = _reflection.GeneratedProtocolMessageType('DownlinkMessage', (_message.Message,), {
'DESCRIPTOR' : _DOWNLINKMESSAGE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DownlinkMessage)
})
_sym_db.RegisterMessage(DownlinkMessage)
DeviceActivationRequest = _reflection.GeneratedProtocolMessageType('DeviceActivationRequest', (_message.Message,), {
'DESCRIPTOR' : _DEVICEACTIVATIONREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DeviceActivationRequest)
})
_sym_db.RegisterMessage(DeviceActivationRequest)
DeviceActivationResponse = _reflection.GeneratedProtocolMessageType('DeviceActivationResponse', (_message.Message,), {
'DESCRIPTOR' : _DEVICEACTIVATIONRESPONSE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.DeviceActivationResponse)
})
_sym_db.RegisterMessage(DeviceActivationResponse)
GatewayStatusRequest = _reflection.GeneratedProtocolMessageType('GatewayStatusRequest', (_message.Message,), {
'DESCRIPTOR' : _GATEWAYSTATUSREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.GatewayStatusRequest)
})
_sym_db.RegisterMessage(GatewayStatusRequest)
GatewayStatusResponse = _reflection.GeneratedProtocolMessageType('GatewayStatusResponse', (_message.Message,), {
'DESCRIPTOR' : _GATEWAYSTATUSRESPONSE,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.GatewayStatusResponse)
})
_sym_db.RegisterMessage(GatewayStatusResponse)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
'DESCRIPTOR' : _STATUSREQUEST,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.StatusRequest)
})
_sym_db.RegisterMessage(StatusRequest)
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'github.com.TheThingsNetwork.api.router.router_pb2'
# @@protoc_insertion_point(class_scope:router.Status)
})
_sym_db.RegisterMessage(Status)
DESCRIPTOR._options = None
_UPLINKMESSAGE.fields_by_name['protocol_metadata']._options = None
_UPLINKMESSAGE.fields_by_name['gateway_metadata']._options = None
_DOWNLINKMESSAGE.fields_by_name['protocol_configuration']._options = None
_DOWNLINKMESSAGE.fields_by_name['gateway_configuration']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['dev_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['app_eui']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['protocol_metadata']._options = None
_DEVICEACTIVATIONREQUEST.fields_by_name['gateway_metadata']._options = None
_GATEWAYSTATUSREQUEST.fields_by_name['gateway_id']._options = None
_GATEWAYSTATUSRESPONSE.fields_by_name['status']._options = None
_ROUTER = _descriptor.ServiceDescriptor(
name='Router',
full_name='router.Router',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=1707,
serialized_end=1979,
methods=[
_descriptor.MethodDescriptor(
name='GatewayStatus',
full_name='router.Router.GatewayStatus',
index=0,
containing_service=None,
input_type=github_dot_com_dot_TheThingsNetwork_dot_api_dot_gateway_dot_gateway__pb2._STATUS,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Uplink',
full_name='router.Router.Uplink',
index=1,
containing_service=None,
input_type=_UPLINKMESSAGE,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Subscribe',
full_name='router.Router.Subscribe',
index=2,
containing_service=None,
input_type=_SUBSCRIBEREQUEST,
output_type=_DOWNLINKMESSAGE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='Activate',
full_name='router.Router.Activate',
index=3,
containing_service=None,
input_type=_DEVICEACTIVATIONREQUEST,
output_type=_DEVICEACTIVATIONRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ROUTER)
DESCRIPTOR.services_by_name['Router'] = _ROUTER
_ROUTERMANAGER = _descriptor.ServiceDescriptor(
name='RouterManager',
full_name='router.RouterManager',
file=DESCRIPTOR,
index=1,
serialized_options=None,
serialized_start=1982,
serialized_end=2127,
methods=[
_descriptor.MethodDescriptor(
name='GatewayStatus',
full_name='router.RouterManager.GatewayStatus',
index=0,
containing_service=None,
input_type=_GATEWAYSTATUSREQUEST,
output_type=_GATEWAYSTATUSRESPONSE,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='GetStatus',
full_name='router.RouterManager.GetStatus',
index=1,
containing_service=None,
input_type=_STATUSREQUEST,
output_type=_STATUS,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ROUTERMANAGER)
DESCRIPTOR.services_by_name['RouterManager'] = _ROUTERMANAGER
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
33084,
13,
785,
14,
464,
22248,
26245,
14,
15042,
14,
472,
353,
14,
472,... | 2.436257 | 12,119 |
import os.path
import unittest
from io import StringIO
import numpy as np
from dectree.compiler import compile
from dectree.config import VECTORIZE_PROP
from dectree.transpiler import transpile
| [
11748,
28686,
13,
6978,
198,
11748,
555,
715,
395,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
390,
310,
631,
13,
5589,
5329,
1330,
17632,
198,
6738,
390,
310,
631,
13,
11250,
1330,
56... | 3.15873 | 63 |
# This file is part of 'NTLM Authorization Proxy Server'
# Copyright 2001 Dmitry A. Rozmanov <dima@xenon.spb.ru>
#
# NTLM APS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# NTLM APS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import string, urlparse
http_debug_file_name = 'http.debug'
#-----------------------------------------------------------------------
# tests client's header for correctness
def test_client_http_header(header_str):
""
request = string.split(header_str, '\012')[0]
parts = string.split(request)
# we have to have at least 3 words in the request
# poor check
if len(parts) < 3:
return 0
else:
return 1
#-----------------------------------------------------------------------
# tests server's response header for correctness
def test_server_http_header(header_str):
""
response = string.split(header_str, '\012')[0]
parts = string.split(response)
# we have to have at least 2 words in the response
# poor check
if len(parts) < 2:
return 0
else:
return 1
#-----------------------------------------------------------------------
def extract_http_header_str(buffer):
""
# let's remove possible leading newlines
t = string.lstrip(buffer)
# searching for the RFC header's end
delimiter = '\015\012\015\012'
header_end = string.find(t, delimiter)
if header_end < 0:
# may be it is defective header made by junkbuster
delimiter = '\012\012'
header_end = string.find(t, delimiter)
if header_end >=0:
# we have found it, possibly
ld = len(delimiter)
header_str = t[0:header_end + ld]
# Let's check if it is a proper header
if test_server_http_header(header_str) or test_client_http_header(header_str):
# if yes then let's do our work
if (header_end + ld) >= len(t):
rest_str = ''
else:
rest_str = t[header_end + ld:]
else:
# if not then let's leave the buffer as it is
# NOTE: if there is some junk before right header we will never
# find that header. Till timeout, I think. Not that good solution.
header_str = ''
rest_str = buffer
else:
# there is no complete header in the buffer
header_str = ''
rest_str = buffer
return (header_str, rest_str)
#-----------------------------------------------------------------------
def extract_server_header(buffer):
""
header_str, rest_str = extract_http_header_str(buffer)
if header_str:
header_obj = HTTP_SERVER_HEAD(header_str)
else:
header_obj = None
return (header_obj, rest_str)
#-----------------------------------------------------------------------
def extract_client_header(buffer):
""
header_str, rest_str = extract_http_header_str(buffer)
if header_str:
header_obj = HTTP_CLIENT_HEAD(header_str)
else:
header_obj = None
return (header_obj, rest_str)
#-----------------------------------------------------------------------
def capitalize_value_name(str):
""
tl = string.split(str, '-')
for i in range(len(tl)):
tl[i] = string.capitalize(tl[i])
return string.join(tl, '-')
#-----------------------------------------------------------------------
# some helper classes
#-----------------------------------------------------------------------
class HTTP_HEAD:
    """Parsed representation of one HTTP message head.

    The raw head text is split into a first line (``fields``) and a
    case-insensitive multi-value parameter map (``params``) whose
    first-seen ordering is remembered in ``order_list``.  Presumably
    HTTP_SERVER_HEAD and HTTP_CLIENT_HEAD (referenced elsewhere in this
    file) specialize this class -- confirm against the full file.
    """
    pass
    #-------------------------------
    def __init__(self, head_str):
        """Parse the raw head text *head_str*.

        Sets:
          - head_source: the untouched input text (kept for debugging);
          - fields: first line as [token1, token2, rest-of-line];
          - params: dict mapping lowercased value-name -> list of values;
          - order_list: value-names in order of first appearance.
        """
        self.head_source = ''
        self.params = None
        self.fields = None
        self.order_list = []
        self.head_source = head_str
        head_str = string.strip(head_str)
        # '\012' is LF (octal); records are the individual head lines.
        records = string.split(head_str, '\012')
        # Dealing with response line
        #fields = string.split(records[0], ' ', 2)
        # First two whitespace-separated tokens, then everything else
        # re-joined into one third field.
        t = string.split(string.strip(records[0]))
        fields = t[:2] + [string.join(t[2:])]
        self.fields = []
        for i in fields:
            self.fields.append(string.strip(i))
        # Dealing with params
        params = {}
        order_list = []
        for i in records[1:]:
            parts = string.split(string.strip(i), ':', 1)
            pname = string.lower(string.strip(parts[0]))
            if not params.has_key(pname):
                params[pname] = []
                # pname is already lowercase; the extra lower() is harmless.
                order_list.append(string.lower(pname))
            try:
                params[pname].append(string.strip(parts[1]))
            except:
                # parts[1] is missing (IndexError) when a line has no ':'.
                # NOTE(review): the bare except also hides other errors.
                msg = "ERROR: Exception in head parsing. ValueName: '%s'" % pname
                #print msg
                self.debug(msg)
        self.params = params
        self.order_list = order_list
    #-------------------------------
    def debug(self, message):
        """Append *message* plus the raw head to the debug file.

        Best effort only: IOError is deliberately swallowed.
        http_debug_file_name is a module-level name defined elsewhere
        in this file.
        """
        try:
            f = open(http_debug_file_name, 'a')
            f.write(message)
            f.write('\n=====\n')
            f.write(self.head_source)
            f.close()
        except IOError:
            pass
        # Yes, yes, I know, this is just sweeping it under the rug...
        # TODO: implement a persistent filehandle for logging debug messages to.
    #-------------------------------
    def copy(self):
        """Return an independent deep copy of this head object."""
        import copy
        return copy.deepcopy(self)
    #-------------------------------
    def get_param_values(self, param_name):
        """Return the list of values for *param_name*, [] when absent.

        Lookup is case-insensitive.
        """
        param_name = string.lower(param_name)
        if self.params.has_key(param_name):
            return self.params[param_name]
        else:
            return []
    #-------------------------------
    def del_param(self, param_name):
        """Remove all values of *param_name* (case-insensitive; no-op if absent)."""
        param_name = string.lower(param_name)
        if self.params.has_key(param_name): del self.params[param_name]
    #-------------------------------
    def has_param(self, param_name):
        """Return true when *param_name* is present (case-insensitive)."""
        param_name = string.lower(param_name)
        return self.params.has_key(param_name)
    #-------------------------------
    def add_param_value(self, param_name, value):
        """Append *value* under *param_name*, creating the entry if needed.

        Keeps order_list in sync so serialization order stays stable.
        """
        param_name = string.lower(param_name)
        if not self.params.has_key(param_name):
            self.params[param_name] = []
        if param_name not in self.order_list:
            self.order_list.append(param_name)
        self.params[param_name].append(value)
    #-------------------------------
    def replace_param_value(self, param_name, value):
        """Replace all existing values of *param_name* with the single *value*."""
        self.del_param(param_name)
        self.add_param_value(param_name, value)
    #-------------------------------
    def __repr__(self, delimiter='\n'):
        """Render the head as text, with all Cookie lines emitted last.

        NOTE(review): *delimiter* is accepted but ignored -- the output
        always uses '\\n'.  See send() for the CRLF duplicate of this
        logic and the MSN Messenger note there.
        """
        res = ''
        cookies = ''
        res = string.join(self.fields, ' ') + '\n'
        for i in self.order_list:
            if self.params.has_key(i):
                if i == 'cookie':
                    # Cookie lines are collected separately, emitted last.
                    for k in self.params[i]:
                        cookies = cookies + capitalize_value_name(i) + ': ' + k + '\n'
                else:
                    for k in self.params[i]:
                        res = res + capitalize_value_name(i) + ': ' + k + '\n'
        res = res + cookies
        res = res + '\n'
        return res
    #-------------------------------
    def send(self, socket):
        """Serialize the head with CRLF ('\\015\\012') line ends and send it.

        Returns 1 on success, 0 on any send failure.  Deliberately
        duplicates __repr__() rather than calling it -- see the NOTE
        below about MSN Messenger breakage.
        """
        #"""
        res = ''
        cookies = ''
        res = string.join(self.fields, ' ') + '\015\012'
        for i in self.order_list:
            if self.params.has_key(i):
                if i == 'cookie':
                    for k in self.params[i]:
                        cookies = cookies + capitalize_value_name(i) + ': ' + k + '\015\012'
                else:
                    for k in self.params[i]:
                        res = res + capitalize_value_name(i) + ': ' + k + '\015\012'
        res = res + cookies
        res = res + '\015\012'
        #"""
        #res = self.__repr__('\015\012')
        # NOTE!!! 0.9.1 worked, 0.9.5 and 0.9.7 did not with MSN Messenger.
        # We had problem here that prevent MSN Messenger from working.
        # Some work is needed to make __rerp__ working instead of current code..
        try:
            #socket.send(self.head_source)
            socket.send(res)
            # self.debug(res)
            return 1
        except:
            # NOTE(review): bare except -- any send error simply yields 0.
            return 0
#-----------------------------------------------------------------------
#-----------------------------------------------------------------------
| [
2,
770,
2393,
318,
636,
286,
705,
11251,
31288,
35263,
38027,
9652,
6,
198,
2,
15069,
5878,
45181,
317,
13,
5564,
32054,
709,
1279,
67,
8083,
31,
87,
268,
261,
13,
2777,
65,
13,
622,
29,
198,
2,
198,
2,
399,
14990,
44,
3486,
50,... | 2.394925 | 3,783 |
from .language import Language
__all__ = ['Language']
| [
6738,
764,
16129,
1330,
15417,
628,
198,
834,
439,
834,
796,
37250,
32065,
20520,
198
] | 3.733333 | 15 |
from . import scraper
from .browser import Browser
| [
6738,
764,
1330,
19320,
525,
198,
6738,
764,
40259,
1330,
34270,
198
] | 4.25 | 12 |
import pygame as pg
import constants
from state import GameState
from tower import Tower
| [
11748,
12972,
6057,
355,
23241,
198,
198,
11748,
38491,
198,
6738,
1181,
1330,
3776,
9012,
198,
6738,
10580,
1330,
8765,
628
] | 4.333333 | 21 |
# -*- coding: utf-8 -*-
# @Date : 2019-10-01
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models_search
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params, cur_stages
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
from adamw import AdamW
import random
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
from models_search.ViT_8_8 import matmul, count_matmul
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
10430,
220,
220,
220,
1058,
13130,
12,
940,
12,
486,
198,
2,
2488,
13838,
220,
1058,
1395,
3541,
84,
47142,
357,
5431,
62,
70,
506,
31,
83,
321,
84,
13,
1... | 2.925 | 320 |
from keras.layers import Input, Dense
from keras.models import Model
# This returns a tensor
inputs = Input(shape=(784,))
# a layer instance is callable on a tensor, and returns a tensor
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels) # starts training
| [
6738,
41927,
292,
13,
75,
6962,
1330,
23412,
11,
360,
1072,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
198,
198,
2,
770,
5860,
257,
11192,
273,
198,
15414,
82,
796,
23412,
7,
43358,
16193,
37688,
11,
4008,
198,
198,
2,
257,
7679,... | 2.827103 | 214 |
""" Ichimoku Indicator
"""
import math
import numpy
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
from importlib import import_module
| [
37811,
26364,
320,
11601,
1423,
26407,
198,
37811,
198,
198,
11748,
10688,
198,
198,
11748,
299,
32152,
198,
11748,
19798,
292,
198,
6738,
3305,
571,
1330,
12531,
198,
198,
6738,
4284,
47031,
13,
26791,
1330,
1423,
26407,
18274,
4487,
198... | 3.666667 | 48 |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Gambit Neutral file.
"""
from .core import FormatIO
class ElementGroup(object):
"""
One single element group information in Gambit Neutral file.
@ivar ngp: element group index (1-based).
@type ngp: int
@ivar nelgp: number elements in this group.
@type nelgp: int
@ivar mtyp: material type (0: undefined, 1: conjugate, 2: fluid, 3: porous,
4: solid, 5: deformable).
@type mtyp: int
@ivar nflags: number of solver dependent flags.
@type nflags: int
@ivar solver: array of solver dependent flags of shape of (nflags).
@type solver: numpy.ndarray
@ivar elems: elements array of shape of (nelgp).
@type elems: numpy.ndarray
"""
def _parse(self, data):
"""
Parse given string data for element group. Set all instance variables.
@param data: string data for element group.
@type data: string
@return: nothing
"""
from numpy import fromstring
# parse header.
control, enttype, solver, data = data.split('\n', 3)
# parse control.
self.ngp, self.nelgp, self.mtyp, self.nflags = [
int(val) for val in control.split()[1::2]]
# get name.
self.elmmat = enttype.strip()
# get solver flags.
self.solver = fromstring(solver, dtype='int32', sep=' ')
# parse into array and renumber.
self.elems = fromstring(data, dtype='int32', sep=' ')-1
class BoundaryCondition(object):
"""
Hold boundary condition values.
@cvar CLFCS_RMAP: map clfcs definition back from block object to neutral
object.
@type CLFCS_RMAP: dict
@ivar name: name of boundary condition.
@type name: str
@ivar itype: type of data (0: nodal, 1: elemental).
@type itype: int
@ivar nentry: number of entry (nodes or elements/cells).
@type nentry: int
@ivar nvalues: number of values for each data record.
@type nvalues: int
@ivar ibcode: 1D array of boundary condition code.
@type ibcode: numpy.ndarray
@ivar values: array of values attached to each record.
@type values: numpy.ndarray
"""
def _parse(self, data):
"""
Parse given data string to boundary condition set. Set all instance
variables.
@param data: string data for boundary condition set.
@type data: str
@return: nothing
"""
from numpy import fromstring
# parse header.
header, data = data.split('\n', 1)
self.name = header[:32].strip()
tokens = fromstring(header[32:], dtype='int32', sep=' ')
self.itype, self.nentry, self.nvalues = tokens[:3]
self.ibcode = tokens[3:].copy()
# parse entries.
if self.itype == 0: # for nodes.
arr = fromstring(data, dtype='int32', sep=' ').reshape(
(self.nentry, self.nvalues+1))
self.elems = (arr[:,0]-1).copy()
arr = fromstring(data, dtype='float64', sep=' ').reshape(
(self.nentry, self.nvalues+1))
self.values = (arr[:,1:]).copy()
elif self.itype == 1: # for elements/cells.
arr = fromstring(data, dtype='int32', sep=' ').reshape(
(self.nentry, self.nvalues+3))
self.elems = arr[:,:3].copy()
self.elems[:,0] -= 1
arr = fromstring(data, dtype='float64', sep=' ').reshape(
(self.nentry, self.nvalues+3))
self.values = (arr[:,3:]).copy()
else:
raise ValueError("itype has to be either 0/1, but get %d" %
self.itype)
# define map for clfcs (from block to neu).
CLFCS_RMAP = {}
# tpn=1: edge.
CLFCS_RMAP[1] = [1,2]
# tpn=2: quadrilateral.
CLFCS_RMAP[2] = [1,2,3,4]
# tpn=3: triangle.
CLFCS_RMAP[3] = [1,2,3]
# tpn=4: hexahedron.
CLFCS_RMAP[4] = [5,2,6,4,1,3]
# tpn=5: tetrahedron.
CLFCS_RMAP[5] = [1,2,4,3]
# tpn=6: prism.
CLFCS_RMAP[6] = [4,5,3,1,2]
# tpn=6: pyramid.
CLFCS_RMAP[7] = [5,2,3,4,1]
def tobc(self, blk):
"""
Extract gambit boundary condition information from self into BC object.
Only process element/cell type of (gambit) boundary condition, and
return None while nodal BCs encountered.
@param blk: Block object for reference, nothing will be altered.
@type blk: solvcon.block.Block
@return: generic BC object.
@rtype: solvcon.boundcond.BC
"""
from numpy import empty
from ..boundcond import BC
clfcs_rmap = self.CLFCS_RMAP
# process only element/cell type of bc.
if self.itype != 1:
return None
# extrace boundary face list.
facn = empty((self.nentry,3), dtype='int32')
facn.fill(-1)
ibnd = 0
for entry in self.elems:
icl, nouse, it = entry[:3]
tpn = blk.cltpn[icl]
facn[ibnd,0] = blk.clfcs[icl, clfcs_rmap[tpn][it-1]]
ibnd += 1
# craft BC object.
bc = BC(fpdtype=blk.fpdtype)
bc.name = self.name
slct = facn[:,0].argsort() # sort face list for bc object.
bc.facn = facn[slct]
bc.value = self.values[slct]
# finish.
return bc
class GambitNeutralParser(object):
"""
Parse and store information of a Gambit Neutral file.
@ivar data: data to be parsed.
@type data: str
@ivar neu: GambitNeutral object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNeutral
"""
def __init__(self, data, neu):
"""
@param data: data to be parsed.
@type data: str
@param neu: GambitNeutral object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNeutral
"""
self.data = data
self.neu = neu
processors = {}
def _control_info(data, neu):
"""
Take string data for "CONTROL INFO" and parse it to GambitNeutral
object. Set:
- header
- title
- data_source
- numnp
- nelem
- ngrps
- nbsets
- ndfcd
- ndfvl
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring
data = data.rstrip()
records = data.splitlines()
neu.header = records[1].strip()
neu.title = records[2].strip()
neu.data_source = records[3].strip()
values = fromstring(records[6], dtype='int32', sep=' ')
neu.numnp, neu.nelem, neu.ngrps, \
neu.nbsets, neu.ndfcd, neu.ndfvl = values
processors['CONTROL INFO'] = _control_info
def _nodal_coordinate(data, neu):
"""
Take string data for "NODAL COORDINATES" and parse it to GambitNuetral
object. Set:
- nodes
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring, empty
# discard header.
data = data.split('\n', 1)[-1]
# parse into array and reshape to 2D array.
nodes = fromstring(data, dtype='float64', sep=' ')
nodes = nodes.reshape((neu.numnp, (neu.ndfcd+1)))
# renumber according to first value of each line.
# NOTE: unused number contains garbage.
number = nodes[:,0].astype(int) - 1
newnodes = empty((number.max()+1,neu.ndfcd))
newnodes[number] = nodes[number,1:]
# set result to neu.
neu.nodes = newnodes
processors['NODAL COORDINATE'] = _nodal_coordinate
def _elements_cells(data, neu):
"""
Take string data for "ELEMENTS/CELLS" and parse it to GambitNeutral
object. Set:
- elems
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring, empty
# discard header.
data = data.split('\n', 1)[-1]
# parse into array.
serial = fromstring(data, dtype='int32', sep=' ')
# parse element data -- 1st pass:
# element index, shape, and number of nodes.
meta = empty((neu.nelem, 3), dtype='int32')
ielem = 0
ival = 0
while ielem < neu.nelem:
meta[ielem,:] = serial[ival:ival+3]
ival += 3+meta[ielem,2]
ielem += 1
# parse element data -- 2nd pass:
# node definition.
maxnnode = meta[:,2].max()
elems = empty((neu.nelem, maxnnode+2), dtype='int32')
ielem = 0
ival = 0
while ielem < neu.nelem:
elems[ielem,2:2+meta[ielem,2]] = serial[ival+3:ival+3+meta[ielem,2]]
ival += 3+meta[ielem,2]
ielem += 1
elems[:,:2] = meta[:,1:] # copy the first two columns from meta.
elems[:,2:] -= 1 # renumber node indices in elements.
# set result to neu.
neu.elems = elems
processors['ELEMENTS/CELLS'] = _elements_cells
def _element_group(data, neu):
"""
Take string data for "ELEMENTS GROUP" and parse it to GambitNeutral
object. Set:
- grps
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring, empty
# discard header.
data = data.split('\n', 1)[-1]
# build group.
neu.grps.append(ElementGroup(data))
processors['ELEMENT GROUP'] = _element_group
def _boundary_conditions(data, neu):
"""
Take string data for "BOUNDARY CONDITIONS" and parse it to
GambitNeutral object. Set:
- bcs
@param data: sectional data.
@type data: str
@param neu: object to be saved.
@type neu: solvcon.io.gambit.neutral.GambitNetral
@return: nothing
"""
from numpy import fromstring, empty
# discard header.
data = data.split('\n', 1)[-1]
# build group.
neu.bcs.append(BoundaryCondition(data))
processors['BOUNDARY CONDITIONS'] = _boundary_conditions
class GambitNeutralReader(object):
"""
Read and store information of a Gambit Neutral file line by line.
@ivar neuf: source file.
@itype neuf: file
@ivar neu: GambitNeutral object to be saved to.
@itype neu: solvcon.io.gambit.neutral.GambitNeutral
"""
@staticmethod
@staticmethod
@staticmethod
@classmethod
@staticmethod
@staticmethod
def _read_values(neuf, width, nval, dtype):
"""
Read homogeneous values from the current position of the opened
neutral file.
@param neuf: neutral file.
@type neuf: file
@param width: character width per value.
@type width: int
@param nval: number of values to read.
@type nval: int
@param dtype: dtype string to construct ndarray.
@type dtype: str
@return: read array.
@rtype: numpy.ndarray
"""
from numpy import empty
# determine type.
if dtype.startswith('int'):
vtype = int
elif dtype.startswith('float'):
vtype = float
else:
raise TypeError('%s not supported'%dtype)
# allocate array.
arr = empty(nval, dtype=dtype)
# read.
iline = 0
ival = 0
while ival < nval:
line = neuf.readline()
iline += 1
nchar = len(line)
line = line.rstrip()
nc = len(line)
if nc%width != 0:
raise IndexError('not exact chars at line %d'%(ival/iline))
nt = nc//width
arr[ival:ival+nt] = [vtype(line[8*it:8*(it+1)]) for it in range(nt)]
ival += nt
assert ival == nval
return arr
class GambitNeutral(object):
"""
Represent information in a Gambit Neutral file.
@cvar CLTPN_MAP: map cltpn from self to block.
@type CLTPN_MAP: numpy.ndarray
@cvar CLNDS_MAP: map clnds definition from self to block.
@type CLNDS_MAP: dict
@cvar CLFCS_RMAP: map clfcs definition back from block to self.
@type CLFCS_RMAP: dict
@ivar header: file header string.
@type header: str
@ivar title: title for this file.
@type title: str
@ivar data_source: identify the generation of the file from which program
and version.
@type data_source: str
@ivar numnp: number of nodes.
@type numnp: int
@ivar nelem: number of elements.
@type nelem: int
@ivar ngrps: number of element groups.
@type ngrps: int
@ivar nbsets: number of boundary condition sets.
@type nbsets: int
@ivar ndfcd: number of coordinate directions (2/3).
@type ndfcd: int
@ivar ndfvl: number of velocity components (2/3).
@type ndfvl: int
@ivar nodes: nodes array of shape of (numnp, ndfcd).
@type nodes: numpy.ndarray
@ivar elems: elements array of shape of (nelem, :).
@type elems: numpy.ndarray
@ivar grps: list of ElementGroup objects.
@type grps: list
@ivar bcs: list of BoundaryCondition objects.
@type bcs: list
"""
@property
@property
@property
def toblock(self, onlybcnames=None, bcname_mapper=None, fpdtype=None,
use_incenter=False):
"""
Convert GambitNeutral object to Block object.
@keyword onlybcnames: positively list wanted names of BCs.
@type onlybcnames: list
@keyword bcname_mapper: map name to bc type number.
@type bcname_mapper: dict
@keyword fpdtype: floating-point dtype.
@type fpdtype: str
@keyword use_incenter: use incenter when creating block.
@type use_incenter: bool
@return: Block object.
@rtype: solvcon.block.Block
"""
from ..block import Block
# create corresponding block according to GambitNeutral object.
blk = Block(ndim=self.ndim, nnode=self.nnode, ncell=self.ncell,
fpdtype=fpdtype, use_incenter=use_incenter)
self._convert_interior_to(blk)
blk.build_interior()
self._convert_bc_to(blk,
onlynames=onlybcnames, name_mapper=bcname_mapper)
blk.build_boundary()
blk.build_ghost()
return blk
from numpy import array
# define map for cltpn (from self to block).
CLTPN_MAP = array([0, 1, 2, 3, 4, 6, 5, 7], dtype='int32')
# define map for clnds (from self to block).
CLNDS_MAP = {}
# tpn=1: edge.
CLNDS_MAP[1] = {}
CLNDS_MAP[1][2] = [2,3] # 2 nodes.
CLNDS_MAP[1][3] = [2,4] # 3 nodes.
# tpn=2: quadrilateral.
CLNDS_MAP[2] = {}
CLNDS_MAP[2][4] = [2,3,4,5] # 4 nodes.
CLNDS_MAP[2][8] = [2,4,6,8] # 8 nodes.
CLNDS_MAP[2][9] = [2,4,6,8] # 9 nodes.
# tpn=3: triangle.
CLNDS_MAP[3] = {}
CLNDS_MAP[3][3] = [2,3,4] # 3 nodes.
CLNDS_MAP[3][6] = [2,4,6] # 6 nodes.
CLNDS_MAP[3][7] = [2,4,6] # 7 nodes.
# tpn=4: brick.
CLNDS_MAP[4] = {}
CLNDS_MAP[4][8] = [2,3,5,4,6,7,9,8] # 8 nodes.
CLNDS_MAP[4][20] = [2,4,9,7,14,16,21,19] # 20 nodes.
CLNDS_MAP[4][27] = [2,4,10,8,20,22,28,26] # 27 nodes.
# tpn=5: tetrahedron.
CLNDS_MAP[5] = {}
CLNDS_MAP[5][4] = [2,3,4,5] # 4 nodes.
CLNDS_MAP[5][10] = [2,4,7,11] # 10 nodes.
# tpn=6: wedge.
CLNDS_MAP[6] = {}
CLNDS_MAP[6][6] = [2,4,3,5,7,6] # 6 nodes.
CLNDS_MAP[6][15] = [2,7,4,11,16,13] # 15 nodes.
CLNDS_MAP[6][18] = [2,7,4,14,19,16] # 18 nodes.
# tpn=7: pyramid.
CLNDS_MAP[7] = {}
CLNDS_MAP[7][5] = [2,3,5,4,6] # 5 nodes.
CLNDS_MAP[7][13] = [2,4,9,7,14] # 13 nodes.
CLNDS_MAP[7][14] = [2,4,10,8,15] # 14 nodes.
CLNDS_MAP[7][18] = [2,4,10,8,19] # 18 nodes.
CLNDS_MAP[7][19] = [2,4,10,8,20] # 19 nodes.
def _convert_interior_to(self, blk):
"""
Convert interior information, i.e., connectivities, from GambitNeutral
to Block object.
@param blk: to-be-written Block object.
@type blk: solvcon.block.Block
@return: nothing.
"""
from numpy import array
from ..block import elemtype
cltpn_map = self.CLTPN_MAP
clnds_map = self.CLNDS_MAP
# copy nodal coordinate data.
blk.ndcrd[:,:] = self.nodes[:,:]
# copy node difinition in cells.
cltpn = blk.cltpn
clnds = blk.clnds
ncell = self.ncell
icell = 0
while icell < ncell:
# translate tpn from GambitNeutral to Block.
tpn = cltpn_map[self.elems[icell,0]]
cltpn[icell] = tpn
# translate clnds from GambitNeutral to Block.
nnd = elemtype[tpn,2]
nnd_self = self.elems[icell,1]
clnds[icell,0] = nnd
clnds[icell,1:nnd+1] = self.elems[icell,clnds_map[tpn][nnd_self]]
# advance cell.
icell += 1
# create cell groups for the block.
clgrp = blk.clgrp
for grp in self.grps:
igrp = len(blk.grpnames)
assert grp.ngp == igrp+1
clgrp[grp.elems] = igrp
blk.grpnames.append(grp.elmmat)
def _convert_bc_to(self, blk, onlynames=None, name_mapper=None):
"""
Convert boundary condition information from GambitNeutral object into
Block object.
@param blk: to-be-written Block object.
@type blk: solvcon.block.Block
@keyword onlynames: positively list wanted names of BCs.
@type onlynames: list
@keyword name_mapper: map name to bc type and value dictionary; the two
objects are organized in a tuple.
@type name_mapper: dict
@return: nothing.
"""
# process all neutral bc objects.
for neubc in self.bcs:
# extract boundary faces from neutral bc object.
bc = neubc.tobc(blk)
if bc is None: # skip if got nothing.
continue
# skip unwanted BCs.
if onlynames:
if bc.name not in onlynames:
continue
# recreate BC according to name mapping.
if name_mapper is not None:
bct, vdict = name_mapper.get(bc.name, None)
if bct is not None:
bc = bct(bc=bc)
bc.feedValue(vdict)
# save to block object.
bc.sern = len(blk.bclist)
bc.blk = blk
blk.bclist.append(bc)
class NeutralIO(FormatIO):
    """
    Proxy to gambit neutral file format.
    """
    def load(self, stream, bcrej=None):
        """
        Load block from stream with BC mapper applied.
        @keyword stream: file object or file name to be read.
        @type stream: file or str
        @keyword bcrej: names of the BC to reject.
        @type bcrej: list
        @return: the loaded block.
        @rtype: solvcon.block.Block
        """
        import gzip
        # Open the file ourselves when given a path; paths ending in
        # '.gz' are transparently decompressed.
        if isinstance(stream, (bytes, str)):
            # BUGFIX: endswith() needs a bytes suffix for a bytes path;
            # the old code raised TypeError on bytes filenames.
            suffix = b'.gz' if isinstance(stream, bytes) else '.gz'
            opener = gzip.open if stream.endswith(suffix) else open
            stream = opener(stream)
        # Parse the gambit neutral file and release the file handle.
        neu = GambitNeutral(stream)
        stream.close()
        # Keep only BCs not explicitly rejected; None means "keep all"
        # downstream in toblock().
        if bcrej:
            onlybcnames = [bc.name for bc in neu.bcs if bc.name not in bcrej]
        else:
            onlybcnames = None
        return neu.toblock(onlybcnames=onlybcnames)
if __name__ == '__main__':
    # Ad-hoc smoke test: print a summary of the given neutral file.
    import sys
    if len(sys.argv) <= 1:
        sys.stdout.write("usage: %s <file name>\n" % sys.argv[0])
    else:
        fname = sys.argv[1]
        neu = GambitNeutral(open(fname).read())
        sys.stdout.write("Gambit Neutral object: %s" % neu)
        if not (neu.grps or neu.bcs):
            sys.stdout.write("\n")
        else:
            sys.stdout.write(", with:\n")
            for lst in neu.grps, neu.bcs:
                if len(lst) > 0:
                    for obj in lst:
                        sys.stdout.write("  %s\n" % obj)
| [
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
3648,
11,
575,
2150,
12,
40728,
12555,
1279,
22556,
66,
31,
34453,
85,
1102,
13,
3262,
29,
198,
2,
198,
2,
1439,
2489,
10395,
13,
198,
2... | 2.098989 | 10,587 |
# Roman numeral symbol -> integer value.
symbol_lookup = dict(
    zip('IVXLCDM', (1, 5, 10, 50, 100, 500, 1000))
)
| [
1837,
23650,
62,
5460,
929,
796,
1391,
198,
220,
220,
220,
705,
40,
10354,
352,
11,
198,
220,
220,
220,
705,
53,
10354,
642,
11,
198,
220,
220,
220,
705,
55,
10354,
838,
11,
198,
220,
220,
220,
705,
43,
10354,
2026,
11,
198,
220... | 1.561644 | 73 |
import tkinter as tk
from tkinter import ttk
from thonny import get_workbench
from thonnycontrib.codelive.mqtt_connection import generate_topic, topic_exists
from thonnycontrib.codelive.views.hinttext import HintText
from thonnycontrib.codelive.views.textspin import TextSpin
from thonnycontrib.codelive.mqtt_connection import BROKER_URLS
# For testing only!!!!!
if __name__ == "__main__":
if __name__ == "__main__":
root = tk.Tk()
button = tk.Button(root, text="Test", command=start_top)
button.pack(padx=20, pady=20)
root.mainloop()
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
198,
6738,
294,
261,
3281,
1330,
651,
62,
1818,
26968,
198,
6738,
294,
261,
3281,
3642,
822,
13,
19815,
417,
425,
13,
76,
80,
926,
62,
38659,
133... | 2.636792 | 212 |
from PyQt5.QtWidgets import QPushButton,QApplication, QWidget, QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
import cv2
import DTR2
import os, subprocess
import ctypes
import sqlite3
import PySimpleGUI as sg
from PySimpleGUI import SetOptions
import TrainFaces
from easygui import enterbox
## subprocess.Popen(["python", "TrainFaces.py"])
## os._exit
if __name__=='__main__':
import sys
app=QtWidgets.QApplication(sys.argv)
window=Ui_Registration2()
window.show()
sys.exit(app.exec_())
| [
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
49222,
21864,
11,
48,
23416,
11,
1195,
38300,
11,
1195,
12837,
14253,
201,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
1... | 2.119266 | 327 |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch04_strings.solutions.ex06_remove_duplicates import remove_duplicates
@pytest.mark.parametrize("input, expected",
[("bananas", "bans"),
("lalalamama", "lam"),
("MICHAEL", "MICHAEL") ])
| [
2,
1355,
8802,
8207,
23065,
76,
277,
25151,
288,
292,
23670,
366,
37906,
13879,
1,
198,
2,
198,
2,
15069,
12131,
416,
3899,
1423,
268,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
442,
3023,
62,
37336,
13,
82,
14191,
13,
1069,
3312... | 2.112994 | 177 |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Cacheable object and cache management base classes.
"""
import sys
import time
from logging import getLogger
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import view_management_screens
from AccessControl.SecurityInfo import ClassSecurityInfo
from AccessControl.SecurityManagement import getSecurityManager
from AccessControl.unauthorized import Unauthorized
from Acquisition import aq_acquire
from Acquisition import aq_base
from Acquisition import aq_get
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.special_dtml import DTMLFile
ZCM_MANAGERS = '__ZCacheManager_ids__'
ViewManagementScreensPermission = view_management_screens
ChangeCacheSettingsPermission = 'Change cache settings'
LOG = getLogger('Cache')
def filterCacheManagers(orig, container, name, value, extra):
    """
    Filter function for aq_acquire.

    Accept *name* only when *container* explicitly registers it in its
    list of cache manager ids; everything else is rejected.
    """
    base = aq_base(container)
    if not hasattr(base, ZCM_MANAGERS):
        return 0
    if name not in getattr(container, ZCM_MANAGERS):
        return 0
    return 1
def getVerifiedManagerIds(container):
    """Return the cache manager ids registered on *container*, keeping
    only those that resolve to genuine managers (_isCacheManager set)."""
    registered = getattr(container, ZCM_MANAGERS, ())
    verified = [mid for mid in registered
                if getattr(getattr(container, mid, None), '_isCacheManager', 0)]
    return tuple(verified)
# Anytime a CacheManager is added or removed, all _v_ZCacheable_cache
# attributes must be invalidated. manager_timestamp is a way to do
# that.
manager_timestamp = 0
class Cacheable(object):
"""Mix-in for cacheable objects."""
manage_options = (
{
'label': 'Cache',
'action': 'ZCacheable_manage',
'filter': filterCacheTab,
},
)
security = ClassSecurityInfo()
security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))
security.declareProtected(ViewManagementScreensPermission, 'ZCacheable_manage') # NOQA: D001,E501
ZCacheable_manage = DTMLFile('dtml/cacheable', globals())
_v_ZCacheable_cache = None
_v_ZCacheable_manager_timestamp = 0
__manager_id = None
__enabled = True
_isCacheable = True
@security.private
def ZCacheable_getManager(self):
"""Returns the currently associated cache manager."""
manager_id = self.__manager_id
if manager_id is None:
return None
try:
return aq_acquire(
self,
manager_id,
containment=1,
filter=filterCacheManagers,
extra=None,
default=None
)
except AttributeError:
return None
@security.private
    def ZCacheable_getCache(self):
        """Gets the cache associated with this object.

        Returns the unwrapped (aq_base) cache object, or None when no
        manager id is set or the manager cannot be found.
        """
        if self.__manager_id is None:
            return None
        c = self._v_ZCacheable_cache
        if c is not None:
            # We have a volatile reference to the cache.
            # It is trusted only while the module-level manager_timestamp
            # is unchanged (adding/removing managers bumps it).
            if self._v_ZCacheable_manager_timestamp == manager_timestamp:
                return aq_base(c)
        manager = self.ZCacheable_getManager()
        if manager is not None:
            c = aq_base(manager.ZCacheManager_getCache())
        else:
            return None
        # Set a volatile reference to the cache then return it.
        self._v_ZCacheable_cache = c
        self._v_ZCacheable_manager_timestamp = manager_timestamp
        return c
@security.private
def ZCacheable_isCachingEnabled(self):
"""
Returns true only if associated with a cache manager and
caching of this method is enabled.
"""
return self.__enabled and self.ZCacheable_getCache()
@security.private
@security.private
def ZCacheable_get(
self,
view_name='',
keywords=None,
mtime_func=None,
default=None
):
"""Retrieves the cached view for the object under the
conditions specified by keywords. If the value is
not yet cached, returns the default.
"""
c = self.ZCacheable_getCache()
if c is not None and self.__enabled:
ob, view_name = self.ZCacheable_getObAndView(view_name)
try:
val = c.ZCache_get(ob, view_name, keywords,
mtime_func, default)
return val
except Exception:
LOG.warning('ZCache_get() exception')
return default
return default
@security.private
def ZCacheable_set(
self,
data,
view_name='',
keywords=None,
mtime_func=None
):
"""Cacheable views should call this method after generating
cacheable results. The data argument can be of any Python type.
"""
c = self.ZCacheable_getCache()
if c is not None and self.__enabled:
ob, view_name = self.ZCacheable_getObAndView(view_name)
try:
c.ZCache_set(ob, data, view_name, keywords,
mtime_func)
except Exception:
LOG.warning('ZCache_set() exception')
@security.protected(ViewManagementScreensPermission)
    def ZCacheable_invalidate(self, view_name='', REQUEST=None):
        """Called after a cacheable object is edited. Causes all
        cache entries that apply to the view_name to be removed.
        Returns a status message.

        When REQUEST is given, renders the Cache management tab with
        the message instead of returning it.
        """
        c = self.ZCacheable_getCache()
        if c is not None:
            ob, view_name = self.ZCacheable_getObAndView(view_name)
            try:
                message = c.ZCache_invalidate(ob)
                if not message:
                    message = 'Invalidated.'
            except Exception:
                exc = sys.exc_info()
                try:
                    LOG.warning('ZCache_invalidate() exception')
                    message = 'An exception occurred: %s: %s' % exc[:2]
                finally:
                    # Drop the exc_info tuple promptly so the traceback
                    # (and everything it references) is not kept alive.
                    exc = None
        else:
            message = 'This object is not associated with a cache manager.'
        if REQUEST is not None:
            return self.ZCacheable_manage(
                self, REQUEST, management_view='Cache',
                manage_tabs_message=message)
        return message
@security.private
def ZCacheable_getModTime(self, mtime_func=None):
"""Returns the highest of the last mod times."""
# Based on:
# mtime_func
# self.mtime
# self.__class__.mtime
mtime = 0
if mtime_func:
# Allow mtime_func to influence the mod time.
mtime = mtime_func()
base = aq_base(self)
mtime = max(getattr(base, '_p_mtime', mtime) or 0, mtime)
klass = getattr(base, '__class__', None)
if klass:
klass_mtime = getattr(klass, '_p_mtime', mtime)
if isinstance(klass_mtime, int):
mtime = max(klass_mtime, mtime)
return mtime
@security.protected(ViewManagementScreensPermission)
def ZCacheable_getManagerId(self):
"""Returns the id of the current ZCacheManager."""
return self.__manager_id
@security.protected(ViewManagementScreensPermission)
def ZCacheable_getManagerURL(self):
"""Returns the URL of the current ZCacheManager."""
manager = self.ZCacheable_getManager()
if manager is not None:
return manager.absolute_url()
return None
@security.protected(ViewManagementScreensPermission)
def ZCacheable_getManagerIds(self):
"""Returns a list of mappings containing the id and title
of the available ZCacheManagers."""
rval = []
ob = self
used_ids = {}
while ob is not None:
if hasattr(aq_base(ob), ZCM_MANAGERS):
ids = getattr(ob, ZCM_MANAGERS)
for id in ids:
manager = getattr(ob, id, None)
if manager is not None:
id = manager.getId()
if id not in used_ids:
title = getattr(aq_base(manager), 'title', '')
rval.append({'id': id, 'title': title})
used_ids[id] = 1
ob = aq_parent(aq_inner(ob))
return tuple(rval)
@security.protected(ChangeCacheSettingsPermission)
def ZCacheable_setManagerId(self, manager_id, REQUEST=None):
"""Changes the manager_id for this object."""
self.ZCacheable_invalidate()
if not manager_id:
# User requested disassociation
# from the cache manager.
manager_id = None
else:
manager_id = str(manager_id)
self.__manager_id = manager_id
self._v_ZCacheable_cache = None
if REQUEST is not None:
return self.ZCacheable_manage(
self,
REQUEST,
management_view='Cache',
manage_tabs_message='Cache settings changed.'
)
@security.protected(ViewManagementScreensPermission)
def ZCacheable_enabled(self):
"""Returns true if caching is enabled for this object or method."""
return self.__enabled
@security.protected(ChangeCacheSettingsPermission)
def ZCacheable_setEnabled(self, enabled=0, REQUEST=None):
"""Changes the enabled flag."""
self.__enabled = enabled and 1 or 0
if REQUEST is not None:
return self.ZCacheable_manage(
self, REQUEST, management_view='Cache',
manage_tabs_message='Cache settings changed.')
@security.protected(ViewManagementScreensPermission)
def ZCacheable_configHTML(self):
"""Override to provide configuration of caching
behavior that can only be specific to the cacheable object.
"""
return ''
InitializeClass(Cacheable)
def findCacheables(
ob,
manager_id,
require_assoc,
subfolders,
meta_types,
rval,
path
):
"""
Used by the CacheManager UI. Recursive. Similar to the Zope
"Find" function. Finds all Cacheable objects in a hierarchy.
"""
try:
if meta_types:
subobs = ob.objectValues(meta_types)
else:
subobs = ob.objectValues()
sm = getSecurityManager()
# Add to the list of cacheable objects.
for subob in subobs:
if not isCacheable(subob):
continue
associated = (subob.ZCacheable_getManagerId() == manager_id)
if require_assoc and not associated:
continue
if not sm.checkPermission('Change cache settings', subob):
continue
subpath = path + (subob.getId(),)
info = {
'sortkey': subpath,
'path': '/'.join(subpath),
'title': getattr(aq_base(subob), 'title', ''),
'icon': None,
'associated': associated,
}
rval.append(info)
# Visit subfolders.
if subfolders:
if meta_types:
subobs = ob.objectValues()
for subob in subobs:
subpath = path + (subob.getId(),)
if hasattr(aq_base(subob), 'objectValues'):
if sm.checkPermission(
'Access contents information', subob):
findCacheables(
subob, manager_id, require_assoc,
subfolders, meta_types, rval, subpath)
except Exception:
# Ignore exceptions.
import traceback
traceback.print_exc()
class Cache(object):
"""
A base class (and interface description) for caches.
Note that Cache objects are not intended to be visible by
restricted code.
"""
class CacheManager(object):
"""
A base class for cache managers. Implement ZCacheManager_getCache().
"""
security = ClassSecurityInfo()
security.setPermissionDefault(ChangeCacheSettingsPermission, ('Manager',))
@security.private
_isCacheManager = 1
manage_options = (
{
'label': 'Associate',
'action': 'ZCacheManager_associate',
},
)
security.declareProtected(ChangeCacheSettingsPermission, 'ZCacheManager_associate') # NOQA: D001,E501
ZCacheManager_associate = DTMLFile('dtml/cmassoc', globals())
@security.protected(ChangeCacheSettingsPermission)
def ZCacheManager_locate(
self,
require_assoc,
subfolders,
meta_types=[],
REQUEST=None
):
"""Locates cacheable objects.
"""
ob = aq_parent(aq_inner(self))
rval = []
manager_id = self.getId()
if '' in meta_types:
# User selected "All".
meta_types = []
findCacheables(
ob,
manager_id,
require_assoc,
subfolders,
meta_types,
rval,
()
)
if REQUEST is not None:
return self.ZCacheManager_associate(
self,
REQUEST,
show_results=1,
results=rval,
management_view="Associate"
)
return rval
@security.protected(ChangeCacheSettingsPermission)
def ZCacheManager_setAssociations(self, props=None, REQUEST=None):
"""Associates and un-associates cacheable objects with this
cache manager.
"""
addcount = 0
remcount = 0
parent = aq_parent(aq_inner(self))
sm = getSecurityManager()
my_id = str(self.getId())
if props is None:
props = REQUEST.form
for key, do_associate in props.items():
if key[:10] == 'associate_':
path = key[10:]
ob = parent.restrictedTraverse(path)
if not sm.checkPermission('Change cache settings', ob):
raise Unauthorized
if not isCacheable(ob):
# Not a cacheable object.
continue
manager_id = str(ob.ZCacheable_getManagerId())
if do_associate:
if manager_id != my_id:
ob.ZCacheable_setManagerId(my_id)
addcount = addcount + 1
else:
if manager_id == my_id:
ob.ZCacheable_setManagerId(None)
remcount = remcount + 1
if REQUEST is not None:
return self.ZCacheManager_associate(
self, REQUEST, management_view="Associate",
manage_tabs_message='%d association(s) made, %d removed.' %
(addcount, remcount)
)
InitializeClass(CacheManager)
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
6244,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789,
11,
198,
2,
10628,
362,
13,
... | 2.188571 | 7,175 |
Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 21:26:53) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> from exif import Image
>>> with open('C:\\Users\\oswal\\Pictures\\oswaldo.jpg', 'rb') as image_file:
... my_image = Image(image_file)
...
>>> dir(my_image)
['_exif_ifd_pointer', '_gps_ifd_pointer', '_interoperability_ifd_Pointer', '_segments', 'cfa_pattern', 'color_space', 'components_configuration', 'compressed_bits_per_pixel', 'compression', 'contrast', 'custom_rendered', 'datetime', 'datetime_digitized', 'datetime_original', 'digital_zoom_ratio', 'exif_version', 'exposure_bias_value', 'exposure_mode', 'exposure_program', 'exposure_time', 'f_number', 'file_source', 'flash', 'flashpix_version', 'focal_length', 'focal_length_in_35mm_film', 'gain_control', 'get', 'get_file', 'gps_altitude', 'gps_altitude_ref', 'gps_datestamp', 'gps_latitude', 'gps_latitude_ref', 'gps_longitude', 'gps_longitude_ref', 'gps_map_datum', 'gps_satellites', 'gps_timestamp', 'gps_version_id', 'jpeg_interchange_format', 'jpeg_interchange_format_length', 'light_source', 'make', 'maker_note', 'max_aperture_value', 'metering_mode', 'model', 'orientation', 'photographic_sensitivity', 'pixel_x_dimension', 'pixel_y_dimension', 'resolution_unit', 'saturation', 'scene_capture_type', 'scene_type', 'sensing_method', 'sensitivity_type', 'sharpness', 'software', 'subject_distance_range', 'subsec_time', 'subsec_time_digitized', 'subsec_time_original', 'user_comment', 'white_balance', 'x_resolution', 'y_and_c_positioning', 'y_resolution']
>>> import pandas as pd
>>> data = pd.DataFrame(dir(my_image))
>>> datatoexcel = pd.ExcelWriter("CaracteristicasImagen.xlsx",engine='xlsxwriter')
>>> data.to_excel(datatoexcel, sheet_name='Sheet1')
>>> datatoexcel.save()
| [
37906,
513,
13,
22,
13,
18,
357,
85,
18,
13,
22,
13,
18,
25,
891,
19,
721,
21,
276,
1065,
11,
1526,
1679,
13130,
11,
2310,
25,
2075,
25,
4310,
8,
685,
5653,
34,
410,
13,
1129,
1433,
3933,
1643,
357,
24123,
15437,
319,
1592,
26... | 2.667638 | 686 |
from yelp_client import get_yelp_client
if __name__ == '__main__':
client = get_yelp_client()
parser = get_parser()
args = parser.parse_args()
params = {
'lang': args.language
}
response = client.get_business(args.id, **params)
business = response.business
print("Review count: {}".format(business.review_count))
for review in business.reviews:
print("{} (by {})".format(review.excerpt, review.user.name)) | [
6738,
331,
417,
79,
62,
16366,
1330,
651,
62,
88,
417,
79,
62,
16366,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
5456,
796,
651,
62,
88,
417,
79,
62,
16366,
3419,
198,
220,
220,
220,
... | 2.60452 | 177 |
#
# PySNMP MIB module ATTO-PRODUCTS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ATTO-PRODUCTS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:31:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, IpAddress, NotificationType, ObjectIdentity, Bits, Counter64, ModuleIdentity, Integer32, iso, enterprises, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "IpAddress", "NotificationType", "ObjectIdentity", "Bits", "Counter64", "ModuleIdentity", "Integer32", "iso", "enterprises", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
attoProductsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4547, 3, 2))
attoProductsMIB.setRevisions(('2013-04-19 13:45',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: attoProductsMIB.setRevisionsDescriptions(('Initial version of this module.',))
if mibBuilder.loadTexts: attoProductsMIB.setLastUpdated('201304191345Z')
if mibBuilder.loadTexts: attoProductsMIB.setOrganization('ATTO Technology, Inc.')
if mibBuilder.loadTexts: attoProductsMIB.setContactInfo('ATTO Technology 155 Crosspoint Parkway Amherst NY 14068 EMail: <support@attotech.com>')
if mibBuilder.loadTexts: attoProductsMIB.setDescription('This modules defines object identifiers assigned to various hardware platforms, which are returned as values for sysObjectID.')
attotech = MibIdentifier((1, 3, 6, 1, 4, 1, 4547))
attoProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1))
attoMgmt = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 2))
attoModules = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 3))
attoAgentCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 4))
attoGenericDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 1))
attoHba = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 3))
attoFB6500 = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 4))
attoFB6500N = MibIdentifier((1, 3, 6, 1, 4, 1, 4547, 1, 5))
mibBuilder.exportSymbols("ATTO-PRODUCTS-MIB", attoFB6500=attoFB6500, attoModules=attoModules, attotech=attotech, attoMgmt=attoMgmt, attoProductsMIB=attoProductsMIB, attoFB6500N=attoFB6500N, attoHba=attoHba, attoGenericDevice=attoGenericDevice, attoProducts=attoProducts, attoAgentCapability=attoAgentCapability, PYSNMP_MODULE_ID=attoProductsMIB)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
5161,
10468,
12,
4805,
28644,
50,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
14490,
14,
67,... | 2.69797 | 1,182 |
from distutils.core import setup
setup(
name='python-simple-usbrelay',
url='https://github.com/patrickjahns/simpleusbrelay',
version='0.1',
packages=['simpleusbarray'],
author='Patrick Jahns',
author_email='patrick.jahns@gmail.com',
license='MIT',
long_description=open('README.rst').read()
)
| [
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
29412,
12,
36439,
12,
43319,
2411,
323,
3256,
198,
220,
220,
220,
19016,
11639,
5450,
1378,
12567,
13,
785,
14,
29615,
31558,
5907,
14,
3643... | 2.608 | 125 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ConvertCoinUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from View.PY import ConQRC
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
3103,
1851,
24387,
10080,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,... | 2.98374 | 123 |
#!/usr/bin/env python3
# Copyright (c) 2019 The Bitcoin SV developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import p2p_port, disconnect_nodes
from test_framework.blocktools import create_block, create_coinbase, assert_equal
import datetime
# This test checks TOOBUSY reject message and behaviour that it triggers.
# Scenario 1:
# 2 nodes (A and B) send HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A then sends REJECT_TOOBUSY message. After that, node B should be asked for the same block (GetData).
# Scenario 2:
# Node A sends HEADERS message to bitcoind. Bitcoind sends GetData to node A.
# Node A sends REJECT_TOOBUSY message. Bitcoind waits and asks again after 5 seconds.
if __name__ == '__main__':
TooBusyRejectMsgTest().main() | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
15069,
357,
66,
8,
13130,
383,
6185,
20546,
6505,
198,
2,
4307,
6169,
739,
262,
17168,
3788,
5964,
11,
766,
262,
19249,
198,
2,
2393,
27975,
45761,
393,
2638,
1378,
2503,
13,
... | 3.355705 | 298 |
import inference
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("cv", help = "Cross Validation")
args = parser.parse_args()
cv = args.cv
inference.main(cv)
| [
11748,
32278,
198,
11748,
1822,
29572,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
220,
220,
198,
220,
220,
220,
30751,
13,
2860,
62,
4... | 2.686047 | 86 |
import unittest
from subprocess import run, PIPE
from pybump.pybump import *
valid_helm_chart = {'apiVersion': 'v1',
'appVersion': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
invalid_helm_chart = {'apiVersion': 'v1',
'notAppVersionKeyHere': '1.0',
'description': 'A Helm chart for Kubernetes',
'name': 'test',
'version': '0.1.0'}
empty_helm_chart = {}
valid_setup_py = """
setuptools.setup(
name="pybump",
version="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ArieLevs/PyBump",
license='Apache License 2.0',
packages=setuptools.find_packages(),
)
"""
# This setup.py content is missing 'version' key
invalid_setup_py_1 = """
setuptools.setup(
name="pybump",
invalid_version_string="0.1.3",
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
# This setup.py content 'version' key declared 3 times
invalid_setup_py_2 = """
setuptools.setup(
name="pybump",
version="0.1.3",
version="0.1.2",
__version__="12356"
author="Arie Lev",
author_email="levinsonarie@gmail.com",
description="Python version bumper",
)
"""
valid_version_file_1 = """0.12.4"""
valid_version_file_2 = """
1.5.0
"""
invalid_version_file_1 = """
this is some text in addition to version
1.5.0
nothing except semantic version should be in this file
"""
invalid_version_file_2 = """
version=1.5.0
"""
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
850,
14681,
1330,
1057,
11,
350,
4061,
36,
198,
198,
6738,
12972,
65,
931,
13,
9078,
65,
931,
1330,
1635,
198,
198,
12102,
62,
33485,
62,
40926,
796,
1391,
6,
15042,
14815,
10354,
705,
85,
16,
3256,
... | 2.096234 | 956 |
N = input()
for i in range(int(N)):
inp = input().split()
name = inp[0]
date1 = inp[1].split("/")
date2 = inp[2].split("/")
courses = int(inp[3])
if (int(date1[0]) >= 2010 or int(date2[0]) >= 1991):
eligible = "eligible"
elif courses > 40:
eligible = "ineligible"
else:
eligible = "coach petitions"
print(name, eligible) | [
45,
796,
5128,
3419,
198,
1640,
1312,
287,
2837,
7,
600,
7,
45,
8,
2599,
198,
220,
220,
220,
287,
79,
796,
5128,
22446,
35312,
3419,
198,
220,
220,
220,
1438,
796,
287,
79,
58,
15,
60,
198,
220,
220,
220,
3128,
16,
796,
287,
7... | 2.159091 | 176 |
# -*- coding: utf-8 -*-
import json
import jsonschema
from jsonpath_rw import parse
from jsonselect import jsonselect
class JsonValidator(object):
"""
Библиотека для проверки json.
Основана на: JSONSchema, JSONPath, JSONSelect.
== Дополнительная информация ==
- [ http://json-schema.org/ | Json Schema ]
- [ http://www.jsonschema.net/ | Jsonschema generator ]
- [ http://goessner.net/articles/JsonPath/ | JSONPath by Stefan Goessner ]
- [ http://jsonpath.curiousconcept.com/ | JSONPath Tester ]
- [ http://jsonselect.org/ | JSONSelect]
- [ http://jsonselect.curiousconcept.com/ | JSONSelect Tester]
== Зависимости ==
| jsonschema | https://pypi.python.org/pypi/jsonschema |
| jsonpath-rw | https://pypi.python.org/pypi/jsonpath-rw |
| jsonselect | https://pypi.python.org/pypi/jsonselect |
== Пример использования ==
Пример json, записанного в файле json_example.json
| { "store": {
| "book": [
| { "category": "reference",
| "author": "Nigel Rees",
| "title": "Sayings of the Century",
| "price": 8.95
| },
| { "category": "fiction",
| "author": "Evelyn Waugh",
| "title": "Sword of Honour",
| "price": 12.99
| },
| { "category": "fiction",
| "author": "Herman Melville",
| "title": "Moby Dick",
| "isbn": "0-553-21311-3",
| "price": 8.99
| },
| { "category": "fiction",
| "author": "J. R. R. Tolkien",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "price": 22.99
| }
| ],
| "bicycle": {
| "color": "red",
| "price": 19.95
| }
| }
| }
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
"""
ROBOT_LIBRARY_SCOPE='GLOBAL'
def _validate_json(self, checked_json, schema):
"""
Проверка json по JSONSchema
"""
try:
jsonschema.validate(checked_json, schema)
except jsonschema.ValidationError , e:
raise JsonValidatorError ('Element: %s. Error: %s. '%(e.path[0], e.message))
except jsonschema.SchemaError , e:
raise JsonValidatorError ('Json-schema error:'+e.message)
def validate_jsonschema_from_file (self, json_string, path_to_schema):
"""
Проверка json по схеме, загружаемой из файла.
*Args:*\n
_json_string_ - json-строка;\n
_path_to_schema_ - путь к файлу со схемой json;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json |
"""
schema=open(path_to_schema).read()
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
def validate_jsonschema (self, json_string, input_schema):
"""
Проверка json по схеме.
*Args:*\n
_json_string_ - json-строка;\n
_input_schema_ - схема в виде строки;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | ${schema}= | OperatingSystem.Get File | ${CURDIR}${/}schema_valid.json |
| | Validate jsonschema | {"foo":bar} | ${schema} |
"""
load_input_json=self.string_to_json (json_string)
try:
load_schema=json.loads(input_schema)
except ValueError, e:
raise JsonValidatorError ('Error in schema: '+e.message)
self._validate_json (load_input_json, load_schema)
def string_to_json (self, source):
"""
Десериализация строки в json структуру.
*Args:*\n
_source_ - json-строка
*Return:*\n
Json структура
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| String to json | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json}= | String to json | ${json_string} |
| | Log | ${json["store"]["book"][0]["price"]} |
=>\n
8.95
"""
try:
load_input_json=json.loads(source)
except ValueError, e:
raise JsonValidatorError("Could not parse '%s' as JSON: %s"%(source, e))
return load_input_json
def json_to_string (self, source):
"""
Cериализация json структуры в строку.
*Args:*\n
_source_ - json структура
*Return:*\n
Json строка
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Json to string | ${json_string}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json}= | String to json | ${json_string} |
| | ${string}= | Json to string | ${json} |
| | ${pretty_string}= | Pretty print json | ${string} |
| | Log to console | ${pretty_string} |
"""
try:
load_input_json=json.dumps(source)
except ValueError, e:
raise JsonValidatorError("Could serialize '%s' to JSON: %s"%(source, e))
return load_input_json
def get_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [http://goessner.net/articles/JsonPath/|JSONPath] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONPath выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то возвращается ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author |
=>\n
| [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonpath
jsonpath_expr=parse(expr)
# список возвращаемых элементов
value_list=[]
for match in jsonpath_expr.find(load_input_json):
value_list.append(match.value)
if not value_list:
return None
else:
return value_list
def select_elements (self, json_string, expr):
"""
Возвращает список элементов из _json_string_, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - JSONSelect выражение;
*Return:*\n
Список найденных элементов. Если элементы не найдены, то ``None``
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Select json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_elements}= | Select elements | ${json_example} | .author:contains("Evelyn Waugh")~.price |
=>\n
| 12.99
"""
load_input_json=self.string_to_json (json_string)
# парсинг jsonselect
jsonselect.Parser(load_input_json)
values=jsonselect.select(expr, load_input_json)
return values
def element_should_exist (self, json_string, expr):
"""
Проверка существования одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | Element should exist | ${json_example} | $..book[?(@.author=='Herman Melville')] |
"""
value=self.select_elements (json_string, expr)
if value is None:
raise JsonValidatorError ('Elements %s does not exist'%expr)
def element_should_not_exist (self, json_string, expr):
"""
Проверка отсутствия одного или более элементов, соответствующих [ http://jsonselect.org/ | JSONSelect] выражению.
*Args:*\n
_json_string_ - json-строка;\n
_expr_ - jsonpath выражение;\n
*Raises:*\n
JsonValidatorError
"""
value=self.select_elements (json_string, expr)
if value is not None:
raise JsonValidatorError ('Elements %s exist but should not'%expr)
def update_json(self, json_string, expr, value, index=0):
"""
Замена значения в json-строке.
*Args:*\n
_json_string_ - json-строка dict;\n
_expr_ - JSONPath выражение для определения заменяемого значения;\n
_value_ - значение, на которое будет произведена замена;\n
_index_ - устанавливает индекс для выбора элемента внутри списка совпадений, по-умолчанию равен 0;\n
*Return:*\n
Изменённый json в виде словаря.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Update element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
| | ${json_update}= | Update_json | ${json_example} | $..color | changed |
"""
load_input_json=self.string_to_json (json_string)
matches = self._json_path_search(load_input_json, expr)
datum_object = matches[int(index)]
if not isinstance(datum_object, DatumInContext):
raise JsonValidatorError("Nothing found by the given json-path")
path = datum_object.path
# Изменить справочник используя полученные данные
# Если пользователь указал на список
if isinstance(path, Index):
datum_object.context.value[datum_object.path.index] = value
# Если пользователь указал на значение (string, bool, integer or complex)
elif isinstance(path, Fields):
datum_object.context.value[datum_object.path.fields[0]] = value
return load_input_json
def pretty_print_json (self, json_string):
"""
Возврещает отформатированную json-строку _json_string_.\n
Используется метод json.dumps с настройкой _indent=2, ensure_ascii=False_.
*Args:*\n
_json_string_ - json-строка.
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| Library | OperatingSystem |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Check element | ${pretty_json}= | Pretty print json | {a:1,foo:[{b:2,c:3},{d:"baz",e:4}]} |
| | Log | ${pretty_json} |
=>\n
| {
| "a": 1,
| "foo": [
| {
| "c": 3,
| "b": 2
| },
| {
| "e": 4,
| "d": "baz"
| }
| ]
| }
"""
return json.dumps(self.string_to_json(json_string), indent=2, ensure_ascii=False)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
33918,
201,
198,
11748,
44804,
684,
2395,
2611,
201,
198,
6738,
33918,
6978,
62,
31653,
1330,
21136,
201,
198,
6738,
33918,
19738,
1330,
33918,
19738... | 1.701407 | 8,031 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_tessellation_shader'
GL_CCW=_C('GL_CCW',0x0901)
GL_CW=_C('GL_CW',0x0900)
GL_EQUAL=_C('GL_EQUAL',0x0202)
GL_FRACTIONAL_EVEN=_C('GL_FRACTIONAL_EVEN',0x8E7C)
GL_FRACTIONAL_ODD=_C('GL_FRACTIONAL_ODD',0x8E7B)
GL_ISOLINES=_C('GL_ISOLINES',0x8E7A)
GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E1E)
GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E1F)
GL_MAX_PATCH_VERTICES=_C('GL_MAX_PATCH_VERTICES',0x8E7D)
GL_MAX_TESS_CONTROL_INPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_INPUT_COMPONENTS',0x886C)
GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS',0x8E83)
GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS',0x8E81)
GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS',0x8E85)
GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS=_C('GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS',0x8E89)
GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS',0x8E7F)
GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS',0x886D)
GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS',0x8E86)
GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS=_C('GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS',0x8E82)
GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS',0x8E8A)
GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS=_C('GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS',0x8E80)
GL_MAX_TESS_GEN_LEVEL=_C('GL_MAX_TESS_GEN_LEVEL',0x8E7E)
GL_MAX_TESS_PATCH_COMPONENTS=_C('GL_MAX_TESS_PATCH_COMPONENTS',0x8E84)
GL_PATCHES=_C('GL_PATCHES',0x000E)
GL_PATCH_DEFAULT_INNER_LEVEL=_C('GL_PATCH_DEFAULT_INNER_LEVEL',0x8E73)
GL_PATCH_DEFAULT_OUTER_LEVEL=_C('GL_PATCH_DEFAULT_OUTER_LEVEL',0x8E74)
GL_PATCH_VERTICES=_C('GL_PATCH_VERTICES',0x8E72)
GL_QUADS=_C('GL_QUADS',0x0007)
GL_TESS_CONTROL_OUTPUT_VERTICES=_C('GL_TESS_CONTROL_OUTPUT_VERTICES',0x8E75)
GL_TESS_CONTROL_SHADER=_C('GL_TESS_CONTROL_SHADER',0x8E88)
GL_TESS_EVALUATION_SHADER=_C('GL_TESS_EVALUATION_SHADER',0x8E87)
GL_TESS_GEN_MODE=_C('GL_TESS_GEN_MODE',0x8E76)
GL_TESS_GEN_POINT_MODE=_C('GL_TESS_GEN_POINT_MODE',0x8E79)
GL_TESS_GEN_SPACING=_C('GL_TESS_GEN_SPACING',0x8E77)
GL_TESS_GEN_VERTEX_ORDER=_C('GL_TESS_GEN_VERTEX_ORDER',0x8E78)
GL_TRIANGLES=_C('GL_TRIANGLES',0x0004)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER',0x84F0)
GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER=_C('GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER',0x84F1)
@_f
@_p.types(None,_cs.GLenum,arrays.GLfloatArray)
@_f
@_p.types(None,_cs.GLenum,_cs.GLint)
| [
7061,
6,
16541,
519,
877,
515,
416,
35555,
62,
8612,
378,
4226,
11,
466,
407,
4370,
0,
7061,
6,
201,
198,
6738,
30672,
1330,
3859,
355,
4808,
79,
11,
26515,
201,
198,
2,
6127,
5270,
3544,
428,
201,
198,
6738,
30672,
13,
1831,
13,
... | 1.833235 | 1,697 |
# Given an array nums and a value val,
# remove all instances of that value in-place and return the new length.
# Do not allocate extra space for another array, you must do this by modifying
# the input array in-place with O(1) extra memory.
solution = Solution()
print(solution.removeElement([0, 1, 2, 2, 3, 0, 4, 2], 2))
| [
2,
11259,
281,
7177,
997,
82,
290,
257,
1988,
1188,
11,
198,
2,
4781,
477,
10245,
286,
326,
1988,
287,
12,
5372,
290,
1441,
262,
649,
4129,
13,
198,
2,
2141,
407,
31935,
3131,
2272,
329,
1194,
7177,
11,
345,
1276,
466,
428,
416,
... | 3.385417 | 96 |
import inspect
import json
import logging
import os
import sqlite3
from pathlib import Path
from haiku_node.blockchain_helpers.accounts import (
AccountManager, make_default_accounts)
log = logging.getLogger('haiku_node')
demo_config = json.loads(Path('data/demo_config.json').read_text())
if __name__ == "__main__":
configure_logging()
process()
| [
11748,
10104,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
387,
28643,
62,
17440,
13,
9967,
7983,
62,
16794,
364,
13,
23317,
82,
1330,
357,... | 2.936508 | 126 |
from spectral import Spectral
from kmeans import Kmeans
import kernel
| [
6738,
37410,
1330,
13058,
1373,
198,
6738,
479,
1326,
504,
1330,
509,
1326,
504,
198,
11748,
9720,
198
] | 3.888889 | 18 |
"""
Determine what paramaters create real-looking EPSCs.
What I concluded was that 30pA and 180ms tau looks good.
"""
import os
import sys
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
PATH_DATA = os.path.abspath(PATH_HERE+"../../../data/abfs/")
PATH_SRC = os.path.abspath(PATH_HERE+"../../../src/")
sys.path.insert(0, PATH_SRC)
import pyabf
import pyabf.tools.generate
import glob
import matplotlib.pyplot as plt
import numpy as np
if __name__ == "__main__":
plotRealEPSC()
plotFakeEPSC()
plt.grid(alpha=.2)
plt.margins(0, .1)
plt.title("Real vs. Simulated EPSC")
plt.ylabel("current (pA)")
plt.xlabel("time (sec)")
plt.legend()
plt.savefig(__file__+".png")
print("DONE") | [
37811,
198,
35,
2357,
3810,
644,
5772,
8605,
2251,
1103,
12,
11534,
47013,
32274,
13,
198,
2061,
314,
8391,
373,
326,
1542,
79,
32,
290,
11546,
907,
256,
559,
3073,
922,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
... | 2.307937 | 315 |
## Adapted from https://github.com/kaituoxu/Speech-Transformer/blob/master/src/transformer/optimizer.py
import torch
class TransformerOptimizer(object):
"""A simple wrapper class for learning rate scheduling"""
| [
2235,
30019,
276,
422,
3740,
1378,
12567,
13,
785,
14,
74,
4548,
84,
1140,
84,
14,
5248,
3055,
12,
8291,
16354,
14,
2436,
672,
14,
9866,
14,
10677,
14,
7645,
16354,
14,
40085,
7509,
13,
9078,
198,
11748,
28034,
198,
198,
4871,
3602,... | 3.380952 | 63 |
# to install a program within colab use !pip install
import pandas as pd
# !pip show pandas
import requests
# !pip show requests
from bs4 import BeautifulSoup
# !pip show bs4
# !pip install tabulate
from tabulate import tabulate
# pip show tabulate
import csv
# pip show csv
import bs4
# pip3 install lxml
import lxml
import bs4.builder._lxml
res = requests.get('http://www.WEBSITE.com/news/snow-report/')
soup = BeautifulSoup(res.content,'lxml')
#!pip show lxml
table = soup.find_all('table')[0]
df = pd.read_html('http://www.WEBSITE.com/news/snow-report/', header=0)[0]
df
# Variable list objects
resort_list = df.iloc[:, 0]
new_snow = df.iloc[:, 1]
conditions = df.iloc[:, 2]
open_terrian = df.iloc[:, 3]
comments = df.iloc[:, 4
"""
# Export to CSV within google co lab
from google.colab import files
# do not print header and do not print index column
df.to_csv('SnowDatabaseExport.csv', header=0, index=False)
files.download('SnowDatabaseExport.csv')
"""
#Below is under heavy construction: Connecting pandas df to postgres (not complete)
"""
# Create a Postgres table for dataframe import
# https://www.dataquest.io/m/245/intro-to-postgres/4/creating-a-table
conn = psycopg2.connect("dbname=DATABASENAME user=DBUSERNAME")
cur = conn.cursor()
cur.execute("CREATE TABLE users(id integer PRIMARY KEY, email text, name text, address text)")
"""
"""
with open('user_accounts.csv') as f:
reader = csv.reader(f)
next(reader)
rows = [row for row in reader]
"""
"""
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
for row in rows:
cur.execute("INSERT INTO users VALUES (%s, %s, %s, %s)", row)
conn.commit()
cur.execute('SELECT * FROM users')
users = cur.fetchall()
conn.close()
"""
"""
conn = psycopg2.connect('dbname=dq user=dq')
cur = conn.cursor()
# sample_file.csv has a header row.
with open('SnowDataBaseExport.csv', 'r') as f:
# Skip the header row.
next(f)
cur.copy_from(f, 'users', sep=',')
conn.commit()
cur.execute('SELECT * FROM new_snow')
users = cur.fetchall()
conn.close()
"""
"""
# Get all the data from snow data through SQL
import psycopg2
conn = psycopg2.connect("dbname=dq user=dq")
cur = conn.cursor()
cur.execute('SELECT * FROM snow_report')
notes = cur.fetchall()
conn.close()
"""
| [
2,
284,
2721,
257,
1430,
1626,
951,
397,
779,
5145,
79,
541,
2721,
220,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
2,
5145,
79,
541,
905,
19798,
292,
198,
11748,
7007,
198,
2,
5145,
79,
541,
905,
7007,
198,
6738,
275,
82,
1... | 2.531049 | 934 |
from idl.lexer.Token import Token
from idl.parser.Parser import Parser
from idl.parser.ParserError import ParserError
from idl.parser.Desc import MethodDesc, MethodArgDesc
| [
6738,
4686,
75,
13,
2588,
263,
13,
30642,
1330,
29130,
198,
6738,
4686,
75,
13,
48610,
13,
46677,
1330,
23042,
263,
198,
6738,
4686,
75,
13,
48610,
13,
46677,
12331,
1330,
23042,
263,
12331,
198,
198,
6738,
4686,
75,
13,
48610,
13,
... | 3.411765 | 51 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 15:56
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
3070,
12,
1314,
1315,
25,
3980,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.736842 | 57 |
import socket
import sys
#creating socket
#Bind socket to port
#Accept connections
main()
| [
11748,
17802,
198,
11748,
25064,
198,
198,
2,
20123,
278,
17802,
628,
198,
2,
36180,
17802,
284,
2493,
198,
198,
2,
38855,
8787,
628,
198,
12417,
3419,
198
] | 3.428571 | 28 |
#
# Copyright (c) 2017-2018, Helge Mathee. All rights reserved.
#
OVERPLOT_VERSION = '1.0.0'
| [
2,
198,
2,
15069,
357,
66,
8,
2177,
12,
7908,
11,
5053,
469,
6550,
21067,
13,
1439,
2489,
10395,
13,
198,
2,
198,
198,
41983,
6489,
2394,
62,
43717,
796,
705,
16,
13,
15,
13,
15,
6,
198
] | 2.473684 | 38 |
###
import numpy as np
###
print("part 1: {}".format(part1()))
print("part 2: {}".format(part2()))
| [
198,
21017,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
21017,
628,
198,
198,
4798,
7203,
3911,
352,
25,
23884,
1911,
18982,
7,
3911,
16,
3419,
4008,
198,
4798,
7203,
3911,
362,
25,
23884,
1911,
18982,
7,
3911,
17,
3419,
4008,
... | 2.465116 | 43 |
# $ content split
# * title
# tm terminal
# td treads
# n note
# ! important
# alternative
#$ section 5 create functions
#* return
print(mean([1, 4, 6]))
#print(type(mean),type(sum))
#* print
mymean = mean([0, 3, 4])
print(mymean + 10)
#* conditional
#student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5} #n does not work
#print(mean(student_grade))
#* set for conditional
monday_temperatures = [8.8, 9.1, 9.9]
print(mean(monday_temperatures))
student_grade = {"Marry": 9.1, "Sim": 8.8, "John": 7.5}
print(mean(student_grade))
# ex.
print(foo("mypass"))
#* elif
x = 3
y = 1
if x > y:
print("x is greater than y")
elif x == y:
print("x is equal than y")
else:
print("x is less than y")
#* white space (one or more)
if 3 > 1: #n always one white space between operators
print('b') #n indentation (4 white spaces)
# ex.
#$ section 6 create functions
#* user input
#print(weather_condition(7))
user_input = float(input("Enter temperature:")) #n prompting the user to enter a value
print(weather_condition(user_input)) #td "input" function freezes the execution of a program and waits for the user input one the command line
#user_input = input("Enter some input:") #n use "input" only will get a string, so need to add "float" or "int" before
#print(type(user_input), user_input) #n help to check the type
#* string formatting
user_input = input("Enter your name: ")
message = "Hello %s!" % user_input #td "%s" is a special string, use "%" instead of "," and then the value of variable will replace the %s
#message = f"Hello {user_input}" #n only used after python3.6
#* with multiple variables
name = input("Enter your name: ")
surname = input("Enter your surname: ")
when = "today"
message = "Hello %s %s" % (name, surname) #td need more "%s" for more strings to input
message = f"Hello {name} {surname}, what's up {when}" #n as same as line above
print(message)
#def A(): #n use for only run the block A
#if __name__ == '__main__':
#A()
#ex. | [
198,
2,
720,
2695,
6626,
198,
2,
1635,
3670,
198,
2,
256,
76,
12094,
198,
2,
41560,
23153,
82,
198,
2,
299,
3465,
198,
2,
5145,
1593,
198,
2,
5559,
628,
198,
29953,
2665,
642,
2251,
5499,
198,
198,
2,
9,
1441,
198,
4798,
7,
32... | 2.476678 | 879 |
# Generated by Django 2.1.4 on 2019-03-24 19:19
import datetime
from django.db import migrations, models
import upload.storage
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
13130,
12,
3070,
12,
1731,
678,
25,
1129,
198,
198,
11748,
4818,
8079,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
9516,
13,
35350,
628
] | 3.146341 | 41 |
# (c) 2015, Andrew Gaffney <andrew@agaffney.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: actionable
type: stdout
short_description: shows only items that need attention
description:
- Use this callback when you dont care about OK nor Skipped.
- This callback suppresses any non Failed or Changed status.
version_added: "2.1"
deprecated:
why: The 'default' callback plugin now supports this functionality
removed_in: '2.11'
alternative: "'default' callback plugin with 'display_skipped_hosts = no' and 'display_ok_hosts = no' options"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout callback in configuration
# Override defaults from 'default' callback plugin
options:
display_skipped_hosts:
name: Show skipped hosts
description: "Toggle to control displaying skipped task/host results in a task"
type: bool
default: no
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without "ANSIBLE_" prefix are deprecated
version: "2.12"
alternatives: the "ANSIBLE_DISPLAY_SKIPPED_HOSTS" environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- key: display_skipped_hosts
section: defaults
display_ok_hosts:
name: Show 'ok' hosts
description: "Toggle to control displaying 'ok' task/host results in a task"
type: bool
default: no
env:
- name: ANSIBLE_DISPLAY_OK_HOSTS
ini:
- key: display_ok_hosts
section: defaults
version_added: '2.7'
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
| [
2,
357,
66,
8,
1853,
11,
6858,
402,
2001,
1681,
1279,
392,
1809,
31,
363,
2001,
1681,
13,
2398,
29,
198,
2,
357,
66,
8,
2177,
28038,
856,
4935,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
27975,
45761,
393,... | 2.50306 | 817 |
import sys, collections, itertools, os.path, optparse
optParser = optparse.OptionParser(
usage = "python %prog [options] <in.gtf> <out.gff>",
description=
"Script to prepare annotation for DEXSeq." +
"This script takes an annotation file in Ensembl GTF format" +
"and outputs a 'flattened' annotation file suitable for use " +
"with the count_in_exons.py script ",
epilog =
"Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology " +
"Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General " +
"Public License v3. Part of the 'DEXSeq' package." )
optParser.add_option( "-r", "--aggregate", type="choice", dest="aggregate",
choices = ( "no", "yes" ), default = "yes",
help = "'yes' or 'no'. Indicates whether two or more genes sharing an exon should be merged into an 'aggregate gene'. If 'no', the exons that can not be assiged to a single gene are ignored." )
(opts, args) = optParser.parse_args()
if len( args ) != 2:
sys.stderr.write( "Script to prepare annotation for DEXSeq.\n\n" )
sys.stderr.write( "Usage: python %s <in.gtf> <out.gff>\n\n" % os.path.basename(sys.argv[0]) )
sys.stderr.write( "This script takes an annotation file in Ensembl GTF format\n" )
sys.stderr.write( "and outputs a 'flattened' annotation file suitable for use\n" )
sys.stderr.write( "with the count_in_exons.py script.\n" )
sys.exit(1)
try:
import HTSeq
except ImportError:
sys.stderr.write( "Could not import HTSeq. Please install the HTSeq Python framework\n" )
sys.stderr.write( "available from http://www-huber.embl.de/users/anders/HTSeq\n" )
sys.exit(1)
gtf_file = args[0]
out_file = args[1]
aggregateGenes = opts.aggregate == "yes"
# Step 1: Store all exons with their gene and transcript ID
# in a GenomicArrayOfSets
exons = HTSeq.GenomicArrayOfSets( "auto", stranded=True )
for f in HTSeq.GFF_Reader( gtf_file ):
if f.type != "exon":
continue
f.attr['gene_id'] = f.attr['gene_id'].replace( ":", "_" )
exons[f.iv] += ( f.attr['gene_id'], f.attr['transcript_id'] )
# Step 2: Form sets of overlapping genes
# We produce the dict 'gene_sets', whose values are sets of gene IDs. Each set
# contains IDs of genes that overlap, i.e., share bases (on the same strand).
# The keys of 'gene_sets' are the IDs of all genes, and each key refers to
# the set that contains the gene.
# Each gene set forms an 'aggregate gene'.
if aggregateGenes == True:
gene_sets = collections.defaultdict( lambda: set() )
for iv, s in exons.steps():
# For each step, make a set, 'full_set' of all the gene IDs occuring
# in the present step, and also add all those gene IDs, whch have been
# seen earlier to co-occur with each of the currently present gene IDs.
full_set = set()
for gene_id, transcript_id in s:
full_set.add( gene_id )
full_set |= gene_sets[ gene_id ]
# Make sure that all genes that are now in full_set get associated
# with full_set, i.e., get to know about their new partners
for gene_id in full_set:
assert gene_sets[ gene_id ] <= full_set
gene_sets[ gene_id ] = full_set
# Step 3: Go through the steps again to get the exonic sections. Each step
# becomes an 'exonic part'. The exonic part is associated with an
# aggregate gene, i.e., a gene set as determined in the previous step,
# and a transcript set, containing all transcripts that occur in the step.
# The results are stored in the dict 'aggregates', which contains, for each
# aggregate ID, a list of all its exonic_part features.
aggregates = collections.defaultdict( lambda: list() )
for iv, s in exons.steps( ):
# Skip empty steps
if len(s) == 0:
continue
gene_id = list(s)[0][0]
## if aggregateGenes=FALSE, ignore the exons associated to more than one gene ID
if aggregateGenes == False:
check_set = set()
for geneID, transcript_id in s:
check_set.add( geneID )
if( len( check_set ) > 1 ):
continue
else:
aggregate_id = gene_id
# Take one of the gene IDs, find the others via gene sets, and
# form the aggregate ID from all of them
else:
assert set( gene_id for gene_id, transcript_id in s ) <= gene_sets[ gene_id ]
aggregate_id = '+'.join( gene_sets[ gene_id ] )
# Make the feature and store it in 'aggregates'
f = HTSeq.GenomicFeature( aggregate_id, "exonic_part", iv )
f.source = os.path.basename( sys.argv[0] )
# f.source = "camara"
f.attr = {}
f.attr[ 'gene_id' ] = aggregate_id
transcript_set = set( ( transcript_id for gene_id, transcript_id in s ) )
f.attr[ 'transcripts' ] = '+'.join( transcript_set )
aggregates[ aggregate_id ].append( f )
# Step 4: For each aggregate, number the exonic parts
aggregate_features = []
for l in aggregates.values():
for i in xrange( len(l)-1 ):
assert l[i].name == l[i+1].name, str(l[i+1]) + " has wrong name"
assert l[i].iv.end <= l[i+1].iv.start, str(l[i+1]) + " starts too early"
if l[i].iv.chrom != l[i+1].iv.chrom:
raise ValueError, "Same name found on two chromosomes: %s, %s" % ( str(l[i]), str(l[i+1]) )
if l[i].iv.strand != l[i+1].iv.strand:
raise ValueError, "Same name found on two strands: %s, %s" % ( str(l[i]), str(l[i+1]) )
aggr_feat = HTSeq.GenomicFeature( l[0].name, "aggregate_gene",
HTSeq.GenomicInterval( l[0].iv.chrom, l[0].iv.start,
l[-1].iv.end, l[0].iv.strand ) )
aggr_feat.source = os.path.basename( sys.argv[0] )
aggr_feat.attr = { 'gene_id': aggr_feat.name }
for i in xrange( len(l) ):
l[i].attr['exonic_part_number'] = "%03d" % ( i+1 )
aggregate_features.append( aggr_feat )
# Step 5: Sort the aggregates, then write everything out
aggregate_features.sort( key = lambda f: ( f.iv.chrom, f.iv.start ) )
fout = open( out_file, "w" )
for aggr_feat in aggregate_features:
fout.write( aggr_feat.get_gff_line() )
for f in aggregates[ aggr_feat.name ]:
fout.write( f.get_gff_line() )
fout.close()
| [
11748,
25064,
11,
17268,
11,
340,
861,
10141,
11,
28686,
13,
6978,
11,
2172,
29572,
198,
198,
8738,
46677,
796,
2172,
29572,
13,
19722,
46677,
7,
220,
198,
220,
220,
220,
198,
220,
220,
8748,
796,
366,
29412,
4064,
1676,
70,
685,
25... | 2.56169 | 2,391 |
import pytest
import logging
import pandas as pd
from cellpy import log
from cellpy.utils import helpers
from . import fdv
from cellpy.exceptions import NullData
log.setup_logging(default_level=logging.DEBUG)
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
18931,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
2685,
9078,
1330,
2604,
198,
6738,
2685,
9078,
13,
26791,
1330,
49385,
198,
6738,
764,
1330,
277,
67,
85,
198,
6738,
2685,
9078,
13,
1069,
11755,
... | 3.222222 | 72 |
from tensorclan.dataset import BaseDataset
from tensorclan.dataset.transform import BaseTransform
from torchvision import datasets
from torch.utils.data import Subset
| [
6738,
11192,
273,
565,
272,
13,
19608,
292,
316,
1330,
7308,
27354,
292,
316,
198,
6738,
11192,
273,
565,
272,
13,
19608,
292,
316,
13,
35636,
1330,
7308,
41762,
198,
198,
6738,
28034,
10178,
1330,
40522,
198,
6738,
28034,
13,
26791,
... | 3.541667 | 48 |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
import json
import traceback
from datetime import datetime, timedelta
import time
# Disable insecure warnings
requests.packages.urllib3.disable_warnings() # pylint: disable=no-member
# flake8: noqa
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
VALID_VARIANTS = ["HTTP","HTTPS"]
verify_certificate = not demisto.params().get('insecure', False)
def test_module() -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type client: ``Client``
:param Client: client to use
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
message: str = ''
try:
picus_server = str(demisto.params().get("picus_server"))
picus_server = picus_server[:-1] if picus_server.endswith("/") else picus_server
picus_apikey = demisto.params().get("picus_apikey")
picus_headers = {"X-Refresh-Token": "", "Content-Type": "application/json"}
picus_headers["X-Refresh-Token"] = "Bearer " + str(picus_apikey)
picus_auth_endpoint = "/authenticator/v1/access-tokens/generate"
picus_req_url = str(picus_server) + picus_auth_endpoint
picus_session = requests.Session()
if not demisto.params().get('proxy', False):
picus_session.trust_env = False
picus_auth_response = picus_session.post(picus_req_url, headers=picus_headers, verify=verify_certificate)
picus_auth_response.raise_for_status()
picus_accessToken = json.loads(picus_auth_response.text)["data"]["access_token"]
message = 'ok'
except Exception as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e) or 'NewConnectionError' in str(e) or 'Unauthorized' in str(e) or picus_accessToken is None:
message = 'Authorization Error: make sure API Key or Picus URL is correctly set'
else:
raise e
return message
''' MAIN FUNCTION '''
def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
demisto.debug(f'Command being called is {demisto.command()}')
try:
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module()
return_results(result)
elif demisto.command()=='picus-get-access-token':
result = getAccessToken()
return_results(result)
elif demisto.command()=='picus-get-vector-list':
result = getVectorList()
return_results(result)
elif demisto.command()=='picus-get-peer-list':
result = getPeerList()
return_results(result)
elif demisto.command()=='picus-get-attack-results':
result = getAttackResults()
return_results(result)
elif demisto.command()=='picus-run-attacks':
result = runAttacks()
return_results(result)
elif demisto.command()=='picus-get-threat-results':
result = getThreatResults()
return_results(result)
elif demisto.command()=='picus-set-paramPB':
result = setParamPB()
return_results(result)
elif demisto.command()=='picus-filter-insecure-attacks':
result = filterInsecureAttacks()
return_results(result)
elif demisto.command()=='picus-get-mitigation-list':
result = getMitigationList()
return_results(result)
elif demisto.command() == 'picus-get-vector-compare':
result = getVectorCompare()
return_results(result)
elif demisto.command() == 'picus-version':
result = getPicusVersion()
return_results(result)
elif demisto.command() == 'picus-trigger-update':
result = triggerUpdate()
return_results(result)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| [
11748,
1357,
396,
296,
735,
355,
1357,
396,
78,
198,
6738,
8070,
10697,
37906,
1330,
1635,
198,
6738,
8070,
10697,
12982,
37906,
1330,
1635,
198,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
12854,
1891,
198,
6738,
4818,
8079,
1330,
... | 2.381857 | 1,896 |
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
from itertools import combinations
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.protonet import ProtoNet
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
from utils import *
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
from tqdm import tqdm
import sys
sys.path.append('..')
from methods.protonet import euclidean_dist
if __name__=='__main__':
np.random.seed(10)
params = parse_args('test')
##################################################################
image_size = 224
iter_num = 600
n_query = max(1, int(16* params.test_n_way/params.train_n_way)) #if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_test_shot)
freeze_backbone = params.freeze_backbone
##################################################################
dataset_names = ["ISIC", "EuroSAT", "CropDisease", "ChestX"]
novel_loaders = []
loader_name = "ISIC"
print ("Loading {}".format(loader_name))
datamgr = ISIC_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
#novel_loaders.append((loader_name, novel_loader))
loader_name = "EuroSAT"
print ("Loading {}".format(loader_name))
datamgr = EuroSAT_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
#novel_loaders.append((loader_name, novel_loader))
loader_name = "CropDisease"
print ("Loading {}".format(loader_name))
datamgr = CropDisease_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
#novel_loaders.append((loader_name, novel_loader))
loader_name = "ChestX"
print ("Loading {}".format(loader_name))
datamgr = Chest_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
novel_loaders.append((loader_name, novel_loader))
#########################################################################
# Print checkpoint path to be loaded
checkpoint_dir = '%s/checkpoints/%s/%s_%s_%s%s_%s%s' %(configs.save_dir, params.dataset,
params.model, params.method,
params.n_support, "s" if params.no_aug_support else "s_aug",
params.n_query, "q" if params.no_aug_query else "q_aug")
checkpoint_dir += "_bs{}".format(params.batch_size)
if params.save_iter != -1:
modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
elif params.method in ['baseline', 'baseline++'] :
modelfile = get_resume_file(checkpoint_dir)
else:
modelfile = get_best_file(checkpoint_dir)
print('Evaluation from checkpoint:', modelfile)
# Perform evaluation
for idx, (loader_name, novel_loader) in enumerate(novel_loaders):
#for idx, novel_loader in tqdm(enumerate(novel_loaders), total=len(novel_loaders), position=0):
print ('Dataset: ', loader_name)
print ('Pretraining Dataset: ', params.dataset)
print('Adaptation? ', params.adaptation)
if params.adaptation:
print (' --> Freeze backbone?', freeze_backbone)
print (' --> Init classifier via prototypes?', params.proto_init)
print (' --> Adaptation steps: ', params.ft_steps)
print (' --> Adaptation learning rate: ', params.lr_rate)
# replace finetine() with your own method
finetune(novel_loader, n_query = 15, adaptation=params.adaptation,
freeze_backbone=freeze_backbone, proto_init=params.proto_init,
pretrained_dataset=params.dataset, **few_shot_params)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
11748,
28034,
13,
40085,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
1174... | 2.366197 | 1,917 |
# Crie um programa e faça-o jogar Jokenpô com você.
from random import randint
from time import sleep
itens = ('PEDRA', 'PAPEL', 'TESOURA')
pc = randint(0, 2)
print('''\033[1;4;7;30;47mOPÇÕES\033[m
[ 0 ] \033[1;31mPEDRA\033[m
[ 1 ] \033[1;31mPAPEL\033[m
[ 2 ] \033[1;31mTESOURA\033[m''')
jogador = int(input('Qual a sua jogada? '))
print('JO')
sleep(1)
print('KEN')
sleep(1)
print('PO!')
print('-=-' * 11)
print('PC jogou {}'.format(itens[pc]))
print('Jogador jogou {}'.format(itens[jogador]))
print('-=-' * 11)
if pc == 0: # o pc jogou PEDRA
if jogador == 0:
print('EMPATE')
elif jogador == 1:
print('JOGADOR VENCEU !!')
elif jogador == 2:
print('PC VENCEU !!')
elif pc == 1: # o pc jogou PAPEL
if jogador == 0:
print('PC VENCEU !!')
elif jogador == 1:
print('EMPATE')
elif jogador == 2:
print('JOGADOR VENCEU !!')
elif pc == 2: # o pc jogou TESOURA
if jogador == 0:
print('JOGADOR VENCEU !!')
elif jogador == 1:
print('PC VENCEU !!')
elif jogador == 2:
print('EMPATE')
else:
print('JOGADA INVÁLIDA!') | [
2,
327,
5034,
23781,
1430,
64,
304,
24685,
50041,
12,
78,
48342,
283,
449,
4233,
79,
27083,
401,
12776,
25792,
13,
198,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
198,
198,
270,
641,
796,
19203,
47,
1961,
3861,
3... | 2.014363 | 557 |
#!/usr/bin/env python3
# coding: utf-8
# Copyright (c) Latona. All rights reserved.
# from StatusJsonPythonModule import StatusJsonRest
from datetime import datetime
import os
import sys
import wave
import pyaudio
import time
from threading import (Event, Thread)
from aion.logger import lprint
from .mysql import MysqlManager
OUTPUT_DIR = "/var/lib/aion/Data/capture-audio-from-mic_1"
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
15069,
357,
66,
8,
5476,
4450,
13,
1439,
2489,
10395,
13,
198,
198,
2,
422,
12678,
41,
1559,
37906,
26796,
1330,
12678,
41,
1559,
... | 2.918919 | 148 |
#!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import os
import sys
import logging
from plugin import Plugin
from util import dbg
# except:
# print "error loading plugin %s" % name
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
17597,
13,
67,
756,
62,
13564,
62,
26327,
8189,
796,
6407,
198,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
198,
6738,
13877,... | 2.580645 | 93 |
from Bio.SeqIO.FastaIO import FastaIterator, FastaWriter
import argparse
import statistics
import sys
parser = argparse.ArgumentParser(epilog='''''',
description='This script runs the statistics of different fasta files',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--files', nargs='*', help='Fasta files. Select all fasta files you need to process separated by a space', required=True)
parser.add_argument('-o', '--output', default='sorted.fasta', help='Output name. This name will be used for two files: '
'output.fasta: it contains the sorted sequences '
'output.log: it contains a table with all info')
parser.add_argument('-s', '--sort', default=0, choices=[0, 1], type=int, help='Output order. 0- ascendant, 1- descendent')
parser.add_argument('-p', '--print', default=1, choices=[0, 1, 2, 3], type=int,
help='Define how much info is printed. 0- Print only the summary, 1- Print the longest and shortest'
' record per-file, 2- Print the shortest and longest record per-file and 3- Print all stats data for each record and file')
args = parser.parse_args(sys.argv[1:])
# print(args)
# this is the output file for multifasta
sorted_h = open(args.output, "w")
log = open('log.log', 'w')
log.write(' ID | LENGTH | SEQ ID | FILE \n')
# fasta output file handler
gwriter = FastaWriter(sorted_h)
data = {}
stotal = 0
gfsmax = 0
gfsmin = 99999999999
grec_max = []
grec_min = []
gfasta_legth = []
nfiles = 0
# Open the multifasta file
for cfile in args.files:
nfiles += 1
if args.print >= 1:
print(80 * '=')
print(f'Processing {cfile}...')
ftotal = 0
with open(cfile) as fastafile:
rec_max = None
rec_min = None
fasta_legth = []
fsmax = 0
fsmin = 99999999999
for record in FastaIterator(fastafile):
stotal += 1
ftotal += 1
cl = len(record.seq)
fasta_legth.append(cl)
if cl > fsmax:
fsmax = cl
rec_max = record
if cl < fsmin:
fsmin = cl
rec_min = record
if cl > gfsmax:
gfsmax = cl
grec_max = [cfile, record]
if cl < gfsmin:
gfsmin = cl
grec_min = [cfile, record]
if args.print == 3:
print(80 * '-')
print(record)
print(80 * '-')
print('Length: ', cl)
data[stotal] = [cl, cfile, record]
gfasta_legth.extend(fasta_legth)
mean = round(sum(fasta_legth) / len(fasta_legth), 0)
if args.print >= 1:
print('')
print(30 * '*' + ' File Summary ' + 30 * '*')
print(f'Number of records: {ftotal}')
print(f'The average sequences length: {mean}')
print(30 * '_')
print(f'Longest record length: {fsmax}')
if args.print >= 2:
print(30 * '-')
print(f'Longest record: {rec_max}')
print(30 * '_')
print(f'Shortest record length: {fsmin}')
if args.print >= 2:
print(30 * '-')
print(f'Shortest record: {rec_min}')
print(30 * '_')
print('')
if args.sort:
ordered_list = sorted(data.items(), key=lambda x: x[1][0], reverse=True)
else:
ordered_list = sorted(data.items(), key=lambda x: x[1][0])
c = 1
for gid, rlist in ordered_list:
log.write('{:5d} | {:6d} | {:40s} | {:20s}\n'.format(c, rlist[0], rlist[2].id, rlist[1]))
gwriter.write_record(rlist[2])
c += 1
gmean = round(sum(gfasta_legth) / len(gfasta_legth), 0)
# close the output files
sorted_h.close()
print(80 * '#' + '\n')
print(32 * '#' + ' Summary ' + 32 * '#')
print(f'Number of files: {nfiles}')
print(f'Number of records: {stotal}')
print(f'The average sequences length: {gmean}')
print(30 * '_')
print(f'Longest global record length: {gfsmax}')
if args.print >= 2:
print(30 * '-')
print(f'Longest global record file: {grec_max[0]}')
print(f'Longest global record: \n{grec_max[1]}')
print(30 * '_')
print(f'Shortest global record length: {gfsmin}')
if args.print >= 2:
print(30 * '-')
print(f'Shortest global record file: {grec_min[0]}')
print(f'Shortest global record: \n{grec_min[1]}')
print(30 * '_')
| [
6738,
16024,
13,
4653,
80,
9399,
13,
22968,
64,
9399,
1330,
12549,
64,
37787,
11,
12549,
64,
34379,
198,
11748,
1822,
29572,
198,
11748,
7869,
198,
11748,
25064,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
538,
346,
... | 1.978017 | 2,411 |
from django import template
from django.core.exceptions import ImproperlyConfigured
register = template.Library()
@register.filter(name='currency')
def currency(price, currency):
    """
    Return `price` converted into `currency` and rendered with the
    currency's display format string.

    Args:
        price: numeric value (or numeric string) in the base currency.
        currency: object with `exchange_rate`, `display_format` and `name`
            attributes (presumably a Currency model instance — confirm).

    Raises:
        ImproperlyConfigured: if `display_format` is not a valid format string.
    """
    price = float(price)
    price *= float(currency.exchange_rate)
    try:
        return currency.display_format.format(price)
    except Exception as e:
        # Bug fix: Python 3 exceptions have no `.message` attribute, so the
        # original `e.message` raised AttributeError instead of the intended
        # configuration error; interpolate the exception itself instead.
        raise ImproperlyConfigured(
            'Invalid currency format string: "%s" for currency "%s". %s'
            % (currency.display_format, currency.name, e)
        )
6738,
42625,
14208,
1330,
11055,
220,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,
306,
16934,
1522,
628,
198,
30238,
796,
11055,
13,
23377,
3419,
628,
198,
31,
30238,
13,
24455,
7,
3672,
11639,
34415,
11537,
19... | 3.126437 | 174 |
# coding=utf-8
from __future__ import division
from honeybee_radiance.modifier.material import Trans, Light, BSDF
from honeybee_radiance.lib.modifiers import generic_wall, generic_ceiling, \
black, generic_exterior_window, air_boundary, white_glow
import os
import json
# run all functions within the file
# run all functions within the file
# master_dir: parent of this file's directory (the repo/package root)
master_dir = os.path.split(os.path.dirname(__file__))[0]
# all sample files are written under <master_dir>/samples/modifier
sample_directory = os.path.join(master_dir, 'samples', 'modifier')

# Regenerate each modifier sample; the modifier_* functions are presumably
# defined earlier in this file (per the comment above) — confirm, as their
# definitions are not visible in this excerpt.
modifier_plastic_generic_wall(sample_directory)
modifier_plastic_generic_ceiling(sample_directory)
modifier_plastic_black(sample_directory)
modifier_glass_generic_exterior_window(sample_directory)
modifier_glass_air_boundary(sample_directory)
modifier_trans_tree_foliage(sample_directory)
modifier_glow_white(sample_directory)
modifier_light_green_spotlight(sample_directory)
#modifier_bsdf_klemsfull(sample_directory)
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
198,
6738,
12498,
20963,
62,
6335,
3610,
13,
4666,
7483,
13,
33665,
1330,
3602,
11,
4401,
11,
24218,
8068,
198,
198,
6738,
12498,
20963,
62,
6335,
3610,
13,
... | 3.045614 | 285 |
import os
from concurrent import futures
from contextlib import contextmanager
from datetime import date
from pathlib import Path
from unittest.mock import patch
import grpc
import pytest
from sqlalchemy.sql import or_, text
from couchers.config import config
from couchers.constants import GUIDELINES_VERSION, TOS_VERSION
from couchers.crypto import random_hex
from couchers.db import get_engine, session_scope
from couchers.interceptors import AuthValidatorInterceptor, _try_get_and_update_user_details
from couchers.models import (
Base,
FriendRelationship,
FriendStatus,
Language,
LanguageAbility,
LanguageFluency,
Region,
RegionLived,
RegionVisited,
User,
UserBlock,
UserSession,
)
from couchers.servicers.account import Account
from couchers.servicers.admin import Admin
from couchers.servicers.api import API
from couchers.servicers.auth import Auth, create_session
from couchers.servicers.blocking import Blocking
from couchers.servicers.bugs import Bugs
from couchers.servicers.communities import Communities
from couchers.servicers.conversations import Conversations
from couchers.servicers.discussions import Discussions
from couchers.servicers.donations import Donations, Stripe
from couchers.servicers.events import Events
from couchers.servicers.groups import Groups
from couchers.servicers.jail import Jail
from couchers.servicers.media import Media, get_media_auth_interceptor
from couchers.servicers.notifications import Notifications
from couchers.servicers.pages import Pages
from couchers.servicers.references import References
from couchers.servicers.reporting import Reporting
from couchers.servicers.requests import Requests
from couchers.servicers.resources import Resources
from couchers.servicers.search import Search
from couchers.servicers.threads import Threads
from couchers.sql import couchers_select as select
from couchers.utils import create_coordinate, now
from proto import (
account_pb2_grpc,
admin_pb2_grpc,
api_pb2_grpc,
auth_pb2_grpc,
blocking_pb2_grpc,
bugs_pb2_grpc,
communities_pb2_grpc,
conversations_pb2_grpc,
discussions_pb2_grpc,
donations_pb2_grpc,
events_pb2_grpc,
groups_pb2_grpc,
jail_pb2_grpc,
media_pb2_grpc,
notifications_pb2_grpc,
pages_pb2_grpc,
references_pb2_grpc,
reporting_pb2_grpc,
requests_pb2_grpc,
resources_pb2_grpc,
search_pb2_grpc,
stripe_pb2_grpc,
threads_pb2_grpc,
)
def drop_all():
    """Wipe the database: drop the public and logging schemas and recreate them."""
    # Extensions re-installed after the wipe:
    #   postgis    - Geographic Information System (GIS) support
    #   pg_trgm    - trigram based search
    #   btree_gist - gist-based exclusion constraints
    ddl = (
        "DROP SCHEMA public CASCADE; DROP SCHEMA IF EXISTS logging CASCADE;"
        " CREATE SCHEMA public; CREATE SCHEMA logging; CREATE EXTENSION postgis;"
        " CREATE EXTENSION pg_trgm; CREATE EXTENSION btree_gist;"
    )
    with session_scope() as session:
        session.execute(text(ddl))
def create_schema_from_models():
    """
    Build the whole schema straight from the current SQLAlchemy models,
    bypassing incremental migrations.
    """
    # the slugify SQL function must exist before the tables that use it
    slugify_path = Path(__file__).parent / "slugify.sql"
    with open(slugify_path) as sql_file, session_scope() as session:
        session.execute(text(sql_file.read()))
    Base.metadata.create_all(get_engine())
def populate_testing_resources(session):
    """
    Testing version of couchers.resources.copy_resources_to_database:
    loads a small fixed set of regions and languages plus fake timezone areas.
    """
    regions = [
        ("AUS", "Australia"),
        ("CAN", "Canada"),
        ("CHE", "Switzerland"),
        ("CUB", "Cuba"),
        ("CXR", "Christmas Island"),
        ("CZE", "Czechia"),
        ("DEU", "Germany"),
        ("EGY", "Egypt"),
        ("ESP", "Spain"),
        ("EST", "Estonia"),
        ("FIN", "Finland"),
        ("FRA", "France"),
        ("GBR", "United Kingdom"),
        ("GEO", "Georgia"),
        ("GHA", "Ghana"),
        ("GRC", "Greece"),
        ("HKG", "Hong Kong"),
        ("IRL", "Ireland"),
        ("ISR", "Israel"),
        ("ITA", "Italy"),
        ("JPN", "Japan"),
        ("LAO", "Laos"),
        ("MEX", "Mexico"),
        ("MMR", "Myanmar"),
        ("NAM", "Namibia"),
        ("NLD", "Netherlands"),
        ("NZL", "New Zealand"),
        ("POL", "Poland"),
        ("PRK", "North Korea"),
        ("REU", "Réunion"),
        ("SGP", "Singapore"),
        ("SWE", "Sweden"),
        ("THA", "Thailand"),
        ("TUR", "Turkey"),
        ("TWN", "Taiwan"),
        ("USA", "United States"),
        ("VNM", "Vietnam"),
    ]

    languages = [
        ("arb", "Arabic (Standard)"),
        ("deu", "German"),
        ("eng", "English"),
        ("fin", "Finnish"),
        ("fra", "French"),
        ("heb", "Hebrew"),
        ("hun", "Hungarian"),
        ("jpn", "Japanese"),
        ("pol", "Polish"),
        ("swe", "Swedish"),
        ("cmn", "Chinese (Mandarin)"),
    ]

    # fake timezone areas keep the test database light-weight
    tz_path = Path(__file__).parent / ".." / ".." / "resources" / "timezone_areas.sql-fake"
    with open(tz_path, "r") as tz_file:
        tz_sql = tz_file.read()

    session.add_all(Region(code=code, name=name) for code, name in regions)
    session.add_all(Language(code=code, name=name) for code, name in languages)
    session.execute(text(tz_sql))
def recreate_database():
    """
    Reset the running Postgres database and rebuild it with metadata.create_all().
    """
    # a non-UTC timezone catches timezone-handling errors that UTC would mask
    os.environ["TZ"] = "America/New_York"

    # wipe, rebuild directly from the models (no migrations), then seed
    drop_all()
    create_schema_from_models()
    with session_scope() as session:
        populate_testing_resources(session)
@pytest.fixture()
def db():
    """
    Pytest fixture: connect to the running Postgres database, wipe it and
    rebuild it via metadata.create_all().
    """
    recreate_database()
def generate_user(*, make_invisible=False, **kwargs):
    """
    Create a new user, return session token

    The user is detached from any session, and you can access its static attributes, but you can't modify it

    Use this most of the time

    Args:
        make_invisible: if True, mark the user deleted after the session token
            has been created (requires a second commit, see below).
        **kwargs: overrides for any key in the default ``user_opts`` dict.

    Returns:
        (user, token) tuple; ``user`` is expunged from the session.
    """
    auth = Auth()

    with session_scope() as session:
        # default args
        username = "test_user_" + random_hex(16)
        user_opts = {
            "username": username,
            "email": f"{username}@dev.couchers.org",
            # password is just 'password'
            # this is hardcoded because the password is slow to hash (so would slow down tests otherwise)
            "hashed_password": b"$argon2id$v=19$m=65536,t=2,p=1$4cjGg1bRaZ10k+7XbIDmFg$tZG7JaLrkfyfO7cS233ocq7P8rf3znXR7SAfUt34kJg",
            "name": username.capitalize(),
            "city": "Testing city",
            "hometown": "Test hometown",
            "community_standing": 0.5,
            "birthdate": date(year=2000, month=1, day=1),
            "gender": "N/A",
            "pronouns": "",
            "occupation": "Tester",
            "education": "UST(esting)",
            "about_me": "I test things",
            "my_travels": "Places",
            "things_i_like": "Code",
            "about_place": "My place has a lot of testing paraphenelia",
            "additional_information": "I can be a bit testy",
            # you need to make sure to update this logic to make sure the user is jailed/not on request
            "accepted_tos": TOS_VERSION,
            "accepted_community_guidelines": GUIDELINES_VERSION,
            "geom": create_coordinate(40.7108, -73.9740),
            "geom_radius": 100,
            "onboarding_emails_sent": 1,
            "last_onboarding_email_sent": now(),
            "new_notifications_enabled": True,
        }

        # caller-supplied overrides win over the defaults above
        for key, value in kwargs.items():
            user_opts[key] = value

        user = User(**user_opts)
        session.add(user)
        # flush (not commit) so user.id is assigned for the rows below
        session.flush()

        session.add(RegionVisited(user_id=user.id, region_code="CHE"))
        session.add(RegionVisited(user_id=user.id, region_code="REU"))
        session.add(RegionVisited(user_id=user.id, region_code="FIN"))

        session.add(RegionLived(user_id=user.id, region_code="ESP"))
        session.add(RegionLived(user_id=user.id, region_code="FRA"))
        session.add(RegionLived(user_id=user.id, region_code="EST"))

        session.add(LanguageAbility(user_id=user.id, language_code="fin", fluency=LanguageFluency.fluent))
        session.add(LanguageAbility(user_id=user.id, language_code="fra", fluency=LanguageFluency.beginner))

        # this expires the user, so now it's "dirty"
        session.commit()

        token, _ = create_session(_DummyContext(), session, user, False)

        # deleted user aborts session creation, hence this follows and necessitates a second commit
        if make_invisible:
            user.is_deleted = True
            session.commit()

        # refresh it, undoes the expiry
        session.refresh(user)
        # allows detaches the user from the session, allowing its use outside this session
        session.expunge(user)

    return user, token
# This doubles as get_FriendRequest, since a friend request is just a pending friend relationship
class CookieMetadataPlugin(grpc.AuthMetadataPlugin):
    """
    Injects the right `cookie: couchers-sesh=...` header into the metadata
    """
    # NOTE(review): no __init__/__call__ is visible in this excerpt, but
    # grpc.AuthMetadataPlugin subclasses must implement __call__ — confirm the
    # implementation isn't missing from this copy of the file.
@contextmanager
def auth_api_session():
    """
    Yield an Auth API stub and the metadata-keeper interceptor on its channel.

    Uses a real gRPC server (not a fake channel) because the auth service
    plays around with headers.
    """
    with futures.ThreadPoolExecutor(1) as pool:
        auth_server = grpc.server(pool)
        bound_port = auth_server.add_secure_port("localhost:0", grpc.local_server_credentials())
        auth_pb2_grpc.add_AuthServicer_to_server(Auth(), auth_server)
        auth_server.start()
        try:
            with grpc.secure_channel(f"localhost:{bound_port}", grpc.local_channel_credentials()) as raw_channel:
                keeper = _MetadataKeeperInterceptor()
                intercepted_channel = grpc.intercept_channel(raw_channel, keeper)
                yield auth_pb2_grpc.AuthStub(intercepted_channel), keeper
        finally:
            auth_server.stop(None).wait()
@contextmanager
def api_session(token):
    """Yield an API stub backed by a fake channel authenticated with `token`."""
    chan = fake_channel(token)
    api_pb2_grpc.add_APIServicer_to_server(API(), chan)
    yield api_pb2_grpc.APIStub(chan)
@contextmanager
def real_api_session(token):
    """
    Yield an API stub served over real TCP sockets, authenticated with `token`.
    """
    with futures.ThreadPoolExecutor(1) as pool:
        rpc_server = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        bound_port = rpc_server.add_secure_port("localhost:0", grpc.local_server_credentials())
        api_pb2_grpc.add_APIServicer_to_server(API(), rpc_server)
        rpc_server.start()

        # attach the session cookie to every call on the channel
        cookie_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
        channel_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), cookie_creds)
        try:
            with grpc.secure_channel(f"localhost:{bound_port}", channel_creds) as chan:
                yield api_pb2_grpc.APIStub(chan)
        finally:
            rpc_server.stop(None).wait()
@contextmanager
def real_admin_session(token):
    """
    Yield an Admin service stub served over real TCP sockets, authenticated
    with `token`.
    """
    with futures.ThreadPoolExecutor(1) as pool:
        rpc_server = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        bound_port = rpc_server.add_secure_port("localhost:0", grpc.local_server_credentials())
        admin_pb2_grpc.add_AdminServicer_to_server(Admin(), rpc_server)
        rpc_server.start()

        # attach the session cookie to every call on the channel
        cookie_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
        channel_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), cookie_creds)
        try:
            with grpc.secure_channel(f"localhost:{bound_port}", channel_creds) as chan:
                yield admin_pb2_grpc.AdminStub(chan)
        finally:
            rpc_server.stop(None).wait()
@contextmanager
def real_jail_session(token):
    """
    Yield a Jail service stub served over real TCP sockets, authenticated
    with `token`.
    """
    with futures.ThreadPoolExecutor(1) as pool:
        rpc_server = grpc.server(pool, interceptors=[AuthValidatorInterceptor()])
        bound_port = rpc_server.add_secure_port("localhost:0", grpc.local_server_credentials())
        jail_pb2_grpc.add_JailServicer_to_server(Jail(), rpc_server)
        rpc_server.start()

        # attach the session cookie to every call on the channel
        cookie_creds = grpc.metadata_call_credentials(CookieMetadataPlugin(token))
        channel_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), cookie_creds)
        try:
            with grpc.secure_channel(f"localhost:{bound_port}", channel_creds) as chan:
                yield jail_pb2_grpc.JailStub(chan)
        finally:
            rpc_server.stop(None).wait()
@contextmanager
def conversations_session(token):
    """Yield a Conversations API stub backed by a fake channel authenticated with `token`."""
    chan = fake_channel(token)
    conversations_pb2_grpc.add_ConversationsServicer_to_server(Conversations(), chan)
    yield conversations_pb2_grpc.ConversationsStub(chan)
@contextmanager
def requests_session(token):
    """Yield a Requests API stub backed by a fake channel authenticated with `token`."""
    chan = fake_channel(token)
    requests_pb2_grpc.add_RequestsServicer_to_server(Requests(), chan)
    yield requests_pb2_grpc.RequestsStub(chan)
# Bug fix: @contextmanager was stacked four times here. Applying it more than
# once breaks the manager — the outer wrappers receive a _GeneratorContextManager
# instead of a generator function, so entering the context fails at runtime.
# Exactly one application is kept.
@contextmanager
def real_stripe_session():
    """
    Yield a Stripe service stub served over real TCP sockets.

    No auth interceptor credentials are attached to the channel (the Stripe
    webhook endpoint is unauthenticated at the gRPC layer here).
    """
    with futures.ThreadPoolExecutor(1) as executor:
        server = grpc.server(executor, interceptors=[AuthValidatorInterceptor()])
        port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
        stripe_pb2_grpc.add_StripeServicer_to_server(Stripe(), server)
        server.start()

        creds = grpc.local_channel_credentials()

        try:
            with grpc.secure_channel(f"localhost:{port}", creds) as channel:
                yield stripe_pb2_grpc.StripeStub(channel)
        finally:
            server.stop(None).wait()
# Bug fix: @contextmanager was stacked six times here. Applying it more than
# once breaks the manager — the outer wrappers receive a _GeneratorContextManager
# instead of a generator function, so entering the context fails at runtime.
# Exactly one application is kept.
@contextmanager
def account_session(token):
    """
    Yield an Account API stub backed by a fake channel authenticated with `token`.
    """
    channel = fake_channel(token)
    account_pb2_grpc.add_AccountServicer_to_server(Account(), channel)
    yield account_pb2_grpc.AccountStub(channel)
@contextmanager
def search_session(token):
    """Yield a Search API stub backed by a fake channel authenticated with `token`."""
    chan = fake_channel(token)
    search_pb2_grpc.add_SearchServicer_to_server(Search(), chan)
    yield search_pb2_grpc.SearchStub(chan)
@contextmanager
def references_session(token):
    """Yield a References API stub backed by a fake channel authenticated with `token`."""
    chan = fake_channel(token)
    references_pb2_grpc.add_ReferencesServicer_to_server(References(), chan)
    yield references_pb2_grpc.ReferencesStub(chan)
# Bug fix: @contextmanager was stacked five times here. Applying it more than
# once breaks the manager — the outer wrappers receive a _GeneratorContextManager
# instead of a generator function, so entering the context fails at runtime.
# Exactly one application is kept.
@contextmanager
def media_session(bearer_token):
    """
    Yield a fresh Media API stub over real TCP sockets; `bearer_token` is used
    for media auth (attached as an access-token call credential).
    """
    media_auth_interceptor = get_media_auth_interceptor(bearer_token)

    with futures.ThreadPoolExecutor(1) as executor:
        server = grpc.server(executor, interceptors=[media_auth_interceptor])
        port = server.add_secure_port("localhost:0", grpc.local_server_credentials())
        servicer = Media()
        media_pb2_grpc.add_MediaServicer_to_server(servicer, server)
        server.start()

        call_creds = grpc.access_token_call_credentials(bearer_token)
        comp_creds = grpc.composite_channel_credentials(grpc.local_channel_credentials(), call_creds)

        try:
            with grpc.secure_channel(f"localhost:{port}", comp_creds) as channel:
                yield media_pb2_grpc.MediaStub(channel)
        finally:
            server.stop(None).wait()
@pytest.fixture()
@pytest.fixture
| [
11748,
28686,
198,
6738,
24580,
1330,
25650,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
4818,
8079,
1330,
3128,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
11748,
1036,
1475... | 2.474792 | 6,605 |
from datetime import datetime
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# SQLite file backing this demo app
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///relation_hello_v22.db'
# disable modification tracking (avoids overhead and the startup warning)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)

# Association table for a many-to-many link between classes and students.
# NOTE(review): the referenced 'classes' and 'students' tables (Class/Student
# models) are presumably defined elsewhere in this file — confirm.
student_identifier = db.Table('student_identifier',
                              db.Column('class_id', db.Integer, db.ForeignKey('classes.class_id')),
                              db.Column('user_id', db.Integer, db.ForeignKey('students.user_id'))
                              )

# Example usage of the relationship:
# s = Student()
# c = Class()
# c.students.append(s)
# db.session.add(c)
# db.session.commit()
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
2581,
198,
6738,
42903,
62,
25410,
282,
26599,
1330,
16363,
2348,
26599,
198,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
1324,
13,
1125... | 2.69863 | 219 |