content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
""" eve_travel_helper -- EVE Online travel planning helper tool package
This tool is intended to provide assistance to EVE Online players with
planning travel routes.
Modules:
* routing
* server
Sub-packages:
* dbclient
"""
| [
37811,
28001,
62,
35927,
62,
2978,
525,
1377,
32356,
7467,
3067,
5410,
31904,
2891,
5301,
198,
198,
1212,
2891,
318,
5292,
284,
2148,
6829,
284,
32356,
7467,
1938,
351,
198,
11578,
768,
3067,
11926,
13,
198,
198,
5841,
5028,
25,
198,
... | 3.492754 | 69 |
from urllib.parse import urljoin
import pytest
import respx
from httpx import Client as HTTPXClient
from httpx import Response
from healthchecks_io import CheckCreate
from healthchecks_io import CheckTrap
from healthchecks_io import CheckUpdate
from healthchecks_io import PingFailedError
from healthchecks_io import WrongClientError
@pytest.mark.respx
@pytest.mark.respx
@pytest.mark.respx
@pytest.mark.asyncio
@pytest.mark.respx
@pytest.mark.asyncio
@pytest.mark.respx
@pytest.mark.asyncio
@pytest.mark.respx
@pytest.mark.asyncio
| [
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
22179,
198,
198,
11748,
12972,
9288,
198,
11748,
1217,
87,
198,
6738,
2638,
87,
1330,
20985,
355,
14626,
55,
11792,
198,
6738,
2638,
87,
1330,
18261,
198,
198,
6738,
1535,
42116,
62,
952,
... | 2.951613 | 186 |
"""
Distributions and Probability Tools
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
#plt.style.use('seaborn')
# 第一簇的数据
num1, mu1, var1 = 400, [0.5, 0.5], [1, 3]
X1 = np.random.multivariate_normal(mu1, np.diag(var1), num1)
# 第二簇的数据
num2, mu2, var2 = 600, [5.5, 2.5], [2, 2]
X2 = np.random.multivariate_normal(mu2, np.diag(var2), num2)
# 第三簇的数据
num3, mu3, var3 = 1000, [1, 7], [6, 2]
X3 = np.random.multivariate_normal(mu3, np.diag(var3), num3)
# 合并在一起
X = np.vstack((X1, X2, X3))
| [
37811,
201,
198,
20344,
2455,
507,
290,
30873,
1799,
20003,
201,
198,
37811,
201,
198,
11748,
10688,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
2603,
29487... | 1.87963 | 324 |
import unittest
from loop import WrapBuffer
from utils import make_sin_sound, SD_RATE
sound_len = 500_000 # samples
sound = make_sin_sound(440, sound_len / SD_RATE)
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
9052,
1330,
41028,
28632,
198,
6738,
3384,
4487,
1330,
787,
62,
31369,
62,
23661,
11,
9834,
62,
49,
6158,
198,
198,
23661,
62,
11925,
796,
5323,
62,
830,
220,
1303,
8405,
198,
23661,
796,
787,
6... | 2.658537 | 82 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python Team Awareness Kit (PyTAK) Module Tests."""
import asyncio
import csv
import io
import urllib
import xml.etree.ElementTree
import pytest
import adsbxcot.functions
__author__ = 'Greg Albrecht W2GMD <oss@undef.net>'
__copyright__ = 'Copyright 2021 Orion Labs, Inc.'
__license__ = 'Apache License, Version 2.0'
@pytest.fixture
@pytest.fixture
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
37906,
4816,
36735,
10897,
357,
20519,
5603,
42,
8,
19937,
30307,
526,
15931,
198,
198,
11748,
30351,
952,
... | 2.715232 | 151 |
# Exercise 4.3
from matplotlib import pyplot as plt
from RSE.impedance import Impedance
Z_1 = Impedance(3, 5)
Z_2 = Impedance(5, -6)
Z_0 = Impedance(2, 3)
Z_12 = Z_1.parallel(Z_2)
Z_tot = Z_0 + Z_12
E = complex(100, 0)
# Compute currents.
I = E/Z_tot.Z
V_12 = Z_0.Z * I
V_23 = Z_12.Z * I
I_Z1 = V_12 / Z_1.Z
I_Z2 = I - I_Z1
print("V_12 = %s" % print_complex(V_12))
print("V_23 = %s" % print_complex(V_23))
print("I = %s" % print_complex(I))
print("I_Z1 = %s" % print_complex(I_Z1))
print("I_Z2 = %s" % print_complex(I_Z2))
# Plot voltages.
f, (ax1, ax2) = plt.subplots(1, 2)
for U in [E, V_12, V_23]:
ax1.plot([0, U.real], [0, U.imag], marker='o')
#ax1.set_xlim((0, 110))
#ax1.set_ylim((-50, 50))
ax1.set_ylabel('Imaginary')
ax1.set_xlabel('Real')
# Plot currents.
for i in [I, I_Z1, I_Z2]:
ax2.plot([0, i.real], [0, i.imag], marker='o')
#ax2.set_xlim((0, 0.04))
#ax2.set_ylim((-0.02, 0.02))
ax2.set_ylabel('Imaginary')
ax2.set_xlabel('Real')
plt.show()
| [
2,
32900,
604,
13,
18,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
371,
5188,
13,
320,
9124,
590,
1330,
1846,
9124,
590,
628,
198,
198,
57,
62,
16,
796,
1846,
9124,
590,
7,
18,
11,
642,
8,
198... | 1.889535 | 516 |
__version__ = '0.0.1'
def flatten_list(input_list):
"""
Flattens list with many nested lists.
>>> flatten_list([1, [2, [3], [4]]])
[1, 2, 3, 4]
"""
result = []
for item in input_list:
if isinstance(item, list):
result.extend(flatten_list(item))
# yield from flatten_list(item)
else:
result.append(item)
# yield item
return result
| [
834,
9641,
834,
796,
705,
15,
13,
15,
13,
16,
6,
628,
198,
4299,
27172,
268,
62,
4868,
7,
15414,
62,
4868,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1610,
1078,
641,
1351,
351,
867,
28376,
8341,
13,
628,
220,
220,
220... | 2.037915 | 211 |
from sys import stdout
from tqdm import tqdm
from composeml.data_slice import DataSliceGenerator
from composeml.label_search import ExampleSearch, LabelSearch
from composeml.label_times import LabelTimes
class LabelMaker:
"""Automatically makes labels for prediction problems."""
def __init__(self, target_entity, time_index, labeling_function=None, window_size=None):
"""Creates an instance of label maker.
Args:
target_entity (str): Entity on which to make labels.
time_index (str): Name of time column in the data frame.
labeling_function (function or list(function) or dict(str=function)): Function, list of functions, or dictionary of functions that transform a data slice.
When set as a dictionary, the key is used as the name of the labeling function.
window_size (str or int): Size of the data slices. As a string, the value can be a timedelta or a column in the data frame to group by.
As an integer, the value can be the number of rows. Default value is all future data.
"""
self.labeling_function = labeling_function or {}
self.target_entity = target_entity
self.time_index = time_index
self.window_size = window_size
def _name_labeling_function(self, function):
"""Gets the names of the labeling functions."""
has_name = hasattr(function, '__name__')
return function.__name__ if has_name else type(function).__name__
def _check_labeling_function(self, function, name=None):
"""Checks whether the labeling function is callable."""
assert callable(function), 'labeling function must be callabe'
return function
@property
def labeling_function(self):
"""Gets the labeling function(s)."""
return self._labeling_function
@labeling_function.setter
def labeling_function(self, value):
"""Sets and formats the intial labeling function(s).
Args:
value (function or list(function) or dict(str=function)): Function that transforms a data slice to a label.
"""
if isinstance(value, dict):
for name, function in value.items():
self._check_labeling_function(function)
assert isinstance(name, str), 'labeling function name must be string'
if callable(value):
value = [value]
if isinstance(value, (tuple, list)):
value = {self._name_labeling_function(function): self._check_labeling_function(function) for function in value}
assert isinstance(value, dict), 'value type for labeling function not supported'
self._labeling_function = value
def slice(self, df, num_examples_per_instance, minimum_data=None, gap=None, drop_empty=True):
"""Generates data slices of target entity.
Args:
df (DataFrame): Data frame to create slices on.
num_examples_per_instance (int): Number of examples per unique instance of target entity.
minimum_data (str): Minimum data before starting search. Default value is first time of index.
gap (str or int): Time between examples. Default value is window size.
If an integer, search will start on the first event after the minimum data.
drop_empty (bool): Whether to drop empty slices. Default value is True.
Returns:
ds (generator): Returns a generator of data slices.
"""
self._check_example_count(num_examples_per_instance, gap)
df = self.set_index(df)
entity_groups = df.groupby(self.target_entity)
num_examples_per_instance = ExampleSearch._check_number(num_examples_per_instance)
generator = DataSliceGenerator(
window_size=self.window_size,
min_data=minimum_data,
drop_empty=drop_empty,
gap=gap,
)
for entity_id, df in entity_groups:
for ds in generator(df):
setattr(ds.context, self.target_entity, entity_id)
yield ds
if ds.context.slice_number >= num_examples_per_instance:
break
@property
def _bar_format(self):
"""Template to format the progress bar during a label search."""
value = "Elapsed: {elapsed} | "
value += "Remaining: {remaining} | "
value += "Progress: {l_bar}{bar}| "
value += self.target_entity + ": {n}/{total} "
return value
def _run_search(
self,
df,
generator,
search,
verbose=True,
*args,
**kwargs,
):
"""Search implementation to make label records.
Args:
df (DataFrame): Data frame to search and extract labels.
generator (DataSliceGenerator): The generator for data slices.
search (LabelSearch or ExampleSearch): The type of search to be done.
verbose (bool): Whether to render progress bar. Default value is True.
*args: Positional arguments for labeling function.
**kwargs: Keyword arguments for labeling function.
Returns:
records (list(dict)): Label Records
"""
df = self.set_index(df)
entity_groups = df.groupby(self.target_entity)
multiplier = search.expected_count if search.is_finite else 1
total = entity_groups.ngroups * multiplier
progress_bar, records = tqdm(
total=total,
bar_format=self._bar_format,
disable=not verbose,
file=stdout,
), []
for entity_count, (entity_id, df) in enumerate(entity_groups):
for ds in generator(df):
items = self.labeling_function.items()
labels = {name: lf(ds, *args, **kwargs) for name, lf in items}
valid_labels = search.is_valid_labels(labels)
if not valid_labels: continue
records.append({
self.target_entity: entity_id,
'time': ds.context.slice_start,
**labels,
})
search.update_count(labels)
# if finite search, progress bar is updated for each example found
if search.is_finite: progress_bar.update(n=1)
if search.is_complete: break
# if finite search, progress bar is updated for examples not found
# otherwise, progress bar is updated for each entity group
n = missing_examples(entity_count + 1) if search.is_finite else 1
progress_bar.update(n=n)
search.reset_count()
total -= progress_bar.n
progress_bar.update(n=total)
progress_bar.close()
return records
def _check_example_count(self, num_examples_per_instance, gap):
"""Checks whether example count corresponds to data slices."""
if self.window_size is None and gap is None:
more_than_one = num_examples_per_instance > 1
assert not more_than_one, "must specify gap if num_examples > 1 and window size = none"
def search(self,
df,
num_examples_per_instance,
minimum_data=None,
gap=None,
drop_empty=True,
verbose=True,
*args,
**kwargs):
"""Searches the data to calculates labels.
Args:
df (DataFrame): Data frame to search and extract labels.
num_examples_per_instance (int or dict): The expected number of examples to return from each entity group.
A dictionary can be used to further specify the expected number of examples to return from each label.
minimum_data (str): Minimum data before starting search. Default value is first time of index.
gap (str or int): Time between examples. Default value is window size.
If an integer, search will start on the first event after the minimum data.
drop_empty (bool): Whether to drop empty slices. Default value is True.
verbose (bool): Whether to render progress bar. Default value is True.
*args: Positional arguments for labeling function.
**kwargs: Keyword arguments for labeling function.
Returns:
lt (LabelTimes): Calculated labels with cutoff times.
"""
assert self.labeling_function, 'missing labeling function(s)'
self._check_example_count(num_examples_per_instance, gap)
is_label_search = isinstance(num_examples_per_instance, dict)
search = (LabelSearch if is_label_search else ExampleSearch)(num_examples_per_instance)
generator = DataSliceGenerator(
window_size=self.window_size,
min_data=minimum_data,
drop_empty=drop_empty,
gap=gap,
)
records = self._run_search(
df=df,
generator=generator,
search=search,
verbose=verbose,
*args,
**kwargs,
)
lt = LabelTimes(
data=records,
target_columns=list(self.labeling_function),
target_entity=self.target_entity,
search_settings={
'num_examples_per_instance': num_examples_per_instance,
'minimum_data': str(minimum_data),
'window_size': str(self.window_size),
'gap': str(gap),
},
)
return lt
def set_index(self, df):
"""Sets the time index in a data frame (if not already set).
Args:
df (DataFrame): Data frame to set time index in.
Returns:
df (DataFrame): Data frame with time index set.
"""
if df.index.name != self.time_index:
df = df.set_index(self.time_index)
if 'time' not in str(df.index.dtype):
df.index = df.index.astype('datetime64[ns]')
return df
| [
6738,
25064,
1330,
14367,
448,
198,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
198,
6738,
36664,
4029,
13,
7890,
62,
48369,
1330,
6060,
11122,
501,
8645,
1352,
198,
6738,
36664,
4029,
13,
18242,
62,
12947,
1330,
17934,
18243,... | 2.347452 | 4,297 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-scheduling Sparse Matrix Multiplication on CPU with Custom Sketch Rule
===========================================================================
**Author**: `Chengfan Jia <https://github.com/jcf94/>`_
This is a tutorial on how to use the auto-scheduler to tune a sparse matrix multiplication for
CPUs.
Auto-scheduler is designed to explore the schedule with best performance for a given computation
declaration automatically. While sometimes, we may have a demand to try some special ops which may
not been well-supported by auto-scheduler's default sketch rules and result in poor performance.
Fortunately, auto-scheduler currently allows user to provide a CustomSketch to cover these cases.
We use sparse matrix multiplication as an example in this tutorial to demonstrate how to implement
and plug a custom sketch rule to the auto-scheduler's search policy.
Note that this tutorial will not run on Windows or recent versions of macOS. To
get it to run, you will need to wrap the body of this tutorial in a :code:`if
__name__ == "__main__":` block.
"""
import os
import numpy as np
import tvm
from tvm import te, auto_scheduler, runtime, topi
from tvm.auto_scheduler import _ffi_api
from tvm.contrib import utils
from tvm.topi.utils import get_const_tuple
from tvm.topi.sparse.utils import random_bsr_matrix
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--hw', default=32, type=int)
parser.add_argument('--ci', default=1024, type=int)
parser.add_argument('--co', default=1024, type=int)
parser.add_argument('--density', default=0.1, type=float)
parser.add_argument('--bso', default=4, type=int)
parser.add_argument('--bsi', default=1, type=int)
parser.add_argument('--data-layout', default='hwc', type=str)
parser.add_argument('--weight-layout', default='oi', type=str)
parser.add_argument('--output-layout', default='hwc', type=str)
ARGS = parser.parse_args()
@auto_scheduler.register_task_input_check_func
######################################################################
# Define the computation
# ^^^^^^^^^^^^^^^^^^^^^^
# To begin with, let us define the computation of a sparse matmul with several relu and bias add.
# The function should return the list of input/output tensors.
# From these tensors, the auto-scheduler can get the whole computational graph.
@auto_scheduler.register_workload
######################################################################
# Special step for sparse workload
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# During schedule tuning, auto-scheduler will use random inputs to measure the performance of a
# generated schedule. While we cannot directly use a random array as the input of a sparse op, for
# the "indices" and "indptr" array are meaningful for the computation.
#
# To solve this problem, we register these as special buffers, and load them when process program
# measuring.
# See the `tvm.auto_scheduler.measure.py` for more details.
# Define the basic shapes of this sparse computation
M = ARGS.hw * ARGS.hw
K = ARGS.ci
N = ARGS.co
BS_O = ARGS.bso
BS_I = ARGS.bsi
density = ARGS.density
# Default
# data: hwc
# weight: oi
# output: hwc
# Generate the test data with numpy
X_np = np.random.randn(M, K).astype("float32")
W_sp_np = random_bsr_matrix(N, K, BS_O, BS_I, density=density, dtype="float32")
W_np = W_sp_np.todense()
Y_np = X_np @ W_np.T # Process the matrix multiplication
W_sp_np_data = W_sp_np.data
W_sp_np_indices = W_sp_np.indices
W_sp_np_indptr = W_sp_np.indptr
if ARGS.data_layout == 'chw':
X_np = X_np.transpose(1, 0)
if ARGS.weight_layout == 'io':
W_np = W_np.transpose(1, 0)
W_sp_np_data = W_sp_np_data.transpose(0, 2, 1)
if ARGS.output_layout == 'chw':
Y_np = Y_np.transpose(1, 0)
######################################################################
# Create the search task
# ^^^^^^^^^^^^^^^^^^^^^^
# We then create a search task with M=N=K=512 and dtype="float32"
# If your machine supports avx instructions, you can
#
# - replace "llvm" below with "llvm -mcpu=core-avx2" to enable AVX2
# - replace "llvm" below with "llvm -mcpu=skylake-avx512" to enable AVX-512
#target = tvm.target.Target("llvm")
#target = tvm.target.Target("llvm -mtriple=armv7l-linux-gnueabihf -mattr=+neon")
target = tvm.target.Target("opencl -device=mali", host="llvm -mtriple=armv7l-linux-gnueabihf")
device_key = 'xu4'
rpc_host = '115.145.178.78'
rpc_port = 8109
# Register the sparse data to task inputs
#prefix = "sparse_dense_bsr_%d_%d_%d_%d_%.2f_%s_" % (N, K, BS_O, BS_I, density, ARGS.weight_layout)
prefix = "sparse_dense_v2_bsr_%d_%d_%d_%d_%.2f_" % (W_np.shape[0], W_np.shape[1], W_sp_np_data.shape[1], W_sp_np_data.shape[2], density)
task = tvm.auto_scheduler.SearchTask(
func=sparse_dense_v2,
args=(X_np.shape, W_sp_np_data.shape, W_sp_np_indices.shape, W_sp_np_indptr.shape, "float32", ARGS.data_layout, ARGS.weight_layout, ARGS.output_layout),
target=target,
task_inputs={
prefix + "W_data": runtime.ndarray.array(W_sp_np_data),
prefix + "W_indices": runtime.ndarray.array(W_sp_np_indices),
prefix + "W_indptr": runtime.ndarray.array(W_sp_np_indptr),
},
task_inputs_save_to_file=True,
#layout_rewrite_option=auto_scheduler.LayoutRewriteOption.NO_REWRITE,
)
# Inspect the computational graph
print("Computational DAG:")
print(task.compute_dag)
######################################################################
# Write the custom sketch for sparse dense op
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Before tuning, we will need to define the CustomSketchRule for the sparse dense op.
#
# CustomSketchRule consists of two parts: the condition function and the apply function.
#
# - condition function: describe when to apply this sketch rule. For example, we can only apply
# the rule to the sparse ops by matching their name and tag.
# - apply function: describe how to generate the initial sketch. You can implement it using
# auto-scheduler provided loop state APIs.
######################################################################
# Next, we set parameters for the auto-scheduler with the custom sketch plugged in.
#
# * :code:`num_measure_trials` is the number of measurement trials we can use during the search.
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
# good value for the search to converge. You can do more trials according to your time budget.
# * In addition, we use :code:`RecordToFile` to dump measurement records into a file
# `sparse_dense.json`.
# The measurement records can be used to query the history best, resume the search,
# and do more analyses later.
# * see :any:`auto_scheduler.TuningOptions` for more parameters
# * Here, we need to create a :code:`auto_scheduler.SketchPolicy` object, and add the custom sketch
# rule as a `init_search_callbacks`.
log_file = "spmm_mali_hw%d_ci%d_co%d_d%s_bsi%d_bso%d_%s_%s_%s.json" % (ARGS.hw, ARGS.ci, ARGS.co, str(ARGS.density), ARGS.bso, ARGS.bsi, ARGS.data_layout, ARGS.weight_layout, ARGS.output_layout)
tune_option = auto_scheduler.TuningOptions(
num_measure_trials=1000,
builder=auto_scheduler.LocalBuilder(build_func="default"),
runner=auto_scheduler.RPCRunner(
device_key,
host=rpc_host,
port=rpc_port,
timeout=30,
repeat=1,
min_repeat_ms=200,
enable_cpu_cache_flush=True,
),
measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
verbose=2,
num_measures_per_round=32,
)
search_policy = auto_scheduler.SketchPolicy(
task,
program_cost_model=auto_scheduler.XGBModel(),
init_search_callbacks=[
auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func, "SparseDense")
],
)
######################################################################
# Run the search
# ^^^^^^^^^^^^^^
# Now we get all inputs ready.
# We can kick off the search and let the auto-scheduler do its magic.
# After some measurement trials, we can load the best schedule from the log
# file and apply it.
# Run auto-tuning (search)
# Notice: We do not run the tuning in our webpage server since it takes too long.
# Uncomment the following line to run it by yourself.
task.tune(tune_option, search_policy)
# Apply the best schedule
sch, args = task.apply_best(log_file)
######################################################################
# We can lower the schedule to see the IR after auto-scheduling.
# The auto-scheduler correctly performs optimizations including multi-level tiling,
# layout transformation, parallelization, vectorization, unrolling, and operator fusion.
print("Lowered TIR:")
print(tvm.lower(sch, args, simple_mode=True))
######################################################################
# Check correctness and evaluate performance
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# We build the binary and check its correctness and performance.
func = tvm.build(sch, args, target)
tmp = utils.tempdir()
filename = 'net.tar'
func.export_library(tmp.relpath(filename))
print("Upload...")
remote = auto_scheduler.utils.request_remote(device_key, rpc_host, rpc_port, timeout=10000)
remote.upload(tmp.relpath(filename))
func = remote.load_module(filename)
#dev = remote.cpu()
dev = remote.cl()
X_tvm = tvm.nd.array(X_np, device=dev)
W_data_tvm = tvm.nd.array(W_sp_np_data, device=dev)
W_indices_tvm = tvm.nd.array(W_sp_np_indices, device=dev)
W_indptr_tvm = tvm.nd.array(W_sp_np_indptr, device=dev)
Y_tvm = tvm.nd.empty(Y_np.shape, device=dev)
func(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, Y_tvm)
# Check results
#tvm.testing.assert_allclose(Y_np, Y_tvm.asnumpy(), atol=1e-4, rtol=1e-4)
np.testing.assert_allclose(Y_np, Y_tvm.asnumpy(), atol=1e-4, rtol=1e-4)
# Evaluate execution time.
evaluator = func.time_evaluator(func.entry_name, dev, min_repeat_ms=500)
print(
"Execution time of this operator: %.3f ms"
% (
np.median(evaluator(X_tvm, W_data_tvm, W_indices_tvm, W_indptr_tvm, Y_tvm).results)
* 1000
)
)
######################################################################
# .. note:: Tuning result example
#
# .. code-block:: c
#
# ----------------------------------------------------------------------
# Lowered TIR:
# primfn(placeholder_5: handle, placeholder_6: handle, placeholder_7: handle, placeholder_8: handle, placeholder_9: handle, compute_1: handle) -> ()
# attr = {"global_symbol": "main", "tir.noalias": True}
# buffers = {placeholder_2: Buffer(placeholder_10: Pointer(float32), float32, [9831, 16, 1], []),
# placeholder_4: Buffer(placeholder_11: Pointer(int32), int32, [33], []),
# placeholder_3: Buffer(placeholder_12: Pointer(float32), float32, [512, 512], []),
# compute: Buffer(compute_2: Pointer(float32), float32, [512, 512], []),
# placeholder_1: Buffer(placeholder_13: Pointer(float32), float32, [512, 512], []),
# placeholder: Buffer(placeholder_14: Pointer(int32), int32, [9831], [])}
# buffer_map = {placeholder_7: placeholder, placeholder_9: placeholder_1, placeholder_6: placeholder_2, compute_1: compute, placeholder_5: placeholder_3, placeholder_8: placeholder_4} {
# for (i0.outer.i1.outer.fused: int32, 0, 1024) "parallel" {
# attr [compute_3: Pointer(float32)] "storage_scope" = "global";
# allocate(compute_3, float32, [256]) {
# for (nb_j.inner: int32, 0, 2) {
# for (i.inner.init: int32, 0, 8) {
# for (j.init: int32, 0, 16) {
# compute_3[(((i.inner.init*32) + (nb_j.inner*16)) + j.init)] = 0f32
# }
# }
# for (elem_idx: int32, 0, ((int32*)placeholder_11[(((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner) + 1)] - (int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)])) {
# for (i.inner: int32, 0, 8) {
# for (j: int32, 0, 16) {
# compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] = ((float32*)compute_3[(((i.inner*32) + (nb_j.inner*16)) + j)] + ((float32*)placeholder_10[((((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)]*16) + (elem_idx*16)) + j)]*max((float32*)placeholder_12[(((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i.inner*512)) + (int32*)placeholder_14[((int32*)placeholder_11[((floormod(i0.outer.i1.outer.fused, 16)*2) + nb_j.inner)] + elem_idx)])], 0f32)))
# }
# }
# }
# }
# for (i0.inner: int32, 0, 8) {
# compute_2[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)] = max(((float32x32*)compute_3[ramp((i0.inner*32), 1, 32)] + (float32x32*)placeholder_13[ramp((((floordiv(i0.outer.i1.outer.fused, 16)*4096) + (i0.inner*512)) + (floormod(i0.outer.i1.outer.fused, 16)*32)), 1, 32)]), broadcast(0f32, 32))
# }
# }
# }
# }
| [
2,
49962,
284,
262,
24843,
10442,
5693,
357,
1921,
37,
8,
739,
530,
198,
2,
393,
517,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
198,
2,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
220,
383,
7054,... | 2.71668 | 5,072 |
import dlib
import pickle
import numpy as np
import os
import argparse
from utils import path_utils
from utils import identity_profile_utils
def main(path=None):
"""
Creates an IdentityProfile
:param path: (str) absolute path to a directory containing training images
- the end of the path should be a directory named after the person
- e.g. Seaton_Ullberg
- all images in the directory should contain only the face of the target
"""
# process args
parser = argparse.ArgumentParser()
parser.add_argument('--path',
default=None,
help="Path to directory containing face images")
args = parser.parse_args()
if path is None:
if args.path is None:
raise argparse.ArgumentError("--path is a required argument")
else:
path = args.path
# load the pretrained dlib models
local_paths = path_utils.LocalPaths()
detector = dlib.get_frontal_face_detector()
shape_predictor_path = os.path.join(local_paths.bin, "shape_predictor_5_face_landmarks.dat")
shape_predictor = dlib.shape_predictor(shape_predictor_path)
face_recognition_model_path = os.path.join(local_paths.bin, "dlib_face_recognition_resnet_model_v1.dat")
face_recognition_model = dlib.face_recognition_model_v1(face_recognition_model_path)
# iterate through a directory of images to calculate a 128D face description vector
descriptors = []
for fname in os.listdir(path):
img_path = os.path.join(path, fname)
img = dlib.load_rgb_image(img_path)
detections = detector(img, 1)
if len(detections) != 1: # only use single face images
continue
d = detections[0]
shape = shape_predictor(img, d)
face_descriptor = face_recognition_model.compute_face_descriptor(img, shape, 10)
descriptors.append(face_descriptor)
final_descriptor = np.mean(descriptors, axis=0)
# generate an IdentityProfile and save the descriptor alongside it
dir_name = os.path.basename(path)
pickle_path = os.path.join(local_paths.identity_profiles, dir_name, "{}")
# make the new profile dir and yaml file
identity_profile_utils.new_profile(dir_name)
# save face descriptor as a pickle
with open(pickle_path.format("face_descriptor.p"), 'wb') as stream:
pickle.dump(final_descriptor, stream)
if __name__ == "__main__":
main("/home/seaton/Pictures/Seaton_Ullberg")
| [
11748,
288,
8019,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
3384,
4487,
1330,
3108,
62,
26791,
198,
6738,
3384,
4487,
1330,
5369,
62,
13317,
62,
26791,
628,
198,
42... | 2.512948 | 1,004 |
# https://practice.geeksforgeeks.org/problems/red-or-green5711/1/?category[]=Strings&category[]=Strings&page=1&query=category[]Stringspage1category[]Strings#
#User function Template for python3
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__=='__main__':
t=int(input())
for _ in range(t):
N=int(input())
S=input()
ob=Solution()
print(ob.RedOrGreen(N,S))
# } Driver Code Ends | [
2,
3740,
1378,
39541,
13,
469,
2573,
30293,
2573,
13,
2398,
14,
1676,
22143,
14,
445,
12,
273,
12,
14809,
3553,
1157,
14,
16,
20924,
22872,
21737,
28,
13290,
654,
5,
22872,
21737,
28,
13290,
654,
5,
7700,
28,
16,
5,
22766,
28,
228... | 2.397849 | 186 |
"""
Copyright 2015 INTEL RESEARCH AND INNOVATION IRELAND LIMITED
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import multiprocessing
import optparse
import os
import sys
import threading
import uuid
import jpype
import adaptationengine_framework.adaptationaction as ae_action
import adaptationengine_framework.event as ae_event
import adaptationengine_framework.output as output
import adaptationengine_plugintester.plugins as plugins
if __name__ == "__main__":
main()
| [
37811,
198,
15269,
1853,
17828,
3698,
15731,
17133,
3398,
5357,
3268,
45,
8874,
6234,
314,
16448,
6981,
40880,
198,
198,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
5832,
743,
407,
77... | 3.782946 | 258 |
import logging
import pickle
import time
try:
import redis
except ImportError:
redis = None
from .pubsub_manager import PubSubManager
logger = logging.getLogger('socketio')
class RedisManager(PubSubManager): # pragma: no cover
"""Redis based client manager.
This class implements a Redis backend for event sharing across multiple
processes. Only kept here as one more example of how to build a custom
backend, since the kombu backend is perfectly adequate to support a Redis
message queue.
To use a Redis backend, initialize the :class:`Server` instance as
follows::
url = 'redis://hostname:port/0'
server = socketio.Server(client_manager=socketio.RedisManager(url))
:param url: The connection URL for the Redis server. For a default Redis
store running on the same host, use ``redis://``. To use an
SSL connection, use ``rediss://``.
:param channel: The channel name on which the server sends and receives
notifications. Must be the same in all the servers.
:param write_only: If set to ``True``, only initialize to emit events. The
default of ``False`` initializes the class for emitting
and receiving.
:param redis_options: additional keyword arguments to be passed to
``Redis.from_url()``.
"""
name = 'redis'
| [
11748,
18931,
198,
11748,
2298,
293,
198,
11748,
640,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
2266,
271,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
2266,
271,
796,
6045,
198,
198,
6738,
764,
12984,
7266,
62,
37153,
1330,... | 2.786693 | 511 |
import logging
from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.forms import (
AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm)
from django.utils.translation import ugettext_lazy as _
from contenteditor import models
class LoggingAuthenticationForm(AdminAuthenticationForm):
    """Admin login form intended to log authentication attempts.

    NOTE(review): no logging logic is visible in this chunk; presumably the
    behavior lives in overridden methods elsewhere -- confirm against the
    full file.
    """
class StrictAdminPasswordChangeForm(AdminPasswordChangeForm):
    """Admin form for setting a user's password with strict policy checks."""
    password1 = forms.CharField(
        label=_("Password"), widget=forms.PasswordInput, help_text=_("""
        Enter a password. Requirements include: at least 20 characters,
        at least one uppercase letter, at least one lowercase letter, at
        least one number, and at least one special character.
        """))

    def clean_password1(self):
        """Run the project's strong-password checks on top of Django's
        defaults and reject reuse of the user's current password."""
        candidate = self.cleaned_data['password1']
        issues = models.password_errors(candidate)
        # Reusing the current password is not allowed either
        if self.user.check_password(candidate):
            issues.append("Must not reuse a password")
        # Report every collected problem at once; otherwise accept the value.
        if issues:
            raise forms.ValidationError('\n'.join(issues))
        return candidate
class StrictPasswordChangeForm(PasswordChangeForm):
    """Self-service password-change form (served at /admin/password_change)
    with strict policy checks."""
    new_password1 = forms.CharField(
        label=_("New password"), widget=forms.PasswordInput, help_text=_("""
        Enter a password. Requirements include: at least 20 characters,
        at least one uppercase letter, at least one lowercase letter, at
        least one number, and at least one special character.
        """))

    def clean_new_password1(self):
        """Run the project's strong-password checks on top of Django's
        defaults and reject reuse of the user's current password."""
        candidate = self.cleaned_data['new_password1']
        issues = models.password_errors(candidate)
        # Reusing the current password is not allowed either
        if self.user.check_password(candidate):
            issues.append("Must not reuse a password")
        # Report every collected problem at once; otherwise accept the value.
        if issues:
            raise forms.ValidationError('\n'.join(issues))
        return candidate
| [
11748,
18931,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
13,
23914,
1330,
32053,
47649,
3299,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
357,
198,
220,
220... | 2.853958 | 897 |
import numpy as np
import torch
from sklearn import metrics
import pickle
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
1341,
35720,
1330,
20731,
198,
11748,
2298,
293,
198
] | 3.894737 | 19 |
import logging
from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock, patch, ANY
from uuid import uuid1
from log_decorator import log
# noinspection DuplicatedCode
# noinspection DuplicatedCode
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
6139,
44,
735,
11,
8529,
11,
15529,
198,
6738,
334,
27112,
1330,
334,
27112,
16,
198,
1... | 3.306667 | 75 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 1728.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/5/10 22:30
------------
"""
import collections
from typing import List
if __name__ == '__main__':
    # Smoke test for LeetCode 1728 ("Cat and Mouse II"): Solution is expected
    # to be defined above this driver (not visible in this chunk).
    s = Solution()
    print(s.canMouseWin(["####F","#C...","M...."], catJump = 1, mouseJump = 2))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
8979,
220,
220,
220,
1058,
220,
220,
1596,
2078,
13,
9078,
220,
220,
220,
220,
198,
31,
17829,
1058,
... | 2.331081 | 148 |
# asynctest.py Demo of asynchronous code scheduling tasks with cron
# Copyright (c) 2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import uasyncio as asyncio
from sched.sched import schedule
from time import localtime
try:
    # Run the scheduler demo until main() completes or is interrupted.
    asyncio.run(main())
finally:
    # Create a fresh event loop so the demo can be re-run cleanly -- the
    # usual uasyncio idiom for clearing retained scheduler state after a run.
    _ = asyncio.new_event_loop()
| [
2,
355,
2047,
310,
395,
13,
9078,
34588,
286,
39354,
2438,
26925,
8861,
351,
1067,
261,
198,
198,
2,
15069,
357,
66,
8,
12131,
5613,
367,
8589,
198,
2,
28728,
739,
262,
17168,
13789,
357,
36393,
8,
532,
766,
38559,
24290,
2393,
198,... | 3.168317 | 101 |
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
Video reading, writing and processing.
FPS is the number of frames handled per second; in general (humans cannot
perceive changes shorter than about 300 ms), processing more than 5 frames
per second can be regarded as video processing.
"""
if "__main__" == __name__:
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
28,
40477,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
269,
85,
17,
355,
269,
85,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
... | 1.040541 | 222 |
# TODO: the major/mirror code scheme needs to be re-planned later; this layout is provisional.
class AlgorithmOperatorException(CustomException):
    """
    Base class for algorithm-operator exceptions.
    """
    # Major error-code group for operator errors; presumably combined with a
    # subclass MIRROR_CODE to form the full code -- confirm in CustomException.
    MAJOR_CODE = 1
class AlgorithmInputParameterException(AlgorithmOperatorException):
    """
    Invalid input parameters passed to an operator.
    """
    MIRROR_CODE = 11
class InputParameterAbnormalException(AlgorithmInputParameterException):
"""
输入参数不合理
"""
MIRROR_CODE = 14
class InputParameterAbsentException(AlgorithmInputParameterException):
"""
输入参数缺失
"""
MIRROR_CODE = 15
class AlgorithmRuntimeException(CustomException):
    """
    Runtime error raised while an operator is executing.
    """
    MAJOR_CODE = 2
class NetworkInputParameterException(AlgorithmRuntimeException):
"""
网络入参异常
"""
MIRROR_CODE = 11
class NetworkInitFailException(AlgorithmRuntimeException):
"""
网络初始化异常
"""
MIRROR_CODE = 21
class ConsumerComputeException(CustomException):
    """
    Error raised on the consumer side.
    """
    MAJOR_CODE = 3
class ConsumerAlgorithmQueryException(ConsumerComputeException):
"""
请求消费者端结果异常
"""
MIRROR_CODE = 11
class ConsumerAlgorithmRuntimeException(ConsumerComputeException):
"""
消费者端计算错误
"""
MIRROR_CODE = 12
class ConsumerAlgorithmTimeoutException(ConsumerComputeException):
"""
消费者端计算超时
"""
MIRROR_CODE = 13
class ConsumerAlgorithmUncatchException(ConsumerComputeException):
"""
消费者端无法捕捉的错误
"""
MIRROR_CODE = 99
class DeepLearningInferenceException(CustomException):
    """
    Exception raised during deep-learning inference.
    """
    # NOTE(review): MAJOR_CODE 3 collides with ConsumerComputeException above;
    # the module-level TODO already flags that major/mirror codes need re-planning.
    MAJOR_CODE = 3
class TritonUncatchException(DeepLearningInferenceException):
"""
Triton推理无法捕捉的异常
"""
MIRROR_CODE = 1
class NCNNUncatchaException(DeepLearningInferenceException):
"""
NCNN推理无法捕捉的异常
"""
MIRROR_CODE = 2
class InferenceTensorCheckFailException(DeepLearningInferenceException):
"""
推理时的tensor有效性检查失败
"""
MIRROR_CODE = 3
class TritonServerCannotConnectException(DeepLearningInferenceException):
"""
无法连接Triton服务器
"""
MIRROR_CODE = 11
class TritonServerNotReadyException(DeepLearningInferenceException):
"""
Triton服务器还未准备好,可能在启动中
"""
MIRROR_CODE = 12
class TritonModelNotReadyException(DeepLearningInferenceException):
"""
Triton上面的特定模型还未加载完成
"""
MIRROR_CODE = 13
class ImageFileSizeAbnormalException(GeneralException):
"""
图像文件大小不正常
"""
MIRROR_CODE = 1
class ObjectNotFoundOnOSS(GeneralException):
"""
OSS上未找到指定目标
"""
MIRROR_CODE = 2
class ImageDownloadTimeoutException(GeneralException):
"""
图像下载超时
"""
MIRROR_CODE = 3
class ImageFormatNotSupportException(GeneralException):
"""
图像格式不支持
"""
MIRROR_CODE = 4
class DownloadURLNotAvailableException(GeneralException):
"""
下载链接不可用
"""
MIRROR_CODE = 5
class DownloadURLTimeoutException(GeneralException):
"""
下载链接超时
"""
MIRROR_CODE = 6
class ImageClassNotSupportToEncodeException(GeneralException):
"""
OSS在进行图像编码的时候,格式不支持
"""
MIRROR_CODE = 7
class VideoExtractMethodNotSupportException(GeneralException):
"""
视频帧提取的方法不支持
"""
MIRROR_CODE = 8
class PreviousTaskNotFinishException(GeneralException):
"""
前置任务未完成异常
"""
MIRROR_CODE = 9
class RetryExceedLimitException(GeneralException):
"""
任务重试超过限制次数
"""
MIRROR_CODE = 10
class DAGAbortException(GeneralException):
"""
DAG中某个具有关联任务的service task异常,导致DAG无法正确运行
"""
MIRROR_CODE = 11
class VectorSearchException(CustomException):
"""
图像搜索的异常
"""
MAJOR_CODE = 5
class MilvusRuntimeException(CustomException):
"""
Milvus异常
"""
MIRROR_CODE = 10
class DatabaseNotExist(VectorSearchException):
"""
数据库不存在
"""
MIRROR_CODE = 1
| [
2,
284,
4598,
25,
10263,
238,
236,
163,
119,
255,
165,
250,
222,
17358,
223,
34932,
235,
23877,
108,
164,
100,
226,
161,
7134,
22478,
2438,
161,
240,
234,
10793,
1472,
2438,
171,
120,
234,
30298,
235,
17312,
253,
17739,
230,
32573,
... | 1.866732 | 2,026 |
from pyspark.sql import SparkSession
import os
| [
6738,
279,
893,
20928,
13,
25410,
1330,
17732,
36044,
198,
198,
11748,
28686,
628
] | 3.5 | 14 |
import logging
import os
import tempfile
from contextlib import contextmanager
from ctypes import Structure, c_int, c_uint32, c_char_p, c_void_p, pointer, CDLL, RTLD_GLOBAL
from dataclasses import dataclass
from typing import ByteString
from erica.config import get_settings, Settings
from erica.pyeric.eric_errors import check_result, check_handle, check_xml, EricWrongTaxNumberError
logger = logging.getLogger('eric')
@dataclass
# As explained in the original ERiC documentation
# As explained in the original ERiC documentation
# TODO: Unify usage of EricWrapper; rethink having eric_wrapper as a parameter
@contextmanager
def get_eric_wrapper():
    """This context manager returns an initialised eric wrapper; it will ensure that the ERiC API is shutdown after
    use. """
    eric = EricWrapper()
    # Logs are written into a throwaway directory that is removed on exit.
    with tempfile.TemporaryDirectory() as tmp_dir:
        eric.initialise(log_path=tmp_dir)
        try:
            yield eric
        finally:
            # Shut down the native API even if the caller raised, then forward
            # the ERiC log contents to our logger before the directory vanishes.
            eric.shutdown()
            with open(os.path.join(tmp_dir, 'eric.log')) as eric_log:
                logger.debug(eric_log.read())
def verify_using_stick():
    """Calls into eric to verify whether we are using a token of type "Stick".

    Returns True iff the certificate properties report a Stick token; any
    failure is treated as "not a stick" (best-effort, broad except is
    deliberate and only logged at debug level).
    """
    with get_eric_wrapper() as eric_wrapper:
        try:
            cert_properties = eric_wrapper.get_cert_properties()
            return "<TokenTyp>Stick</TokenTyp>" in cert_properties
        except Exception as e:
            logger.debug("Exception while trying to verify Stick", exc_info=e)
            return False
class EricWrapper(object):
"""A Python wrapper for the native ERiC library. It uses `ctypes` for calling
the respective functions of the `.so` file.
"""
ERIC_VALIDIERE = 1 << 1
ERIC_SENDE = 1 << 2
ERIC_DRUCKE = 1 << 5
cert_path = get_settings().get_cert_path().encode()
cert_pin = get_settings().cert_pin
def __init__(self):
"""Creates a new instance of the pyeric wrapper.
"""
self.eric = CDLL(Settings.get_eric_dll_path(), RTLD_GLOBAL)
self.eric_instance = None
logger.debug(f"eric: {self.eric}")
def initialise(self, log_path=None):
"""Initialises ERiC and a successful return from this method shall indicate
that the .so file was found and loaded successfully. Where `initialise` is called,
`shutdown` shall be called when done.
"""
fun_init = self.eric.EricMtInstanzErzeugen
fun_init.argtypes = [c_char_p, c_char_p]
fun_init.restype = c_void_p
curr_dir = os.path.dirname(os.path.realpath(__file__))
plugin_path = c_char_p(os.path.join(curr_dir, "../lib/plugins2").encode())
log_path = c_char_p(log_path.encode() if log_path else None)
self.eric_instance = fun_init(plugin_path, log_path)
logger.debug(f"fun_init instance: {self.eric_instance}")
def shutdown(self):
"""Shuts down ERiC and releases resources. One must not use the object afterwards."""
fun_shutdown = self.eric.EricMtInstanzFreigeben
fun_shutdown.argtypes = [c_void_p]
fun_shutdown.restype = c_int
res = fun_shutdown(self.eric_instance)
check_result(res)
logger.debug(f"fun_shutdown res: {res}")
def validate(self, xml, data_type_version):
"""Validate the given XML using the built-in plausibility checks."""
return self.process(xml, data_type_version, EricWrapper.ERIC_VALIDIERE)
def validate_and_send(self, xml, data_type_version):
"""Validate and (more importantly) send the given XML using the built-in
plausibility checks. For this a test certificate and pin must be provided and the
`data_type_version` shall match the XML data. When a `print_path` is given, a PDF
will be created under that path."""
with tempfile.NamedTemporaryFile() as temporary_pdf_file:
print_params = self.alloc_eric_druck_parameter_t(temporary_pdf_file.name)
cert_handle = self.get_cert_handle()
try:
cert_params = self.alloc_eric_verschluesselungs_parameter_t(cert_handle)
flags = EricWrapper.ERIC_SENDE | EricWrapper.ERIC_DRUCKE
eric_result = self.process(
xml, data_type_version,
flags,
cert_params=pointer(cert_params),
print_params=pointer(print_params))
temporary_pdf_file.seek(0)
eric_result.pdf = temporary_pdf_file.read()
return eric_result
finally:
self.close_cert_handle(cert_handle)
@staticmethod
@staticmethod
def process_verfahren(self, xml_string, verfahren, abruf_code=None, transfer_handle=None) \
-> EricResponse:
""" Send the xml_string to Elster with given verfahren and certificate parameters. """
cert_handle = self.get_cert_handle()
cert_params = self.alloc_eric_verschluesselungs_parameter_t(cert_handle, abrufCode=abruf_code)
try:
return self.process(xml_string, verfahren, EricWrapper.ERIC_SENDE | EricWrapper.ERIC_VALIDIERE,
transfer_handle=transfer_handle, cert_params=pointer(cert_params))
finally:
self.close_cert_handle(cert_handle)
def get_tax_offices(self, state_id):
"""
Get all the tax offices for a specific state
:param state_id: A valid state id for which the tax office list is provided
"""
fun_get_tax_offices = self.eric.EricMtHoleFinanzaemter
fun_get_tax_offices.argtypes = [c_void_p, c_char_p, c_void_p]
fun_get_tax_offices.restype = int
return self._call_and_return_buffer_contents_and_decode(
fun_get_tax_offices,
state_id.encode())
def get_state_id_list(self):
"""
Get a list of all the state codes
"""
fun_get_tax_offices = self.eric.EricMtHoleFinanzamtLandNummern
fun_get_tax_offices.argtypes = [c_void_p, c_void_p]
fun_get_tax_offices.restype = int
return self._call_and_return_buffer_contents_and_decode(
fun_get_tax_offices)
def _call_and_return_buffer_contents(self, function, *args):
"""
:param function: The ERIC function to be called. The argtypes and restype have to be set before.
"""
buf = self.create_buffer()
try:
res = function(self.eric_instance, *args, buf)
check_result(res)
logger.debug(f"function {function.__name__} from _call_and_return_buffer_contents res {res}")
returned_xml = self.read_buffer(buf)
check_xml(returned_xml)
return returned_xml
finally:
self.close_buffer(buf)
def _call_and_return_buffer_contents_and_decode(self, function, *args):
"""
This calls the ERIC function, reads the buffer and decodes the returned_xml.
:param function: The ERIC function to be called. The argtypes and restype have to be set before.
"""
return self._call_and_return_buffer_contents(function, *args).decode()
def get_error_message_from_xml_response(self, xml_response):
"""Extract error message from server response"""
fun_get_error_message = self.eric.EricMtGetErrormessagesFromXMLAnswer
fun_get_error_message.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p, c_void_p, c_void_p]
fun_get_error_message.restypes = int
transferticket_buffer = self.create_buffer()
th_res_code_buffer = self.create_buffer()
th_error_message_buffer = self.create_buffer()
ndh_err_xml_buffer = self.create_buffer()
try:
res_code = fun_get_error_message(self.eric_instance,
xml_response,
transferticket_buffer,
th_res_code_buffer,
th_error_message_buffer,
ndh_err_xml_buffer)
check_result(res_code)
transferticket = self.read_buffer(transferticket_buffer).decode()
th_res_code = self.read_buffer(th_res_code_buffer).decode()
th_error_message = self.read_buffer(th_error_message_buffer).decode()
ndh_err_xml = self.read_buffer(ndh_err_xml_buffer).decode()
finally:
self.close_buffer(ndh_err_xml_buffer)
self.close_buffer(th_error_message_buffer)
self.close_buffer(th_res_code_buffer)
self.close_buffer(transferticket_buffer)
return transferticket, th_res_code, th_error_message, ndh_err_xml
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
269,
19199,
1330,
32522,
11,
269,
62,
600,
11,
269,
62,
28611,
2624,
11,
269,
62,
10641,
62,
79,
11,
269,
62,
19382,
62,
... | 2.255188 | 3,903 |
import numpy as np
from abstractclasses import solver, solver_model
"""
Matrix multiplication solves for the distance between two points in 2- or
3-dimensional space.
"""
# ————————————————————————————————————————————————
# MATRIX MULTIPLICATION SOLVER CLASS
# ————————————————————————————————————————————————
# ————————————————————————————————————————————————
# MATRIX MULTIPLICATION MODEL CLASS
# ————————————————————————————————————————————————
| [
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
6738,
12531,
37724,
1330,
1540,
332,
11,
1540,
332,
62,
19849,
201,
198,
201,
198,
37811,
201,
198,
46912,
15082,
30907,
39107,
329,
262,
5253,
1022,
734,
2173,
287,
362,
393,
513,
20... | 3.603053 | 131 |
import os
import pickle
from gensim.models.phrases import Phraser, Phrases
from gensim.utils import SaveLoad as gensimSaveLoad
from typing import List, Text, Any
PathType = str
# TODO: Search truth typing of phraser
PhraserType = Any
# %%
# %%
# %%
# %%
# %%
# print("Saving done")
# %%
# %%
# %%
if __name__ == '__main__':
main()
# %%
| [
11748,
28686,
198,
11748,
2298,
293,
198,
198,
6738,
308,
641,
320,
13,
27530,
13,
746,
81,
1386,
1330,
1380,
81,
6005,
11,
1380,
81,
1386,
198,
6738,
308,
641,
320,
13,
26791,
1330,
12793,
8912,
355,
308,
641,
320,
16928,
8912,
198... | 2.578571 | 140 |
from .helper_class import AIBC
| [
6738,
764,
2978,
525,
62,
4871,
1330,
9552,
2749,
628
] | 3.2 | 10 |
"""Problem 113
10 February 2006
Working from left-to-right if no digit is exceeded by the digit to its
left it is called an increasing number; for example, 134468.
Similarly if no digit is exceeded by the digit to its right it is
called a decreasing number; for example, 66420.
We shall call a positive integer that is neither increasing nor
decreasing a "bouncy" number; for example, 155349.
As n increases, the proportion of bouncy numbers below n increases
such that there are only 12951 numbers below one-million that are not
bouncy and only 277032 non-bouncy numbers below 10^10.
How many numbers below a googol (10^100) are not bouncy?
"""
from eulerlib import timedRun
import math
#http://pt.wikipedia.org/wiki/Arranjo_(matem%C3%A1tica)#Combina.C3.A7.C3.A3o_simples
#http://pt.wikipedia.org/wiki/Arranjo_(matem%C3%A1tica)#Combina.C3.A7.C3.A3o_com_repeti.C3.A7.C3.A3o
if __name__ == "__main__":
timedRun(euler113)
| [
37811,
40781,
17318,
198,
940,
3945,
4793,
198,
198,
28516,
422,
1364,
12,
1462,
12,
3506,
611,
645,
16839,
318,
20672,
416,
262,
16839,
284,
663,
198,
9464,
340,
318,
1444,
281,
3649,
1271,
26,
329,
1672,
11,
1511,
2598,
3104,
13,
... | 2.906542 | 321 |
# Reads products until the user stops, then reports the total spent, how many
# items cost more than R$1000, and the cheapest product.
totalGasto = 0
maisdeMil = 0
# Fix: use None as "no product yet" sentinel instead of a magic 1_000_000.00
# price, which silently broke for items more expensive than that.
maisBarato = ['', None]
while True:
    nome = str(input('nome do produto: '))
    preço = float(input('preço do produto: '))
    totalGasto += preço
    if preço > 1000:
        maisdeMil += 1
    if maisBarato[1] is None or preço < maisBarato[1]:
        maisBarato[0] = nome
        maisBarato[1] = preço
    cont = str(input('Quer continuar(s/n)? ')).upper()
    if cont == 'N':
        break
print("-"*20)
print("Compras encerradas.")
print("O total gasto na compra foi de R${}.".format(totalGasto))
print("{} produtos custasm mais de R$1000.00.".format(maisdeMil))
print("O produto mais barato é {} no valor de {}.".format(maisBarato[0], maisBarato[1]))
| [
23350,
38,
459,
78,
796,
657,
198,
2611,
271,
2934,
24857,
796,
657,
198,
2611,
271,
10374,
5549,
796,
37250,
3256,
1802,
2388,
13,
405,
60,
198,
4514,
6407,
25,
198,
220,
220,
220,
299,
462,
796,
965,
7,
15414,
10786,
77,
462,
46... | 2.168285 | 309 |
__author__ = 'Simon Birrer'
"""
this file contains standard routines
"""
import numpy as np
import mpmath
import itertools
from lenstronomy.Util.numba_util import jit
from lenstronomy.Util.package_util import exporter
export, __all__ = exporter()
@export
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a single new dict.

    Key/value pairs from later dicts take precedence over earlier ones.
    """
    all_items = itertools.chain.from_iterable(d.items() for d in dict_args)
    return dict(all_items)
@export
@export
def sort_image_index(ximg, yimg, xref, yref):
    """Find the ordering of (ximg, yimg) that best matches the reference positions.

    Fix/generalization: the index list was hard-coded to ``[0, 1, 2, 3]``, so any
    input with other than exactly four positions returned wrong indices; the
    permutation is now built from ``range(len(ximg))``. The double materialized
    permutation arrays were also dropped in favor of a single pass.

    :param ximg: x coordinates to sort
    :param yimg: y coordinates to sort
    :param xref: reference x coordinates
    :param yref: reference y coordinates
    :return: indexes such that ximg[indexes], yimg[indexes] matches xref, yref
    """
    assert len(xref) == len(ximg)
    ximg, yimg = np.array(ximg), np.array(yimg)
    xref, yref = np.array(xref), np.array(yref)
    best_order = None
    best_dist = np.inf
    # Brute force over all orderings; intended for small image counts where n! is tiny.
    for order in itertools.permutations(range(len(ximg))):
        order = np.array(order)
        dist = np.sum((ximg[order] - xref) ** 2 + (yimg[order] - yref) ** 2)
        if dist < best_dist:  # strict '<' keeps the first minimum, like np.argmin
            best_dist = dist
            best_order = order
    return best_order
@export
@jit()
def rotate(xcoords, ycoords, angle):
    """Rotate points counter-clockwise by ``angle``.

    :param xcoords: x points
    :param ycoords: y points
    :param angle: rotation angle in radians
    :return: (x, y) of the rotated points
    """
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    x_rot = xcoords * cos_a + ycoords * sin_a
    y_rot = -xcoords * sin_a + ycoords * cos_a
    return x_rot, y_rot
@export
def map_coord2pix(ra, dec, x_0, y_0, M):
    """Linear transformation between two coordinate systems, mainly used to map
    angular (ra, dec) coordinates onto pixel coordinates of an image.

    :param ra: ra coordinates
    :param dec: dec coordinates
    :param x_0: pixel value in x-axis of ra,dec = 0,0
    :param y_0: pixel value in y-axis of ra,dec = 0,0
    :param M: 2x2 matrix transforming angular into pixel coordinates
    :return: transformed coordinates of the input ra and dec
    """
    transformed = M.dot(np.array([ra, dec]))
    return transformed[0] + x_0, transformed[1] + y_0
@export
def array2image(array, nx=0, ny=0):
    """Reshape a 1d array into an (nx, ny) 2d image.

    When nx/ny are not given, the array length must be a perfect square and a
    square image is returned.

    :param array: image values (size n**2, or nx*ny if both are provided)
    :returns: 2d array
    :raises ValueError: if the implied square size does not match the length
    """
    if nx == 0 or ny == 0:
        side = int(np.sqrt(len(array)))
        if side ** 2 != len(array):
            raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(array)))
        nx = ny = side
    return array.reshape(int(nx), int(ny))
@export
def image2array(image):
    """Flatten a 2d image of shape (nx, ny) into a 1d array of length nx*ny.

    :param image: 2d array of pixel values
    :returns: 1d array
    """
    height, width = image.shape
    return np.reshape(image, height * width)
@export
def array2cube(array, n_1, n_23):
    """Reshape a 1d array of length n_1*n_23 into a cube
    (n_1, sqrt(n_23), sqrt(n_23)).

    :param array: 1d array of image values
    :param n_1: first dimension of the returned array
    :param n_23: square of the second and third dimensions
    :returns: 3d array
    :raises ValueError: when n_23 is not a perfect square
    """
    side = int(np.sqrt(n_23))
    if side ** 2 != n_23:
        raise ValueError("2nd and 3rd dims (%s) are not square of integer number!" % n_23)
    return array.reshape(n_1, side, side)
@export
def cube2array(cube):
    """Flatten a 3d array of shape (n_1, n_2, n_3) into a 1d array of length
    n_1*n_2*n_3.

    :param cube: 3d array of values
    :returns: 1d array
    """
    d1, d2, d3 = cube.shape
    return cube.reshape(d1 * d2 * d3)
@export
def make_grid(numPix, deltapix, subgrid_res=1, left_lower=False):
    """
    creates pixel grid (in 1d arrays of x- and y- positions)
    default coordinate frame is such that (0,0) is in the center of the coordinate grid

    Fix: the deprecated ``np.int`` alias (removed in NumPy 1.24) is replaced by
    the builtin ``int`` -- the original raised AttributeError on modern NumPy.

    :param numPix: number of pixels per axis
        Give an integers for a square grid, or a 2-length sequence
        (first, second axis length) for a non-square grid.
    :param deltapix: pixel size
    :param subgrid_res: sub-pixel resolution (default=1)
    :return: x, y position information in two 1d arrays
    """
    # Check numPix is an integer, or 2-sequence of integers
    if isinstance(numPix, (tuple, list, np.ndarray)):
        assert len(numPix) == 2
        if any(x != round(x) for x in numPix):
            raise ValueError("numPix contains non-integers: %s" % numPix)
        numPix = np.asarray(numPix, dtype=int)
    else:
        if numPix != round(numPix):
            raise ValueError("Attempt to specify non-int numPix: %s" % numPix)
        numPix = np.array([numPix, numPix], dtype=int)

    # Super-resolution sampling
    numPix_eff = (numPix * subgrid_res).astype(int)
    deltapix_eff = deltapix / float(subgrid_res)

    # Compute unshifted grids.
    # X values change quickly, Y values are repeated many times
    x_grid = np.tile(np.arange(numPix_eff[0]), numPix_eff[1]) * deltapix_eff
    y_grid = np.repeat(np.arange(numPix_eff[1]), numPix_eff[0]) * deltapix_eff

    if left_lower is True:
        # Shift so (0, 0) is in the "lower left"
        # Note this does not shift when subgrid_res = 1
        shift = -1. / 2 + 1. / (2 * subgrid_res) * np.array([1, 1])
    else:
        # Shift so (0, 0) is centered
        shift = deltapix_eff * (numPix_eff - 1) / 2

    return x_grid - shift[0], y_grid - shift[1]
@export
def make_grid_transformed(numPix, Mpix2Angle):
    """Create a pixel grid and map it through a linear pixel-to-angle transform
    (pixel scale and rotation encoded in the matrix).

    :param numPix: number of pixels per axis
    :param Mpix2Angle: 2x2 matrix mapping a pixel offset to an angular coordinate
    :return: (ra_grid, dec_grid) coordinate grid
    """
    x_pix, y_pix = make_grid(numPix, deltapix=1)
    return map_coord2pix(x_pix, y_pix, 0, 0, Mpix2Angle)
@export
def make_grid_with_coordtransform(numPix, deltapix, subgrid_res=1, center_ra=0, center_dec=0, left_lower=False,
                                  inverse=True):
    """
    Same as the make_grid routine, but additionally returns the transformation
    matrix and shift between angular coordinates and pixel indices.

    :param numPix: number of pixels per axis
    :param deltapix: pixel scale per axis
    :param subgrid_res: supersampling resolution relative to the stated pixel size
    :param center_ra: center of the grid (RA)
    :param center_dec: center of the grid (DEC)
    :param left_lower: sets the zero point at the lower left corner of the pixels
    :param inverse: bool, if true sets East as left, otherwise East is right
    :return: ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0,
        y_at_radec_0, Mpix2coord, Mcoord2pix
    """
    numPix_eff = numPix * subgrid_res
    deltapix_eff = deltapix / float(subgrid_res)
    a = np.arange(numPix_eff)
    # pixel index pairs: column 0 varies fastest (x), column 1 slowest (y)
    matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
    if inverse is True:
        # astronomical convention: RA increases to the left (East-left)
        delta_x = -deltapix_eff
    else:
        delta_x = deltapix_eff
    if left_lower is True:
        ra_grid = matrix[:, 0] * delta_x
        dec_grid = matrix[:, 1] * deltapix_eff
    else:
        # center the grid so (0, 0) falls in the middle of the frame
        ra_grid = (matrix[:, 0] - (numPix_eff - 1) / 2.) * delta_x
        dec_grid = (matrix[:, 1] - (numPix_eff - 1) / 2.) * deltapix_eff
    # half-pixel shift introduced by supersampling, plus the requested center
    shift = (subgrid_res - 1) / (2. * subgrid_res) * deltapix
    ra_grid -= shift + center_ra
    dec_grid -= shift + center_dec
    # angular coordinate of pixel (0, 0)
    ra_at_xy_0 = ra_grid[0]
    dec_at_xy_0 = dec_grid[0]
    # linear maps between pixel indices and angular coordinates (and back)
    Mpix2coord = np.array([[delta_x, 0], [0, deltapix_eff]])
    Mcoord2pix = np.linalg.inv(Mpix2coord)
    # pixel position of the angular origin (ra, dec) = (0, 0)
    x_at_radec_0, y_at_radec_0 = map_coord2pix(-ra_at_xy_0, -dec_at_xy_0, x_0=0, y_0=0, M=Mcoord2pix)
    return ra_grid, dec_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix
@export
def grid_from_coordinate_transform(nx, ny, Mpix2coord, ra_at_xy_0, dec_at_xy_0):
    """Build RA/DEC coordinate grids consistent with a given pixel-to-coordinate
    transform.

    :param nx: number of pixels in x-axis
    :param ny: number of pixels in y-axis
    :param Mpix2coord: 2x2 transformation matrix of pixels into coordinate displacements
    :param ra_at_xy_0: RA coordinate at (x, y) = (0, 0)
    :param dec_at_xy_0: DEC coordinate at (x, y) = (0, 0)
    :return: RA coordinate grid, DEC coordinate grid
    """
    # pixel indices in raster order: x varies fastest, y slowest
    x_pix = np.tile(np.arange(nx), ny)
    y_pix = np.repeat(np.arange(ny), nx)
    ra_grid = Mpix2coord[0, 0] * x_pix + Mpix2coord[0, 1] * y_pix + ra_at_xy_0
    dec_grid = Mpix2coord[1, 0] * x_pix + Mpix2coord[1, 1] * y_pix + dec_at_xy_0
    return ra_grid, dec_grid
@export
def get_axes(x, y):
    """Extract the 1d x- and y-axes from flattened square-grid coordinates.

    :param x: flattened x-coordinates of a square grid
    :param y: flattened y-coordinates of a square grid
    :return: (x_axes, y_axes)
    :raises ValueError: if the input length is not a perfect square
    """
    side = int(np.sqrt(len(x)))
    if side ** 2 != len(x):
        raise ValueError("lenght of input array given as %s is not square of integer number!" % (len(x)))
    grid_x = x.reshape(side, side)
    grid_y = y.reshape(side, side)
    # x varies along a row, y varies down a column
    return grid_x[0, :], grid_y[:, 0]
@export
def averaging(grid, numGrid, numPix):
    """Downsample a (numGrid x numGrid) grid to (numPix x numPix) by averaging
    over square pixel blocks.

    :param grid: higher resolution pixel grid
    :param numGrid: pixels per axis of the high-resolution input
    :param numPix: pixels per axis of the output (numGrid/numPix must be integer)
    :return: block-averaged 2d array
    """
    factor = int(numGrid / numPix)
    blocks = grid.reshape(int(numPix), factor, int(numPix), factor)
    return blocks.mean(axis=(1, 3))
@export
def displaceAbs(x, y, sourcePos_x, sourcePos_y):
    """Compute the absolute (radial) displacement of each point from a source
    position.

    :param x: x-coordinates
    :param y: y-coordinates
    :param sourcePos_x: source x-position
    :param sourcePos_y: source y-position
    :returns: array of displacements
    """
    dx = x - sourcePos_x
    dy = y - sourcePos_y
    return np.sqrt(dx ** 2 + dy ** 2)
@export
def get_distance(x_mins, y_mins, x_true, y_true):
    """Greedy sum of squared distances matching each (x_mins, y_mins) point to
    its nearest remaining (x_true, y_true) point.

    Fix: the original located the match with ``np.where(dist == min)``, which
    returns ALL tied indices; ``np.delete`` then removed several points at once,
    corrupting the matching (and crashing once the lists ran empty). The match
    is now a single index via ``np.argmin``.

    :param x_mins: candidate x-positions
    :param y_mins: candidate y-positions
    :param x_true: reference x-positions
    :param y_true: reference y-positions
    :return: summed squared distance, or 10**10 when the lengths differ
    """
    if len(x_mins) != len(x_true):
        return 10 ** 10  # sentinel: incompatible configurations
    dist = 0
    x_true_list = np.array(x_true)
    y_true_list = np.array(y_true)
    for i in range(len(x_mins)):
        dist_list = (x_mins[i] - x_true_list) ** 2 + (y_mins[i] - y_true_list) ** 2
        k = int(np.argmin(dist_list))  # exactly one nearest neighbour, first on ties
        dist += dist_list[k]
        # consume the matched reference point so it cannot be matched twice
        x_true_list = np.delete(x_true_list, k)
        y_true_list = np.delete(y_true_list, k)
    return dist
@export
def compare_distance(x_mapped, y_mapped):
    """Sum the squared pairwise distances between all remapped image positions.

    :param x_mapped: array of x-positions of remapped catalogue image
    :param y_mapped: array of y-positions of remapped catalogue image
    :return: sum of squared distances over all unordered pairs
    """
    total = 0
    for (x_i, y_i), (x_j, y_j) in itertools.combinations(zip(x_mapped, y_mapped), 2):
        total += (x_i - x_j) ** 2 + (y_i - y_j) ** 2
    return total
@export
def min_square_dist(x_1, y_1, x_2, y_2):
    """For every point in (x_1, y_1), find the minimum squared distance to any
    point in (x_2, y_2).

    :param x_1: x-coordinates of the query points
    :param y_1: y-coordinates of the query points
    :param x_2: x-coordinates of the target points
    :param y_2: y-coordinates of the target points
    :return: array (same dtype/shape as x_1) of minimum squared distances
    """
    dist = np.zeros_like(x_1)
    for i, (x_i, y_i) in enumerate(zip(x_1, y_1)):
        dist[i] = np.min((x_i - x_2) ** 2 + (y_i - y_2) ** 2)
    return dist
@export
def selectBest(array, criteria, numSelect, highest=True):
    """Select numSelect elements of ``array`` ranked by ``criteria``.

    :param array: numpy array to be selected from
    :param criteria: selection criteria, same length as array
    :param numSelect: number of elements to be selected
    :param highest: if False, the lowest-ranked elements are selected
    :return: selected elements, best-criteria first
    """
    n, m = len(array), len(criteria)
    if n != m:
        raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, m))
    if n < numSelect:
        return array
    ranked = array[criteria.argsort()]
    chosen = ranked[n - numSelect:] if highest else ranked[0:numSelect]
    return chosen[::-1]
@export
def select_best(array, criteria, num_select, highest=True):
    """Select num_select entries of `array` ranked by `criteria`.

    :param array: array-like to be selected from
    :param criteria: numpy array ranking the entries of `array`
    :param highest: bool; if not True the lowest-ranked entries are selected
    :param num_select: number of elements to be selected
    :return: selected entries; `array` unchanged if shorter than num_select
    """
    n = len(array)
    if n != len(criteria):
        raise ValueError('Elements in array (%s) not equal to elements in criteria (%s)' % (n, len(criteria)))
    if n < num_select:
        return array
    array = np.array(array)
    descending = criteria.argsort()[::-1]
    # NOTE: identity check (`is True`) kept on purpose -- truthy non-True
    # values fall through to the lowest-ranked branch, as in the original.
    if highest is True:
        picked = descending[:num_select]
    else:
        picked = descending[n - num_select:]
    return array[picked]
@export
def points_on_circle(radius, num_points, connect_ends=True):
    """
    returns a set of uniform points around a circle

    :param radius: radius of the circle
    :param num_points: number of points on the circle
    :param connect_ends: bool; if True (default, original behavior) the angles
        span [0, 2 pi] inclusive, so the first and last point coincide -- handy
        for drawing a closed circle. If False, num_points *distinct* evenly
        spaced points are returned.
    :return: x-coordinates, y-coordinates of the circle points
    """
    if connect_ends:
        angle = np.linspace(0, 2 * np.pi, num_points)
    else:
        # stop one step short of 2 pi so no point is duplicated
        angle = np.linspace(0, 2 * np.pi * (1 - 1. / num_points), num_points)
    x_coord = np.cos(angle) * radius
    y_coord = np.sin(angle) * radius
    return x_coord, y_coord
@export
@jit()
def neighborSelect(a, x, y):
    """
    #TODO replace by from scipy.signal import argrelextrema for speed up
    >>> from scipy.signal import argrelextrema
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelextrema(x, np.greater)
    (array([3, 6]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelextrema(y, np.less, axis=1)
    (array([0, 2]), array([2, 1]))
    finds (local) minima in a 2d grid
    :param a: 1d array of displacements from the source positions
    :type a: numpy array with length numPix**2 in float
    :param x: 1d array of x-coordinates matching the entries of `a`
    :param y: 1d array of y-coordinates matching the entries of `a`
    :returns: array of indices of local minima, values of those minima
    :raises: AttributeError, KeyError
    """
    # `a` is a flattened dim x dim grid, so i +/- dim steps one grid row
    # (assumes len(a) is a perfect square -- TODO confirm at call sites)
    dim = int(np.sqrt(len(a)))
    values = []
    x_mins = []
    y_mins = []
    # range skips the first/last row so the direct neighbour lookups below
    # cannot run out of the array bounds
    for i in range(dim + 1, len(a) - dim - 1):
        # strict minimum over the immediate 8-neighbourhood
        if (a[i] < a[i - 1]
                and a[i] < a[i + 1]
                and a[i] < a[i - dim]
                and a[i] < a[i + dim]
                and a[i] < a[i - (dim - 1)]
                and a[i] < a[i - (dim + 1)]
                and a[i] < a[i + (dim - 1)]
                and a[i] < a[i + (dim + 1)]):
            # second, wider ring of neighbours two cells away; the modulo
            # dim**2 wraps out-of-range flat indices around the array
            if (a[i] < a[(i - 2 * dim - 1) % dim ** 2]
                    and a[i] < a[(i - 2 * dim + 1) % dim ** 2]
                    and a[i] < a[(i - dim - 2) % dim ** 2]
                    and a[i] < a[(i - dim + 2) % dim ** 2]
                    and a[i] < a[(i + dim - 2) % dim ** 2]
                    and a[i] < a[(i + dim + 2) % dim ** 2]
                    and a[i] < a[(i + 2 * dim - 1) % dim ** 2]
                    and a[i] < a[(i + 2 * dim + 1) % dim ** 2]
                    and a[i] < a[(i + 2 * dim) % dim ** 2]
                    and a[i] < a[(i - 2 * dim) % dim ** 2]
                    and a[i] < a[(i - 2) % dim ** 2]
                    and a[i] < a[(i + 2) % dim ** 2]):
                x_mins.append(x[i])
                y_mins.append(y[i])
                values.append(a[i])
    return np.array(x_mins), np.array(y_mins), np.array(values)
@export
def fwhm2sigma(fwhm):
    """Convert a Gaussian full-width-at-half-maximum to its sigma.

    :param fwhm: full-width-half-max value
    :return: gaussian sigma (sqrt(var))
    """
    # FWHM = 2 * sqrt(2 ln 2) * sigma
    return fwhm / (2 * np.sqrt(2 * np.log(2)))
@export
def sigma2fwhm(sigma):
    """Convert a Gaussian sigma to its full-width-at-half-maximum.

    :param sigma: gaussian sigma (sqrt(var))
    :return: full-width-half-max value
    """
    # FWHM = 2 * sqrt(2 ln 2) * sigma
    return sigma * (2 * np.sqrt(2 * np.log(2)))
@export
def hyper2F2_array(a, b, c, d, x):
    """Evaluate the generalized hypergeometric function 2F2(a, b; c, d; x).

    :param a: first numerator parameter
    :param b: second numerator parameter
    :param c: first denominator parameter
    :param d: second denominator parameter
    :param x: scalar (int/float) or iterable of evaluation points
    :return: mpmath value for scalar input, float numpy array for iterables
    """
    if isinstance(x, (int, float)):
        return mpmath.hyp2f2(a, b, c, d, x)
    # array input: evaluate element-wise into a float array
    out = np.zeros(len(x))
    for i, x_i in enumerate(x):
        out[i] = mpmath.hyp2f2(a, b, c, d, x_i)
    return out
@export
def make_subgrid(ra_coord, dec_coord, subgrid_res=2):
    """
    return a grid with subgrid resolution

    :param ra_coord: 1d array of ra coordinates (presumably a flattened square
        image grid accepted by array2image -- confirm)
    :param dec_coord: 1d array of dec coordinates of the same grid
    :param subgrid_res: integer subsampling factor per axis (default 2)
    :return: 1d ra and dec coordinate arrays of the subsampled grid
    """
    ra_array = array2image(ra_coord)
    dec_array = array2image(dec_coord)
    n = len(ra_array)
    # grid steps along the two image axes for both coordinates; keeping
    # separate x- and y-steps supports rotated/sheared coordinate grids
    d_ra_x = ra_array[0][1] - ra_array[0][0]
    d_ra_y = ra_array[1][0] - ra_array[0][0]
    d_dec_x = dec_array[0][1] - dec_array[0][0]
    d_dec_y = dec_array[1][0] - dec_array[0][0]
    ra_array_new = np.zeros((n * subgrid_res, n * subgrid_res))
    dec_array_new = np.zeros((n * subgrid_res, n * subgrid_res))
    # each (i, j) offset fills one interleaved sub-lattice; the constant
    # -1/2 + 1/(2*subgrid_res) shift centres the sub-pixels within the
    # original pixel
    for i in range(0, subgrid_res):
        for j in range(0, subgrid_res):
            ra_array_new[i::subgrid_res, j::subgrid_res] = ra_array + d_ra_x * (
                    -1 / 2. + 1 / (2. * subgrid_res) + j / float(subgrid_res)) + d_ra_y * (
                    -1 / 2. + 1 / (2. * subgrid_res) + i / float(
                subgrid_res))
            dec_array_new[i::subgrid_res, j::subgrid_res] = dec_array + d_dec_x * (
                    -1 / 2. + 1 / (2. * subgrid_res) + j / float(subgrid_res)) + d_dec_y * (
                    -1 / 2. + 1 / (2. * subgrid_res) + i / float(
                subgrid_res))
    ra_coords_sub = image2array(ra_array_new)
    dec_coords_sub = image2array(dec_array_new)
    return ra_coords_sub, dec_coords_sub
@export
def convert_bool_list(n, k=None):
    """
    returns a bool list of the length of the lens models
    if k = None: returns bool list with True's
    if k is int, returns bool list with False's but k'th is True
    if k is a list of int, e.g. [0, 3, 5], returns a bool list with True's in the integers listed and False elsewhere
    if k is a boolean list, checks for size to match the numbers of models and returns it

    :param n: integer, total lenght of output boolean list
    :param k: None, int, or list of ints
    :return: bool list
    """
    if k is None:
        return [True] * n
    # bool is a subclass of int, so a bare True/False also lands here,
    # selecting index 1/0 respectively (original behavior preserved)
    if isinstance(k, (int, np.integer)):
        selection = [False] * n
        selection[k] = True
        return selection
    if len(k) == 0:
        return [False] * n
    if isinstance(k[0], bool):
        if n != len(k):
            raise ValueError('length of selected lens models in format of boolean list is %s '
                             'and does not match the models of this class instance %s.' % (len(k), n))
        return k
    if isinstance(k[0], (int, np.integer)):
        selection = [False] * n
        for k_i in k:
            if k_i is not False:
                if k_i < n:
                    selection[k_i] = True
                else:
                    raise ValueError("k as set by %s is not convertable in a bool string!" % k)
        return selection
    raise ValueError('input list k as %s not compatible' % k)
834,
9800,
834,
796,
705,
35475,
12817,
11751,
6,
198,
198,
37811,
198,
5661,
2393,
4909,
3210,
31878,
198,
37811,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
285,
4426,
776,
198,
11748,
340,
861,
10141,
198,
6738,
18896,
301,
... | 2.188349 | 9,201 |
"""
This code is based on code from voila, copied under the following license:
BSD License
Copyright (c) 2018 Voila contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
a. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
b. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
c. Neither the name of the authors nor the names of the contributors to
this package may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
import os
import gettext
from jinja2 import Environment, FileSystemLoader
import tornado
from jupyter_server.utils import url_path_join
from jupyter_server.base.handlers import path_regex
from jupyter_server.base.handlers import FileFindHandler
from voila.paths import ROOT, STATIC_ROOT, collect_template_paths, jupyter_path
from voila.handler import VoilaHandler
from voila.treehandler import VoilaTreeHandler
from voila.static_file_handler import MultiStaticFileHandler, WhiteListFileHandler
from voila.configuration import VoilaConfiguration
# Absolute directory of this module; anchors the dev-mode probes below.
HERE = os.path.dirname(__file__)

# if the directory above us contains the following paths, it means we are installed in dev mode (pip install -e .)
DEV_MODE = os.path.exists(os.path.join(HERE, "../setup.py")) and os.path.exists(
    os.path.join(HERE, "../share")
)
| [
37811,
198,
1212,
2438,
318,
1912,
319,
2438,
422,
7608,
10102,
11,
18984,
739,
262,
1708,
5964,
25,
198,
198,
21800,
13789,
198,
198,
15269,
357,
66,
8,
2864,
569,
9437,
64,
20420,
13,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
39... | 3.444915 | 708 |
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,10)
from distributionLib import Dist
# Fixed seed so the subsample draws below are reproducible.
np.random.seed(404)
N = 1000000
# Bernoulli population of N draws, shape (N, 1): 43% ones, 57% zeros.
data = np.random.choice([0, 1], size=(N,1), p=[0.57, 0.43])
# Growing subsample sizes: 100, 600, 1100, ... 9600.
for sample in range(100,10000,500):
    # indices drawn with replacement from the population
    ind = np.random.randint(0,N,[sample,1])
    Dist.MOE(data[ind])  # presumably computes/plots the margin of error -- confirm against distributionLib
# margin of error of the full population for comparison
Dist.MOE(data)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
489,
83,
13,
6015,
10044,
4105,
14692,
26875,
13,
5647,
7857,
8973,
796,
357,
1238,
11,
940,
8,
198,
6738,
6082,
25835,
1330,
4307,
198,... | 2.349315 | 146 |
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
# from cities_light.models import Country, Region, City
from .models import (ServiceProvider,
CustomerProfile, RatingAndReview)
class ServiceProviderProfileForm(ModelForm):
    """Generate fields from ServiceProvider model."""
    # NOTE(review): no Meta (model/fields) is defined here; a ModelForm
    # without Meta fails at instantiation -- confirm the Meta block was not
    # lost.
class CustomerProfileForm(ModelForm):
    """Generate fields from CustomerProfile model."""
    # NOTE(review): no Meta (model/fields) is defined here; a ModelForm
    # without Meta fails at instantiation -- confirm the Meta block was not
    # lost.
class CustomerRegistration(UserCreationForm):
    """Customer registration."""

    # Identity fields collected on top of UserCreationForm's password
    # handling; all are required, with empty help text to suppress Django's
    # default hints.
    # NOTE(review): no Meta binding this form to the User model is visible
    # here -- confirm it was not lost.
    username = forms.CharField(
        max_length=100, required=True, help_text=""
    )
    first_name = forms.CharField(
        max_length=100, required=True, help_text=""
    )
    last_name = forms.CharField(
        max_length=100, required=True, help_text=""
    )
    email = forms.EmailField(
        max_length=250, required=True, help_text=""
    )
class RatingAndReviewForm(ModelForm):
    """Generate review and rating form model."""
    # NOTE(review): no Meta (model/fields) is defined here; a ModelForm
    # without Meta fails at instantiation -- confirm the Meta block was not
    # lost.
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
1330,
9104,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
23914,
1330,
11787,
12443,
341,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27... | 3 | 345 |
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
def get_network(isamAppliance, application_interface, statistics_duration, check_mode=False, force=False):
"""
Retrieving the Application Interface Statistics
"""
return isamAppliance.invoke_get("Retrieving the Application Interface Statistics",
"/analysis/interface_statistics.json{0}".format(
tools.create_query_string(prefix=application_interface,
timespan=statistics_duration)))
def get_rp_junction(isamAppliance, instance, date, duration, check_mode=False, force=False):
"""
Retrieving junction average response times for a Reverse Proxy instance
"""
return isamAppliance.invoke_get("Retrieving junction average response times for a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/reqtime{0}".format(
tools.create_query_string(date=date,
duration=duration,
instance=instance)))
def get_rp_health_summary(isamAppliance, check_mode=False, force=False):
"""
Retrieving a summary of Reverse Proxy health
"""
return isamAppliance.invoke_get("Retrieving a summary of Reverse Proxy health",
"/wga/widgets/health.json")
def get_rp_throughput_summary(isamAppliance, date, duration, aspect, summary=True, check_mode=False, force=False):
"""
Retrieving a summary of throughput for all Reverse Proxy instances
"""
return isamAppliance.invoke_get("Retrieving a summary of throughput for all Reverse Proxy instances",
"/analysis/reverse_proxy_traffic/throughput/{0}".format(
tools.create_query_string(summary=summary,
date=date,
duration=duration,
aspect=aspect)))
def get_rp_throughput(isamAppliance, instance, date, duration, check_mode=False, force=False):
"""
Retrieving throughput records for a specific Reverse Proxy instance
"""
return isamAppliance.invoke_get("Retrieving throughput records for a specific Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/throughput/{0}{1}".format(instance,
tools.create_query_string(
date=date,
duration=duration)))
def get_rp_traffic_summary(isamAppliance, instance, date, duration, aspect, summary=True, check_mode=False, force=False):
"""
Retrieving a summary of traffic by Junction or User-Agent on a Reverse Proxy instance
"""
return isamAppliance.invoke_get(
"Retrieving a summary of traffic by Junction or User-Agent on a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/traffic/instance/{0}/{1}".format(instance,
tools.create_query_string(summary=summary,
date=date,
duration=duration,
aspect=aspect)))
def get_rp_traffic(isamAppliance, instance, date, duration, aspect, aspect_identifier, check_mode=False, force=False):
"""
Retrieving a summary of traffic records for a specific Junction or User-Agent on a Reverse Proxy instance
"""
return isamAppliance.invoke_get(
"Retrieving a summary of traffic records for a specific Junction or User-Agent on a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/traffic/instance/{0}/{1}/{2}{3}".format(instance, aspect, aspect_identifier,
tools.create_query_string(date=date,
duration=duration)))
def get_rp_traffic_detail(isamAppliance, instance, date, duration, aspect, aspect_identifier, check_mode=False,
force=False):
"""
Retrieving detailed traffic records for a specific Junction or User-Agent on a Reverse Proxy instance
"""
return isamAppliance.invoke_get(
"Retrieving detailed traffic records for a specific Junction or User-Agent on a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/traffic/instance/{0}/{1}/{2}/{3}".format(instance, aspect, aspect_identifier,
tools.create_query_string(date=date,
duration=duration,
aspect=aspect)))
def get_rp_traffic_detail_aspect(isamAppliance, instance, date, duration, aspect, aspect_identifier, check_mode=False,
force=False):
"""
Retrieving detailed traffic records for a specific User-Agent on a specific junction in a Reverse Proxy instance
"""
return isamAppliance.invoke_get(
"Retrieving detailed traffic records for a specific User-Agent on a specific junction in a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/traffic/instance/{0}/{1}/{2}/{1}/{2}{3}".format(instance, aspect,
aspect_identifier,
tools.create_query_string(
date=date,
duration=duration)))
def get_rp_waf_events(isamAppliance, instance, date, duration, type, check_mode=False,
force=False):
"""
Retrieving security action events for a Reverse Proxy instance
"""
return isamAppliance.invoke_get(
"Retrieving security action events for a Reverse Proxy instance",
"/analysis/reverse_proxy_traffic/pam_events{0}".format(
tools.create_query_string(
date=date,
duration=duration,
instance=instance,
type=type)))
def get_cpu(isamAppliance, statistics_duration, check_mode=False, force=False):
"""
Retrieving the CPU Usage Statistics
"""
return isamAppliance.invoke_get(
"Retrieving the CPU Usage Statistics",
"/statistics/systems/cpu.json{0}".format(
tools.create_query_string(
timespan=statistics_duration)))
def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):
    """
    Retrieving the Memory Usage Statistics
    """
    return isamAppliance.invoke_get(
        "Retrieving the Memory Usage Statistics",
        "/statistics/systems/memory.json{0}".format(
            tools.create_query_string(
                timespan=statistics_duration)))
def get_storage(isamAppliance, statistics_duration, check_mode=False, force=False):
"""
Retrieving the Storage Usage Statistics
"""
return isamAppliance.invoke_get(
"Retrieving the Storage Usage Statistics",
"/statistics/systems/storage.json{0}".format(
tools.create_query_string(
timespan=statistics_duration)))
| [
11748,
18931,
198,
6738,
24283,
76,
12961,
13,
315,
2410,
1330,
4899,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628,
198,
4299,
651,
62,
27349,
7,
271,
321,
4677,
75,
3610,
11,
3586,
62,
39994,
... | 1.929767 | 4,300 |
#!/usr/bin/env python
import setuptools
from setuptools import setup
import tarfile
import os, sys
from setupbase import (
UpdateSubmodules,
check_submodule_status,
update_submodules,
require_clean_submodules
)
#-------------------------------------------------------------------------------
# Make sure we aren't trying to run without submodules
#-------------------------------------------------------------------------------
here = os.path.abspath(os.path.dirname(__file__))
require_clean_submodules(here, sys.argv)
from gluon.fileutils import tar, untar, read_file, write_file
def tar(file, filelist, expression='^.+$'):
    """
    tars dir/files into file, only tars file that match expression
    """
    # NOTE(review): this definition shadows the `tar` imported from
    # gluon.fileutils above, and `listdir` is not imported anywhere in this
    # file -- presumably gluon.fileutils.listdir was intended; confirm.
    # The local name `tar` below also shadows this function itself, and the
    # inner `for file in ...` shadows the `file` parameter.
    tar = tarfile.TarFile(file, 'w')
    try:
        for element in filelist:
            try:
                for file in listdir(element, expression, add_dirs=True):
                    tar.add(os.path.join(element, file), file, False)
            # bare except: falls back to adding the element directly when it
            # is not a listable directory (also hides real errors)
            except:
                tar.add(element)
    finally:
        tar.close()
if __name__ == '__main__':
    #print "web2py does not require installation and"
    #print "you should just start it with:"
    #print
    #print "$ python web2py.py"
    #print
    #print "are you sure you want to install it anyway (y/n)?"
    #s = raw_input('>')
    #if s.lower()[:1]=='y':
    # NOTE(review): start() is not defined or imported in this file --
    # as written this raises NameError; confirm where start was meant to
    # come from (the commented prompt above suggests web2py's entry point).
    start()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
900,
37623,
10141,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
11748,
13422,
7753,
198,
11748,
28686,
11,
25064,
198,
198,
6738,
9058,
8692,
1330,
357,
198,
220,
220,... | 2.675147 | 511 |
import urllib2
from bs4 import BeautifulSoup
from datetime import datetime
import pandas as pd
import argparse
"""Fetch all daringfireball articles since 2002
count the article character length
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Fetch all daringfireball articles since 2002'
)
parser.add_argument('output')
args = parser.parse_args()
df = pd.DataFrame(
columns=(
'date',
'text',
'chars',
'title',
'url',
)
)
for year in range(2002, 2017):
print str(year)
soup = BeautifulSoup(
urllib2.urlopen("http://daringfireball.net/" + str(year)).read(),
"html.parser"
)
archive = soup.find("div", class_="archive")
p = archive.find_all("p")
for item in p:
link = item.find("a", href=True)
# look for url in soup
url = link['href']
# look for title in soup
title = link.string.encode('ascii', 'ignore')
# look for date in soup
date = item.find("small")
# get native time object
date = datetime.strptime(
date.string.encode('ascii', 'ignore'), "%d%b%Y"
)
artcl_soup = BeautifulSoup(
urllib2.urlopen(url).read(),
"html.parser"
)
text = artcl_soup.find("div", class_="article")
text_sans_quotes = []
for p in text.find_all("p"):
# add ascii text if not a quote
if p.parent.name != 'blockquote':
text_sans_quotes.append(
p.getText().encode('ascii', 'ignore')
)
full_text = str(text_sans_quotes)
print "Number of Characters: %d" % len(full_text)
print date
print url
print title
s = pd.Series({
'date': date,
'text': full_text,
'chars': len(full_text),
'title': title,
'url': url,
})
df = df.append(s, ignore_index=True)
# make date object the index
df = df.set_index('date')
df.to_pickle(args.output)
| [
11748,
2956,
297,
571,
17,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1822,
29572,
198,
198,
37811,
37,
7569,
477,
27939,
6495,
1894,
6685... | 1.902755 | 1,234 |
# -*- coding: utf-8 -*-
"""Top-level package for cycle-arbitrage."""
__author__ = """drinksober"""
__email__ = 'me@mum5.cn'
__version__ = '0.1.0'
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
6772,
12,
283,
2545,
8394,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
7109,
676,
568,
527,
37811,
198,
834,
12888,
834,
7... | 2.208955 | 67 |
# -*- coding: utf-8 -*-
"""BDA_Assignment_04_v2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1gfYg_0pK4fVIOxYRi9ybLD3ySsqR2cg2
# Streaming Twitter Data Using Kafka
Importing the libraries:
"""
!pip install kafka-python
!pip install python-twitter
!pip install tweepy
"""# Now Start the Apache Kafka with the following commands in their respective directory in a cmd prompt.
zkServer (To start Zookeeper)
kafka-server-start.bat config\server.properties (To connect Kafka)
"""
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from kafka import KafkaProducer
import json
import kafka
"""Using the developer.twitter.com (developer account) access your necessary API keys and tokens.
(Every Key is unique and limited)
"""
twitterApiKey = 'xKH*******************ubqS'
twitterApiSecret = 'VYO5A******************************tw1r19'
twitterApiAccessToken = '1661**************************************DK40xr'
twitterApiAccessTokenSecret = 'hGKDl***************************************qS7FYfV'
query = ['COVID19', "CORONA VACCINE"]
producer = KafkaProducer(bootstrap_servers='localhost:9092', api_version= (0, 10, 0),value_serializer=lambda v: json.dumps(v).encode('utf-8'))
# NOTE(review): StdOutListener is never defined or imported in this notebook
# (only StreamListener is imported above) -- this raises NameError; a
# StreamListener subclass forwarding tweets to the producer was presumably
# lost.
listener = StdOutListener()
auth = OAuthHandler(twitterApiKey, twitterApiSecret)
auth.set_access_token(twitterApiAccessToken, twitterApiAccessTokenSecret)
stream = Stream(auth, listener)
stream.filter(track = query)

#kafka = KafkaClient(bootstrap_servers= "localhost:9092")
# NOTE(review): this second block duplicates the producer/listener/stream
# setup above with a different api_version; only one setup is needed.
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                         api_version=(0,11,5),
                         value_serializer=lambda x: dumps(x).encode('utf-8'))
# NOTE(review): `dumps` is undefined here (json is imported as a module and
# the first producer uses json.dumps) -- this lambda raises NameError when
# the producer serializes a value.
listener = StdOutListener()
auth = OAuthHandler(twitterApiKey, twitterApiSecret)
auth.set_access_token(twitterApiAccessToken, twitterApiAccessTokenSecret)
stream = Stream(auth, listener)
stream.filter(track=query)
#KafkaTimeoutError: Failed to update metadata after 60.0 secs.
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
33,
5631,
62,
8021,
16747,
62,
3023,
62,
85,
17,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
... | 2.994083 | 676 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import collections
import functools as ft
import pytest
import pyzpl
import pyzpl2
FIXTURE_1_DATA = b"""
1. ZPL configuration file example
1. This format is designed to be trivial to write and parse
#
context
iothreads = 1
verbose = 1 # Ask for a trace
main
type = zmq_queue
frontend
option
hwm = 1000
swap = 25000000
subscribe = "#2"
bind = tcp://eth0:5555
backend
bind = tcp://eth0:5556
""".strip()
FIXTURE_1_TREE = {
'context': {'iothreads': "1", 'verbose': "1"},
'main' : {
'type' : "zmq_queue",
'frontend': {
'option': {'hwm': "1000", 'swap': "25000000", 'subscribe': "#2"},
'bind' : "tcp://eth0:5555",
},
'backend': {'bind': "tcp://eth0:5556"},
},
}
FIXTURE_1_FLAT_TREE = {
"context:iothreads" : "1",
"context:verbose" : "1",
"main:type" : "zmq_queue",
"main:frontend:option:hwm" : "1000",
"main:frontend:option:swap" : "25000000",
"main:frontend:option:subscribe": "#2",
"main:frontend:bind" : "tcp://eth0:5555",
"main:backend:bind" : "tcp://eth0:5556",
}
FIXTURE_2_DATA = b"""
version = 1.0
apps
listener
context
iothreads = 1
verbose = 1
devices
main
type = zmq_queue
sockets
frontend
type = SUB
option
hwm = 1000
swap = 25000000
bind = tcp://eth0:5555
backend
bind = tcp://eth0:5556
""".lstrip()
FIXTURE_2_TREE = {
'version': "1.0",
'apps' : {
'listener': {
'context': {'iothreads': "1", 'verbose': "1"},
'devices': {
'main': {
'type' : "zmq_queue",
'sockets': {
'frontend': {
'type' : "SUB",
'option': {'hwm': "1000", 'swap': "25000000"},
'bind' : "tcp://eth0:5555",
},
'backend': {'bind': "tcp://eth0:5556"},
},
}
},
}
},
}
NESTED_1_DATA = b"""
root
branch
leafname = leafval
""".lstrip()
NESTED_1_TREE = {'root': {'branch': {'leafname': "leafval"}}}
NESTED_1_TREE_FLAT = {"root:branch:leafname": "leafval"}
Case = collections.namedtuple("Case", ['name', 'call', 'data', 'expected'])
UNUSED_TEST_CASES = []
LOAD_TEST_CASES = [
Case(name="loads spec doc", call=pyzpl.loads, data=FIXTURE_1_DATA, expected=FIXTURE_1_TREE),
Case(
name="loads spec doc flat",
call=ft.partial(pyzpl.loads, flat=True),
data=FIXTURE_1_DATA,
expected=FIXTURE_1_FLAT_TREE,
),
Case(
name="load_stream spec doc",
call=(lambda data: {":".join(k): v for k, v in pyzpl.load_stream(io.BytesIO(data))}),
data=FIXTURE_1_DATA,
expected=FIXTURE_1_FLAT_TREE,
),
Case(name="loads config", call=pyzpl.loads, data=FIXTURE_2_DATA, expected=FIXTURE_2_TREE),
Case(name="loads nested", call=pyzpl.loads, data=NESTED_1_DATA , expected=NESTED_1_TREE),
Case(
name="dumps nested 1", call=pyzpl.dumps, data=NESTED_1_TREE, expected=NESTED_1_DATA.decode()
),
Case(
name="dumps nested 1 flat",
call=pyzpl.dumps,
data=NESTED_1_TREE_FLAT,
expected=NESTED_1_DATA.decode(),
),
Case(
name="dumps hello world",
call=pyzpl.dumps,
data={'hello': "world"},
expected="hello = world\n",
),
Case(
name="dumps val with hash",
call=pyzpl.dumps,
data={'propname': "world with # hash (not a comment)"},
expected="""propname = "world with # hash (not a comment)"\n""",
),
# TODO (mb 2016-11-22): Test cases for
# - A value that starts with a quote and does not end in a
# matching quote is treated as unquoted.
]
@pytest.mark.parametrize("name, call, data, expected", LOAD_TEST_CASES)
FIXTURE_3_DATA = b"""
# Basement printer
node = basement
ip = 10.1.2.3
port = 2001
device = Canon Pixma
# Front door security camera
node = front door
ip = 10.1.2.10
port = 8080
device = Wyze Cam Pan 1080p
# Nursery bio-monitor
node = nursery
ip = 10.1.2.42
port = 8888
device = Mimo Sleep Tracker
# Our users
authorized_users
authorization = simple
user = alex
privilege = super-user
user = thomas
privilege = user
user = mark
privilege = user
""".lstrip()
FIXTURE_3_OUT = b"""
node = basement
ip = 10.1.2.3
port = 2001
device = Canon Pixma
node = front door
ip = 10.1.2.10
port = 8080
device = Wyze Cam Pan 1080p
node = nursery
ip = 10.1.2.42
port = 8888
device = Mimo Sleep Tracker
authorized_users
authorization = simple
user = alex
privilege = super-user
user = thomas
privilege = user
user = mark
privilege = user
""".strip()
def test_hiearchical():
"""test the Config file parser and interface
"""
cfg = pyzpl2.load_cfg(io.BytesIO(FIXTURE_3_DATA))
assert cfg != None
# "subscript" access
node = cfg['node'] # get the first node
assert node.value == "basement"
node = cfg["node=front door"] # query selection
assert node.value == "front door"
# Negative test
with pytest.raises(KeyError) as excinfo:
node = cfg['door']
assert "door" in str(excinfo.value)
with pytest.raises(KeyError) as excinfo:
node = cfg["node=garage"]
assert "node=garage" in str(excinfo.value)
# get() access
node = cfg.get("node") # get the first node (unqualified)
assert node != None
assert node.value == "basement"
# sub-element navigation
ip1 = node.get("ip") # relative to the sub-tree retrieved above
ip2 = cfg.get(("node", "ip")) # still the first node, hierarchicaly qualified
ip3 = cfg.get("node:ip") # string based fully qualified
assert ip1 != None
assert ip1 == ip2 == ip3
assert ip1.value == "10.1.2.3"
auth = cfg.get("authorized_users:authorization")
assert auth != None
assert auth.value == "simple"
# chained "indexing"
auth = cfg['authorized_users']['authorization']
assert auth.value == "simple"
# filtering
node = cfg.get("node", query="nursery")
assert node != None
assert node.value == "nursery"
# When the query is the leaf node, a simple query may be used
user = cfg.get(("authorized_users", "user"), query="mark")
assert user != None
assert user.value == "mark"
# This test demonstrates how filters can be applied at any level. The query is extended to
# include implicit 'None' values on the left, as needed to balance the depth of the path. So
# --- path --- --- query ---
# ('a', 'b', 'c') ('1', None)
# is equivalent to
# ('a', 'b', 'c') (None, '1', None)
# or
# cfg.get('a').get('b', query='1').get('c')
priv = cfg.get(("authorized_users", "user", "privilege"), query=("alex", None))
assert priv != None
assert priv.value == "super-user"
# iteration
children = [child for child in cfg.children]
assert len(children) == 4
node = children[0]
assert node.name == "node"
assert node.value == "basement"
node = children[1]
assert node.name == "node"
assert node.value == "front door" # order is preserved
node = children[2]
assert node.name == "node"
assert node.value == "nursery"
node = children[3]
assert node.name == "authorized_users"
assert node.value == "" # this node has no value
# return is a dump of the tree (root node). It should match the
# input, less blank lines and comments
assert str(cfg).strip().encode() == FIXTURE_3_OUT
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,... | 2.13981 | 3,891 |
# -*- coding: utf-8 -*-
import collections
from expects import *
from booby import Model, fields, errors
from tests.unit._helpers import MyDict
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
17268,
198,
198,
6738,
13423,
1330,
1635,
198,
198,
6738,
1489,
26730,
1330,
9104,
11,
7032,
11,
8563,
198,
6738,
5254,
13,
20850,
13557,
16794,
364,
1330,
2... | 3.08 | 50 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628
] | 3.75 | 8 |
import os, subprocess
import requests
url_search = 'http://127.0.0.1:8000/search'
url_image = 'http://127.0.0.1:8000/image'
proxies = {}
# Interactive loop: query the local search service until an empty query is
# entered, then show the best hit and open its frame image.
while True:
    query = input('Query: ')
    if not query:
        break
    # NOTE(review): get_search / get_image are not defined in this snippet
    # and are not imported -- presumably helpers wrapping requests.get
    # against url_search / url_image were lost; confirm before running.
    results = get_search(query)
    if len(results) == 0:
        print('No results')
        continue
    firstResult = results[0]
    episodeName = firstResult['episodeName']
    timestamp = firstResult['time_begin']
    print(f'Episode name: {episodeName}, similarity: {firstResult["similarity"]}, timestamp range: {timestamp}..{firstResult["time_end"]}')
    print(f'Text: {firstResult["text"]}')
    print()
    image = get_image(episodeName, timestamp)
    with open('temp.jpg', 'wb') as f:
        f.write(image)
    # Windows-only: open the saved frame in Windows Photo Viewer.
    subprocess.call(f'rundll32 "C:\Program Files (x86)\Windows Photo Viewer\PhotoViewer.dll", ImageView_Fullscreen {os.path.abspath("temp.jpg")}')
| [
11748,
28686,
11,
850,
14681,
201,
198,
11748,
7007,
201,
198,
201,
198,
6371,
62,
12947,
796,
705,
4023,
1378,
16799,
13,
15,
13,
15,
13,
16,
25,
33942,
14,
12947,
6,
201,
198,
6371,
62,
9060,
796,
705,
4023,
1378,
16799,
13,
15,... | 2.408377 | 382 |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import six
from .helper import broker_range
from kafka_utils.kafka_cluster_manager.cluster_info \
.error import InvalidBrokerIdError
from kafka_utils.kafka_cluster_manager.cluster_info \
.error import InvalidPartitionError
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
1584,
44628,
3457,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428... | 3.439689 | 257 |
'''
Manage Azure API Management API's.
'''
from ... pyaz_utils import _call_az
from . import operation, release, revision, versionset
def import_(path, resource_group, service_name, specification_format, api_id=None, api_revision=None, api_type=None, api_version=None, api_version_set_id=None, description=None, display_name=None, no_wait=None, protocols=None, service_url=None, soap_api_type=None, specification_path=None, specification_url=None, subscription_key_header_name=None, subscription_key_query_param_name=None, subscription_required=None, wsdl_endpoint_name=None, wsdl_service_name=None):
    '''
    Import an API Management API.

    Required Parameters:
    - path -- Required. Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the api management service instance
    - specification_format -- Specify the format of the imported API.

    Optional Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - api_revision -- Describes the Revision of the Api. If no value is provided, default revision 1 is created.
    - api_type -- The type of the API.
    - api_version -- Describes the Version of the Api. If you add a version to a non-versioned API, an Original version will be automatically created and will respond on the default URL
    - api_version_set_id -- Describes the Version Set to be used with the API
    - description -- Description of the API. May include HTML formatting tags.
    - display_name -- Display name of this API.
    - no_wait -- Do not wait for the long-running operation to finish.
    - protocols -- Describes on which protocols(one or more) the operations in this API can be invoked.
    - service_url -- Absolute URL of the backend service implementing this API. Cannot be more than 2000 characters long.
    - soap_api_type -- The type of API when file format is WSDL.
    - specification_path -- File path specified to import the API.
    - specification_url -- Url specified to import the API.
    - subscription_key_header_name -- Specifies the subscription key header name.
    - subscription_key_query_param_name -- Specifies the subscription key query string parameter name.
    - subscription_required -- If true, the API requires a subscription key on requests.
    - wsdl_endpoint_name -- Local name of WSDL Endpoint (port) to be imported.
    - wsdl_service_name -- Local name of WSDL Service to be imported.
    '''
    # locals() here contains exactly the declared parameters (no other local
    # names exist yet); _call_az maps that dict onto the CLI's flags.
    return _call_az("az apim api import", locals())
def create(api_id, display_name, path, resource_group, service_name, api_type=None, authorization_scope=None, authorization_server_id=None, bearer_token_sending_methods=None, description=None, no_wait=None, open_id_provider_id=None, protocols=None, service_url=None, subscription_key_header_name=None, subscription_key_query_param_name=None, subscription_key_required=None, subscription_required=None):
    '''
    Create an API Management API.

    Required Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - display_name -- API name. Must be 1 to 300 characters long.
    - path -- Required. Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the API Management service instance.

    Optional Parameters:
    - api_type -- The type of the API.
    - authorization_scope -- Specifies the OAuth operations scope.
    - authorization_server_id -- Specifies the OAuth authorization server ID.
    - bearer_token_sending_methods -- Specifies the sending methods for bearer token.
    - description -- Description of the API. May include HTML formatting tags.
    - no_wait -- Do not wait for the long-running operation to finish.
    - open_id_provider_id -- Specifies the openid in the authentication setting.
    - protocols -- Describes on which protocols the operations in this API can be invoked.
    - service_url -- Absolute URL of the backend service implementing this API. Cannot be more than 2000 characters long.
    - subscription_key_header_name -- Specifies the subscription key header name.
    - subscription_key_query_param_name -- Specifies the subscription key query string parameter name.
    - subscription_key_required -- Specifies whether subscription key is required during call to this API, true - API is included into closed products only, false - API is included into open products alone, null - there is a mix of products.
    - subscription_required -- If true, the API requires a subscription key on requests.
    '''
    # locals() captures exactly the declared parameters; _call_az turns the
    # dict into "az apim api create" command-line arguments.
    return _call_az("az apim api create", locals())
def show(api_id, resource_group, service_name):
    '''
    Show details of an API Management API.

    Required Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the API Management service instance.
    '''
    # Snapshot the parameters FIRST, before binding any other local name, so
    # the mapping handed to the CLI holds only the declared arguments.
    cli_arguments = locals()
    return _call_az("az apim api show", cli_arguments)
def list(resource_group, service_name, filter_display_name=None, skip=None, top=None):
    '''
    List API Management API's.

    Required Parameters:
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the API Management service instance.

    Optional Parameters:
    - filter_display_name -- Filter of APIs by displayName.
    - skip -- Number of records to skip.
    - top -- Number of records to return.
    '''
    # Snapshot the parameters FIRST, before binding any other local name, so
    # the mapping handed to the CLI holds only the declared arguments.
    cli_arguments = locals()
    return _call_az("az apim api list", cli_arguments)
def delete(api_id, resource_group, service_name, delete_revisions=None, if_match=None, no_wait=None, yes=None):
    '''
    Delete an API Management API.

    Required Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the API Management service instance.

    Optional Parameters:
    - delete_revisions -- Delete all revisions of the Api.
    - if_match -- ETag of the Entity.
    - no_wait -- Do not wait for the long-running operation to finish.
    - yes -- Do not prompt for confirmation.
    '''
    # locals() captures exactly the declared parameters for the CLI call.
    return _call_az("az apim api delete", locals())
def update(api_id, resource_group, service_name, add=None, api_type=None, description=None, display_name=None, force_string=None, if_match=None, no_wait=None, path=None, protocols=None, remove=None, service_url=None, set=None, subscription_key_header_name=None, subscription_key_query_param_name=None, subscription_required=None, tags=None):
    '''
    Update an API Management API.

    Required Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - service_name -- The name of the API Management service instance.

    Optional Parameters:
    - add -- Add an object to a list of objects by specifying a path and key value pairs. Example: --add property.listProperty <key=value, string or JSON string>
    - api_type -- The type of the API.
    - description -- Description of the API. May include HTML formatting tags.
    - display_name -- API name. Must be 1 to 300 characters long.
    - force_string -- When using 'set' or 'add', preserve string literals instead of attempting to convert to JSON.
    - if_match -- ETag of the Entity. Not required when creating an entity, but required when updating an entity.
    - no_wait -- Do not wait for the long-running operation to finish.
    - path -- Required. Relative URL uniquely identifying this API and all of its resource paths within the API Management service instance.
    - protocols -- Describes on which protocols the operations in this API can be invoked.
    - remove -- Remove a property or an element from a list. Example: --remove property.list <indexToRemove> OR --remove propertyToRemove
    - service_url -- Absolute URL of the backend service implementing this API. Cannot be more than 2000 characters long.
    - set -- Update an object by specifying a property path and value to set. Example: --set property1.property2=<value>
    - subscription_key_header_name -- Specifies the subscription key header name.
    - subscription_key_query_param_name -- Specifies the subscription key query string parameter name.
    - subscription_required -- If true, the API requires a subscription key on requests.
    - tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
    '''
    # NOTE: the parameters deliberately shadow builtins ('set') to mirror the
    # CLI flag names; locals() captures exactly these parameters for _call_az.
    return _call_az("az apim api update", locals())
def wait(api_id, name, resource_group, created=None, custom=None, deleted=None, exists=None, interval=None, timeout=None, updated=None):
    '''
    Place the CLI in a waiting state until a condition of an apim api is met.

    Required Parameters:
    - api_id -- API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
    - name -- The name of the api management service instance
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - created -- wait until created with 'provisioningState' at 'Succeeded'
    - custom -- Wait until the condition satisfies a custom JMESPath query. E.g. provisioningState!='InProgress', instanceView.statuses[?code=='PowerState/running']
    - deleted -- wait until deleted
    - exists -- wait until the resource exists
    - interval -- polling interval in seconds
    - timeout -- maximum wait in seconds
    - updated -- wait until updated with provisioningState at 'Succeeded'
    '''
    # locals() captures exactly the declared parameters for the CLI call.
    return _call_az("az apim api wait", locals())
| [
7061,
6,
198,
5124,
496,
22134,
7824,
8549,
7824,
338,
13,
198,
7061,
6,
198,
6738,
2644,
12972,
1031,
62,
26791,
1330,
4808,
13345,
62,
1031,
198,
6738,
764,
1330,
4905,
11,
2650,
11,
18440,
11,
6300,
316,
628,
198,
4299,
1330,
410... | 3.615538 | 3,012 |
"""
==================================
Fourier ring correlation of images
==================================
Estimation of the image resolution
----------------------------------
In electron microscopy the Fourier Ring Correlation (FRC) is widely used as a
measure for the resolution of an image. This very practical approach for a quality
measure begins to get traction in fluorescence microscopy. Briefly, the correlation
between two subsets of the same images are Fourier transformed and their overlap
in the Fourier space is measured. The FRC is the normalised cross-correlation
coefficient between two images over corresponding shells in Fourier space transform.
In CLSM usually multiple images of the same sample are recorded. Thus, the
resolution of the image can be estimated by the FRC. Below a few lines of python
code are shown that read a CLSM image, split the image into two sets, and plot
the FRC of the two subsets is shown for intensity images.
#.. plot:: ../examples/imaging/imaging_frc.py
The above approach is used by the software `ChiSurf <https://github.com/fluorescence-tools/chisurf/>`_.
In practice, a set of CLSM images can be split into two subsets. The two subsets
can be used to estimate the resolution of the image.
"""
from __future__ import annotations
import numpy as np
import pylab as plt
import tttrlib
def compute_frc(
        image_1: np.ndarray,
        image_2: np.ndarray,
        bin_width: float = 2.0
):
    """Compute the Fourier Ring Correlation (FRC) of two 2-D images.

    Both images are normalised to unit total intensity and Fourier
    transformed; the normalised cross-correlation is then accumulated over
    rings of equal spatial frequency.

    :param image_1: first 2-D image
    :param image_2: second 2-D image (same shape as ``image_1``)
    :param bin_width: width of the frequency rings (Fourier-space pixels)
    :return: tuple ``(density, bin_edges)`` where ``density[i]`` is the FRC
        of the ring bounded by ``bin_edges[i]`` and ``bin_edges[i + 1]``;
        rings containing no samples yield NaN.
    """
    image_1 = image_1 / np.sum(image_1)
    image_2 = image_2 / np.sum(image_2)
    f1, f2 = np.fft.fft2(image_1), np.fft.fft2(image_2)
    af1f2 = np.real(f1 * np.conj(f2))
    af1_2, af2_2 = np.abs(f1)**2, np.abs(f2)**2
    nx, ny = af1f2.shape
    # Signed frequency indices; negative values index the unshifted FFT
    # arrays from the end, which is exactly the layout np.fft.fft2 produces.
    x = np.arange(-np.floor(nx / 2.0), np.ceil(nx / 2.0)).astype(int)
    y = np.arange(-np.floor(ny / 2.0), np.ceil(ny / 2.0)).astype(int)
    xg, yg = np.meshgrid(x, y, indexing='ij')
    # BUGFIX: the ring radius is sqrt(x**2 + y**2). The previous loop used
    # xi**2 + xi**2, silently ignoring the y frequency component.
    # The per-pixel Python loop is also replaced by vectorized indexing.
    distances = np.sqrt(xg ** 2 + yg ** 2).ravel()
    wf1f2 = af1f2[xg, yg].ravel()
    wf1 = af1_2[xg, yg].ravel()
    wf2 = af2_2[xg, yg].ravel()
    bins = np.arange(0, np.sqrt((nx // 2) ** 2 + (ny // 2) ** 2), bin_width)
    f1f2_r, bin_edges = np.histogram(distances, bins=bins, weights=wf1f2)
    f12_r, _ = np.histogram(distances, bins=bins, weights=wf1)
    f22_r, _ = np.histogram(distances, bins=bins, weights=wf2)
    # Empty rings give 0/0 -> NaN; suppress the expected RuntimeWarning.
    with np.errstate(invalid='ignore', divide='ignore'):
        density = f1f2_r / np.sqrt(f12_r * f22_r)
    return density, bin_edges
# --- Example driver ----------------------------------------------------------
# Reads a Leica SP8 PTU measurement with tttrlib, builds a CLSM image stack,
# splits the frames into two interleaved halves and plots intensity + FRC.
filename = '../../tttr-data/imaging/leica/sp8/da/G-28_C-28_S1_6_1.ptu'
data = tttrlib.TTTR(filename, 'PTU')
line_factor = 1
reading_parameter = {
    "tttr_data": data,
    "marker_frame_start": [4, 6],
    "marker_line_start": 1,
    "marker_line_stop": 2,
    "marker_event_type": 15,
    # if zero the number of pixels is the set to the number of lines
    "n_pixel_per_line": 512 * line_factor,
    "reading_routine": 'SP8',
    "channels": [1],
    "fill": True
}
clsm_image = tttrlib.CLSMImage(**reading_parameter)
fig, ax =plt.subplots(nrows=1, ncols=2, sharex=False, sharey=False)
ax[0].set_title('Intensity')
ax[1].set_title('FRC')
img = clsm_image.intensity
# Even/odd frames are two independent realisations of the same image,
# as required by the FRC resolution estimate.
im1 = img[::2].sum(axis=0)
im2 = img[1::2].sum(axis=0)
frc, frc_bins = compute_frc(im1, im2)
ax[1].plot(frc, label="Intensity")
ax[0].imshow(img.mean(axis=0))
plt.show()
| [
37811,
198,
10052,
855,
198,
37,
280,
5277,
5858,
16096,
286,
4263,
198,
10052,
855,
198,
198,
22362,
18991,
286,
262,
2939,
6323,
198,
3880,
438,
198,
818,
11538,
21145,
11081,
262,
34296,
5277,
12569,
2744,
49501,
357,
37,
7397,
8,
... | 2.360419 | 1,526 |
import ctypes
import os
import struct
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GL.shaders import *
from OpenGL.arrays.arraydatatype import *
from OpenGL.raw.GL.EXT.texture_compression_s3tc import *
from PyEngine3D.Common import logger
# Bytes per pixel (uncompressed formats) or bytes per 4x4 block
# (block-compressed BC* formats), indexed by DXGI_FORMAT value.
dxgi_pixel_or_block_size = [
    0,
    16, 16, 16, 16,
    12, 12, 12, 12,
    8, 8, 8, 8, 8, 8,
    8, 8, 8, 8,
    8, 8, 8, 8,
    4, 4, 4, 4,
    4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4,
    4,
    4, 4, 4,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    1, 1, 1, 1, 1, 1,
    0,  # DXGI_FORMAT_R1_UNORM ehm >.< ( TODO )
    4, 4, 4,
    8, 8, 8,  # BC1
    16, 16, 16,  # BC2
    16, 16, 16,  # BC3
    8, 8, 8,  # BC4
    16, 16, 16,  # BC5
    2, 2,
    4, 4, 4, 4, 4, 4, 4,
    16, 16, 16,  # BC6
    16, 16, 16,  # BC7
    # TODO Complete the rest
]

# DXGI_FORMAT values that are block-compressed (BC1..BC7); for these the
# table above stores the block size rather than a per-pixel size.
dxgi_compressed_formats = [
    70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
    94, 95, 96, 97, 98, 99
]
"""
Copyright (c) 2015 Edoardo "sparkon" Dominici
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Win32 following : https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx
# DDS types -- aliases matching the Win32 field types of the DDS headers.
DDSEnumType = ctypes.c_ulong
# NOTE(review): Win32Types is not defined anywhere in this excerpt --
# presumably provided by code stripped from this file; confirm before use.
DDSMagicNumber = Win32Types.DWORD
DDSFormatCC = Win32Types.DWORD
# Contains all default values used for validation
# Values used for flagging
# 1-to-1 mapping of DDS_PIXELFORMAT https://msdn.microsoft.com/en-us/library/windows/desktop/bb943984(v=vs.85).aspx
# 1-to-1 mapping of DDS_HEADER https://msdn.microsoft.com/en-us/library/windows/desktop/bb943982(v=vs.85).aspx
# 1-to-1 mapping of DDS_HEADER_DXT10 https://msdn.microsoft.com/en-us/library/windows/desktop/bb943983(v=vs.85).aspx
# FormatNotValid is raised when the file provided is not valid, more information in the message
# FormatNotSupported is raised when the file has some feature that is not supported by this implementation
# Represents a single surface of any kind, depending on its position and the DDSTexture info the
# mipleve or array index can be deduced
# Represents a loaded DDSFile, the name might be misleading since multiple textures or texturecubes can be
# contained inside here
if __name__ == "__main__":
    # Smoke test: both entry points should parse the same sample file.
    # NOTE(review): loadDDS and DDSTexture are not defined in this excerpt --
    # presumably defined in code stripped from this file; confirm before running.
    # 1
    loadDDS("Externals/Textures/dds_test.dds")
    # 2
    dds_texture = DDSTexture()
    dds_texture.load("Externals/Textures/dds_test.dds")
    for surface in dds_texture.surfaces:
        print(surface)
11748,
269,
19199,
198,
11748,
28686,
198,
11748,
2878,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
30672,
13,
8763,
1330,
1635,
198,
6738,
30672,
13,
8763,
52,
1330,
1635,
198,
6738,
30672,
13,
8763,
13,
1477,
9972,
1330,
1635,... | 2.750413 | 1,210 |
"""This module provides EIGENVAL."""
from __future__ import division, print_function
import csv
import sys
from pathlib import Path
from logging import DEBUG, INFO, Formatter, StreamHandler, getLogger
import numpy as np
from typing import Dict, Optional, Sequence, Tuple, List, IO, Union
from vaspy.tools import open_by_suffix
try:
    import matplotlib.pyplot as plt
except ImportError:
    # Plotting is optional: without matplotlib only the draw-related
    # methods (figure/show) are unusable.
    sys.stderr.write("Install matplotlib, or you cannot use methods relating to draw\n")

# logger: emits through its own stream handler and does not propagate to
# the root logger, so importing this module does not alter global logging.
LOGLEVEL = INFO
logger = getLogger(__name__)
fmt = "%(asctime)s %(levelname)s %(name)s :%(message)s"
formatter = Formatter(fmt)
handler = StreamHandler()
handler.setLevel(LOGLEVEL)
logger.setLevel(LOGLEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
logger.propagate = False
class EnergyBand(object):
    """Simple band structure object for analyzing by using ipython.

    Class for band structure

    Attributes
    ----------
    kvecs: numpy.ndarray
        kvectors
    kdistances: numpy.ndarray
        cumulative distance along the k-path
    numk: int
        number of kpoints
    nbands: int
        number of bands
    nspin: int
        spin character
    energies: numpy.ndarray
        energies[spin_i, k_i, band_i], where spin_i, k_i, and band_i are spin-,
        k- and band-index, respectively.
    label: dict
        used as a label (data 'title' such as '#k', 'Energy') in str format

    Parameters
    ----------
    kvecs: numpy.ndarray
        1D array data of k-vectors.
    energies: numpy.ndarray
        1D array data of energies
    nspin: int
        number of spin: '1' means No-spin. '2' means collinear spin,
        '4' means noncollinear spin.
        In this class does not distinguish non-collinear spin
        and No-spin. (default is 1)
    """

    def __init__(
        self,
        kvecs: Sequence[float] = (),
        energies: Sequence[float] = (),
        nspin: int = 1,
    ) -> None:
        """Initialize."""
        self.kvecs: np.ndarray = np.array(kvecs)
        self.numk: int = len(self.kvecs)
        self.label: Dict[str, List[str]] = {}
        try:
            self.nbands: int = len(energies) // len(kvecs)
        except ZeroDivisionError:
            self.nbands = 0
        self.energies: np.ndarray = np.array(energies)
        self.nspin = nspin
        if self.nspin == 1:  # standard
            self.label["spin"] = [""]
            self.label["energy"] = ["Energy"]
        elif self.nspin == 2:  # spin-polarized
            self.label["energy"] = ["Energy_up", "Energy_down"]
            self.label["spin"] = ["_up", "_down"]
        elif self.nspin == 4:  # non-collinear
            self.label["energy"] = ["Energy"]
            self.label["spin"] = ["_mT", "_mX", "_mY", "_mZ"]
        self.label["k"] = ["#k"]

    @property
    def kdistances(self) -> np.ndarray:
        """Return cumulative distances along the k-path (first point at 0)."""
        return np.cumsum(
            np.linalg.norm(
                np.concatenate((np.array([[0, 0, 0]]), np.diff(self.kvecs, axis=0))),
                axis=1,
            )
        )

    def fermi_correction(self, fermi: float) -> None:
        """Correct the Fermi level.

        Parameters
        ----------
        fermi: float
            value of the Fermi level (subtracted from all energies).
        """
        self.energies -= fermi

    def make_label(self, *keys: str) -> List[str]:
        """Return the label array used for CSV-like data.

        Parameters
        ----------
        keys: tuple
            keys of self.label (e.g. "k", "energy") to concatenate, in order.
        """
        label_list = []
        for key in keys:
            for tmp in self.label[key]:
                label_list.append(tmp)
        return label_list

    def to_3dlist(self) -> List[List[List[float]]]:
        """Return 3D nested list.

        list[band_i, [k_i, energy, (energy_down)]]

        This list format would be useful for str output
        """
        bandstructure = []
        for energies in self.energies.T.tolist():
            band = []
            for k, energy in zip(self.kdistances[:, np.newaxis].tolist(), energies):
                k.extend(energy)
                band.append(k)
            bandstructure.append(band)
        return bandstructure

    def to_csv(self, csv_file: str, blankline: bool = True) -> None:
        """Write data to csv file.

        Parameters
        ------------
        csv_file: str
            filename for output
        blankline: boolean
            If True (default), a blank line is inserted between band data
        """
        label_str: str = "\t".join(self.make_label("k", "energy")) + "\n"
        with open(csv_file, "w") as fhandle:
            fhandle.writelines(label_str)
            writer = csv.writer(fhandle, delimiter="\t")
            for band_i in self.to_3dlist():
                writer.writerows(band_i)
                if blankline:
                    fhandle.writelines("\n")

    def __str__(self) -> str:
        """Return the str object.

        Returns
        --------
        str
            a string representation of EnergyBand.
            **Useful for gnuplot and Igor**.
        """
        labels = self.make_label("k", "energy")
        output = labels[0]
        for label in labels[1:]:
            output += "\t" + label
        output += "\n"
        list3d = self.to_3dlist()
        for band_i in list3d:
            for line in band_i:
                output += "{0:.8e}".format(line[0])
                for energy in line[1:]:
                    output += "\t{0:.8e}".format(energy)
                output += "\n"
            output += "\n"
        return output

    # NOTE: the return annotation is a string on purpose -- matplotlib is an
    # optional dependency (see the guarded import above); an eager plt.Axes
    # annotation would raise NameError at class creation when it is missing.
    def figure(self, color: str = "blue", spin_i: int = 0) -> "plt.Axes":
        """Return Axes object of the energy band.

        Parameters
        -----------
        color: str, optional (default is 'blue')
            color of the band line
        spin_i: spin_index
            default is 0

        Returns
        ---------
        matplotlib.pyplot.Axes

        Example
        --------
        Here is a typical code::

            fig = plt.figure()
            ax = band.figure(color='blue')
            ax.set_ylabel('Energy ( eV )')
            ax.set_ylim(-5, 5)
            ax.set_xlim(0, 4)
            plt.show()
        """
        # Plain loop (not a throwaway comprehension): plotting is a side
        # effect, the list of Line2D results is never used.
        for band_i in range(self.energies.shape[2]):
            plt.plot(self.kdistances, self.energies[spin_i, :, band_i], color=color)
        return plt.gca()

    def show(
        self, yrange: Optional[Tuple[float, float]] = None, spin_i: int = 0
    ) -> None:
        """Draw band structure by using matplotlib.

        For 'just seeing' use.

        Parameters
        ----------
        yrange: tuple, optional (default: all range)
            Minimum and maximum value of the y-axis.
            If not specified, use the matplotlib default value.
        spin_i: int (default is 0 for no spin or 'up' spin)
            Spin index. For spin-polarized collinear band
        """
        for band_i in range(self.energies.shape[2]):
            plt.plot(self.kdistances, self.energies[spin_i, :, band_i], color="blue")
        if yrange is not None:
            plt.ylim([yrange[0], yrange[1]])
        plt.xlim([self.kdistances[0], self.kdistances[-1]])
        plt.ylabel(self.label["energy"][spin_i] + " (eV)")
        plt.show()

    def to_physical_kvector(
        self,
        # Immutable tuple default (converted below) instead of a mutable
        # np.array default argument.
        recvec: Sequence[Sequence[float]] = (
            (1.0, 0.0, 0.0),
            (0.0, 1.0, 0.0),
            (0.0, 0.0, 1.0),
        ),
    ) -> None:
        """Change kvec unit to inverse AA.

        Parameters
        -----------
        recvec: array, numpy.ndarray, optional (default is the unit vector)
            reciprocal vector

        Notes
        -----
        Don't forget that the reciprocal vector used
        in VASP needs 2PI to match the conventional
        unit of the wavevector.
        """
        logger.debug("recvec: {}".format(recvec))
        logger.debug("self.kvecs: {}".format(self.kvecs))
        recvec = np.array(recvec)
        self.kvecs = np.array([recvec.dot(kvecs) for kvecs in self.kvecs])
class EIGENVAL(EnergyBand):
    """Class for storing the data of EIGENVAL file.

    Parameters
    -----------
    filename: str, Path
        File name of 'EIGENVAL'

    Attributes
    ----------
    natom: int
        Number of atoms
    """

    def __init__(self, filename: Union[str, Path, None] = None) -> None:
        """Initialize.  If filename is given, parse it immediately."""
        super(EIGENVAL, self).__init__()
        # number of atoms; filled in by load_file()
        self.natom = 0
        #
        if filename:
            self.load_file(open_by_suffix(str(filename)))

    def __getitem__(self, item: int) -> Tuple[List[float], List[List[float]]]:
        """Return the (k-vector, energies) pair at index *item*.

        Parameters
        ----------
        item: int
            index of k-vector

        Returns
        -------
        Tuple of the k-vector (list of 3 floats) and the per-band energies
        at that k-point.
        """
        energies: List[List[List[float]]] = self.energies.transpose(1, 2, 0).tolist()
        kvec: List[List[float]] = self.kvecs.tolist()
        return list(zip(kvec, energies))[item]

    def __len__(self) -> int:
        """Return numk as the result of len()."""
        return self.numk

    def load_file(self, thefile: IO[str]) -> None:
        """Parse EIGENVAL.

        Populates natom, nspin, numk, nbands, kvecs and energies, then
        closes *thefile*.
        """
        # line 1: natom, <unused>, <unused>, nspin
        self.natom, _, _, self.nspin = [int(i) for i in next(thefile).split()]
        if self.nspin == 2:
            self.label["energy"] = ["Energy_up", "Energy_down"]
        else:
            self.label["energy"] = ["Energy"]
        # lines 2-5 are header/comment lines and carry no band data
        next(thefile)
        next(thefile)
        next(thefile)
        next(thefile)
        # line 6: <number of electrons>, numk, nbands
        _, self.numk, self.nbands = [int(i) for i in next(thefile).split()]
        self.kvecs = []
        self.energies = []
        for _ in range(self.numk):
            # the first line in each k-point block is blank
            next(thefile)
            self.kvecs.append([float(i) for i in next(thefile).split()[0:3]])
            for _ in range(self.nbands):
                # band line: band-index followed by nspin energy columns
                self.energies.append(
                    [float(i) for i in next(thefile).split()[1 : self.nspin + 1]]
                )
        self.kvecs = np.array(self.kvecs)
        # reorder the flat (numk*nbands, nspin) list into (nspin, numk, nbands)
        self.energies = np.array(self.energies).T.reshape(
            self.nspin, self.numk, self.nbands
        )
        thefile.close()
| [
37811,
1212,
8265,
3769,
412,
3528,
1677,
23428,
526,
15931,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
11,
3601,
62,
8818,
198,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
18931,
1330,
... | 2.057358 | 5,056 |
# Home Assistant python_script: cast a Twitch stream to a Chromecast.
# NOTE(review): 'data', 'hass' and 'logger' are not defined here -- they are
# presumably injected by the python_script integration; verify in that context.
streamInput = data.get('stream_name').lower()
chromecastInput = data.get('media_player')
chromecast = chromecastInput

# Friendly spoken names -> Twitch channel names.
streams = {
    "overwatchleague": "overwatchleague",
    "overwatch league": "overwatchleague",
    "owl": "overwatchleague",
    "lcs": "riotgames",
    "ninja": "ninja",
    "lupo": "drlupo",
    "doctor lupo": "drlupo",
    "dr . lupo": "drlupo",
    "tim": "timthetatman",
    "gdq": "gamesdonequick",
    "games done quick": "gamesdonequick",
    "shroud": "shroud",
    "seagull": "a_seagull",
    "sneaky": "c9sneaky",
    "hydra": "hydramist"
}

# Unrecognized names are passed through as-is (assumed to be channel names).
if streamInput in streams:
    streamToCast = streams[streamInput]
else:
    streamToCast = streamInput

url = "https://twitch.tv/"+streamToCast

# Map a numeric index or spoken device name to the media_player entity suffix;
# anything unknown falls back to the living room TV.
if (chromecast == 1 or chromecast == 'living room tv'):
    mediaPlayer = "living_room_tv"
elif (chromecast == 2 or chromecast == 'bedroom tv'):
    mediaPlayer = "sonas_hot_chromecast"
elif (chromecast == 3):
    mediaPlayer = "dining_room"
elif (chromecast == 4):
    mediaPlayer = "kitchen"
elif (chromecast == 5):
    mediaPlayer = "living_room_speaker"
elif (chromecast == 6):
    mediaPlayer = "music_flow2034"
elif (chromecast == 7 or chromecast == 'kitchen display'):
    mediaPlayer = "kitchen_display"
else:
    mediaPlayer = "living_room_tv"

logger.info("Streaming {} to {}".format(streamInput, mediaPlayer))
hass.services.call("media_extractor", "play_media", {"entity_id": "media_player."+mediaPlayer, "media_content_id": url, "media_content_type": "video"})
| [
5532,
20560,
796,
1366,
13,
1136,
10786,
5532,
62,
3672,
27691,
21037,
3419,
198,
28663,
43299,
20560,
796,
1366,
13,
1136,
10786,
11431,
62,
7829,
11537,
198,
28663,
43299,
796,
15358,
43299,
20560,
198,
198,
5532,
82,
796,
1391,
198,
... | 2.59375 | 576 |
"""
Utilities used across the project.
"""
# Core
from . import core
from .core import * # noqa
# Stats
from . import stats
from .stats import * # noqa
__all__ = []
__all__.extend(core.__all__)
__all__.extend(stats.__all__)
| [
37811,
198,
18274,
2410,
973,
1973,
262,
1628,
13,
198,
37811,
198,
198,
2,
7231,
198,
6738,
764,
1330,
4755,
198,
6738,
764,
7295,
1330,
1635,
220,
1303,
645,
20402,
198,
198,
2,
20595,
198,
6738,
764,
1330,
9756,
198,
6738,
764,
3... | 2.705882 | 85 |
from src.data.models.election_event import ElectionEvent
from src.services.election_history_service import ElectionHistoryService
| [
6738,
12351,
13,
7890,
13,
27530,
13,
14300,
62,
15596,
1330,
14219,
9237,
198,
6738,
12351,
13,
30416,
13,
14300,
62,
23569,
62,
15271,
1330,
14219,
18122,
16177,
628
] | 4.517241 | 29 |
import yaml
from getpass import getpass
from concurrent.futures import ThreadPoolExecutor, wait
from netmiko import ConnectHandler
# Upper bound on concurrent device connections opened by the thread pool.
MAX_WORKERS = 3

if __name__ == "__main__":
    # NOTE(review): main() is not defined in this excerpt -- presumably
    # defined elsewhere in the file; confirm before running.
    main()
| [
11748,
331,
43695,
198,
6738,
651,
6603,
1330,
651,
6603,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
11,
4043,
198,
6738,
2010,
76,
12125,
1330,
8113,
25060,
628,
198,
22921,
62,
33249,
4877,
796,
513,
628,
... | 3.147541 | 61 |
"""
A question to discover the text of an element or many elements. Questions
must be asked with an expected resolution, like so:
the_actor.should_see_the(
(Text.of_the(WELCOME_BANNER), ReadsExactly("Welcome!")),
)
"""
from typing import List, Union
from ..actor import Actor
from ..pacing import beat
from ..target import Target
from .base_question import BaseQuestion
class Text(BaseQuestion):
    """
    A question about the text shown in one element or in several elements,
    as seen by an |Actor|. Build it through the static |Text.of| (alias of
    |Text.of_the|) and |Text.of_all| methods. Typical invocations look like:

        Text.of(THE_WELCOME_HEADER)
        Text.of_all(SEARCH_RESULTS)

    The resulting question is then handed to the |Actor| to be answered.
    """

    target: Target
    multi: bool

    @staticmethod
    def of_the(target: Target) -> "Text":
        """
        Supplies the target to read, where the target is expected to
        describe a single element or the first of many matches
        (e.g. "#loginlink").

        Args:
            target: the |Target| describing the element to read.

        Returns:
            |Text|
        """
        return Text(target=target)

    @staticmethod
    def of(target: Target) -> "Text":
        """Syntactic sugar for |Text.of_the|"""
        return Text.of_the(target)

    @staticmethod
    def of_all(multi_target: Target) -> "Text":
        """
        Supplies the target to read, where the target is expected to match
        several elements (e.g. "tr.report").

        Args:
            multi_target: the |Target| describing the elements to read.

        Returns:
            |Text|
        """
        return Text(target=multi_target, multi=True)

    @beat("{} reads the text from the {target}.")
    def answered_by(self, the_actor: Actor) -> Union[str, List[str]]:
        """
        Directs the supplied actor to look at the page and report the text
        found.

        Args:
            the_actor: the |Actor| who will answer the question.

        Returns:
            str: the text of the single element found by target.
            List[str]: the text of all elements found by target.
        """
        if not self.multi:
            return self.target.found_by(the_actor).text
        elements = self.target.all_found_by(the_actor)
        return [element.text for element in elements]
| [
37811,
198,
32,
1808,
284,
7073,
262,
2420,
286,
281,
5002,
393,
867,
4847,
13,
20396,
198,
27238,
307,
1965,
351,
281,
2938,
6323,
11,
588,
523,
25,
628,
220,
220,
220,
262,
62,
11218,
13,
21754,
62,
3826,
62,
1169,
7,
198,
220,
... | 2.540541 | 925 |
import itertools
# NOTE(review): look_and_say is not defined in this excerpt -- presumably
# defined earlier in the file; confirm before running.
print(look_and_say(8))
11748,
340,
861,
10141,
198,
198,
4798,
7,
5460,
62,
392,
62,
16706,
7,
23,
4008
] | 2.5 | 16 |
# coding: utf-8
import datetime
import unittest
import fastobo
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
4818,
8079,
198,
11748,
555,
715,
395,
198,
198,
11748,
3049,
20391,
628,
198
] | 2.791667 | 24 |
'''Build a single tuple holding product names and their prices, alternating
name/price. Finally print a price listing, laying the data out in tabular
form. (Exercise statement originally in Portuguese.)'''
lista = ('Lápis', 1.5,
         'Borracha', 1.75,
         'Caneta', 2.5,
         'Corretivo', 3.5,
         'estojo', 15.50,
         'caderno', 34.90,
         'agenda', 39.90)
print('-' * 38)
print(f'{"LISTA DE PREÇOS":^40}')
print('-' * 38)
# Names sit at even indices, prices at the following odd indices: pair them
# with zip instead of juggling index parity inside a range(len(...)) loop.
for produto, preco in zip(lista[::2], lista[1::2]):
    print(f'{produto:.<30}', end='')
    print(f'R$ {preco:>5.2f}')
print('-' * 38)
| [
7061,
6,
34,
5034,
23781,
1430,
64,
8358,
3478,
3099,
334,
2611,
12777,
489,
64,
6184,
118,
77,
3970,
401,
299,
2586,
390,
40426,
315,
418,
304,
384,
385,
2461,
452,
418,
662,
16175,
418,
11,
198,
2616,
4726,
25792,
10782,
544,
13,
... | 1.920886 | 316 |
#!/usr/bin/env python3
"""
Run the command to compile cyfolds, optionally removing unneeded files.

Note this command also works (but doesn't set compiler options):
   cythonize -a -i cyfolds.pyx
"""

import sys, os

# Use the interpreter running this script so the extension is built for it.
python_executable = sys.executable
compile_command = python_executable + " setup.py build_ext --inplace"
print("Running compile command:\n   {}\n".format(compile_command))
os.system(compile_command)

# Any answer other than an explicit "no" variant (including just pressing
# Enter) deletes the generated intermediate files.
# NOTE(review): os.remove raises FileNotFoundError if the build failed and
# the files were never produced -- confirm that is acceptable here.
yesno = input("\nCleanup unneeded files 'cyfolds.html' and 'cyfolds.c'"
              " (default is 'y')? [yn] ")
if yesno not in ["N", "n", "no", "No", "NO"]:
    os.remove("cyfolds.html")
    os.remove("cyfolds.c")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
198,
10987,
262,
3141,
284,
17632,
3075,
69,
10119,
11,
42976,
10829,
555,
27938,
3696,
13,
198,
198,
6425,
428,
3141,
635,
2499,
357,
4360,
1595,
470,
900,
17050,
3689... | 2.640496 | 242 |
from django.contrib.auth.models import BaseUserManager
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
7308,
12982,
13511,
628
] | 3.733333 | 15 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cal3d(AutotoolsPackage):
"""Skeletal 3D character animation library written in C++"""
homepage = 'http://cal3d.sourceforge.net/docs/api/html/cal3dfaq.html'
git = "https://github.com/hlrs-vis/cal3d.git"
maintainers = ['aumuell']
version('master', preferred=False, branch='master')
version('2021.10', commit='8c45a264acb881c026c3e3c2e7a07826aa2eaa01')
version('2018.5.2', commit='8cbf44f8c20a191b67063cb943420ec55e7125b8')
#url "https://github.com/hlrs-vis/libe57.git", branch: "main", revision: "6c6b0c8355d870342f1736bfcc3b5299fe012d4c"
#license "http://libe57.org/license.html"
#head "https://github.com/hlrs-vis/libe57.git", branch: "main"
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('pkg-config', type='build')
configure_directory = 'cal3d'
autoreconf_extra_args = ["-i"]
def cmake_args(self):
"""Populate cmake arguments for Cal3D."""
spec = self.spec
args = []
return args
def test(self):
"""Perform smoke tests on the installed package."""
self.run_test('cal3d_converter', ['--help'], [], installed=True,
purpose=reason, skip_missing=True, work_dir='.')
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 2.424242 | 627 |
# This script is responsible for changing the older metadata format
# (files resources.json and captures.json stored in the resources/
# directory) to the newer format (a properties.json file stored with
# every artifact archive).
import json, os, subprocess, argparse
res = {}
cap = {}
# Read old resources and captures files
# Turn 2014-01-30 into 20140130
# Write properties files locally to properties/ directory
local_root = 'properties'
# Use scp to copy to files server
# Copy properties files locally from properties/ to r/
# Careful not to write the file when it is unchanged - this is for
# the sake of 'make' dependencies.
parser = argparse.ArgumentParser(description='Properties tool')
parser.add_argument('--local', dest='local', action='store_true',
help='store properties files locally into r/ directories')
parser.add_argument('--publish', dest='publish', action='store_true',
help='store properties files onto server')
args = parser.parse_args()
explode()
write_properties()
if args.local: write_to_r()
if args.publish: write_to_server()
| [
2,
770,
4226,
318,
4497,
329,
5609,
262,
4697,
20150,
5794,
198,
2,
357,
16624,
4133,
13,
17752,
290,
23007,
13,
17752,
8574,
287,
262,
4133,
14,
198,
2,
8619,
8,
284,
262,
15064,
5794,
357,
64,
6608,
13,
17752,
2393,
8574,
351,
1... | 3.389571 | 326 |
# Script to perform comparison with CMIP6 models
# On NERSC
"""
source /global/common/software/e3sm/anaconda_envs/load_latest_e3sm_unified_cori-haswell.sh
"""
import glob
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
import matplotlib.transforms as mtransforms
import numpy as np
import numpy.ma as ma
import os
# --- Function to read E3SM Diags metrics for CMIP6 models ---
# --- Function to save data into csv file ---
# --- Main ---
# Variables
variables = \
[
{'name':'Net TOA',
'units':'W m$^{-2}$',
'id':'RESTOM global ceres_ebaf_toa_v4.1',
'exclude':()},
{'name':'SW CRE',
'units':'W m$^{-2}$',
'id':'SWCF global ceres_ebaf_toa_v4.1',
'exclude':()},
{'name':'LW CRE',
'units':'W m$^{-2}$',
'id':'LWCF global ceres_ebaf_toa_v4.1',
'exclude':()},
{'name':'prec',
'units':'mm day$^{-1}$',
'id':'PRECT global GPCP_v2.3',
'exclude':('CIESM',)},
{'name':'tas land',
'units':'K',
'id':'TREFHT land ERA5',
'exclude':()},
{'name':'SLP',
'units':'hPa',
'id':'PSL global ERA5',
'exclude':()},
{'name':'u-200',
'units':'m s$^{-1}$',
'id':'U-200mb global ERA5',
'exclude':()},
{'name':'u-850',
'units':'m s$^{-1}$',
'id':'U-850mb global ERA5',
'exclude':()},
{'name':'Zg-500',
'units':'hm',
'id':'Z3-500mb global ERA5',
'exclude':('KIOST-ESM',)},
]
# Seasons
seasons = ['ANN', 'DJF', 'MAM', 'JJA', 'SON']
# Read CMIP6 data
#path = '/global/cfs/cdirs/e3sm/www/e3sm_diags_for_cmip/*/historical/r1i1p1f1/viewer/table-data'
#path = '/global/cfs/cdirs/e3sm/www/chengzhu/e3sm_diags_for_cmip/CMIP6_20211216_v2_paper_linear/*/historical/r1i1p1f1/viewer/table-data'
#path = '/global/cfs/cdirs/e3sm/www/chengzhu/e3sm_diags_for_cmip/CMIP6_20220324_v2_paper_linear/*/historical/r1i1p1f1/viewer/table-data'
path = '/global/cfs/cdirs/e3sm/www/CMIP6_comparison_1985-2014_E3SMv2_golaz_etal_2022/*/historical/r1i1p1f1/viewer/table-data'
cmip6 = read_e3sm_diags_metrics(path, variables, seasons)
# Read E3SMv2 (coupled)
path = '/global/cfs/cdirs/e3sm/www/golaz/E3SMv2/v2.LR.historical_0101/e3sm_diags/180x360_aave_cmip6/model_vs_obs_1985-2014/viewer/table-data'
E3SMv2 = read_e3sm_diags_metrics(path, variables, seasons, names=['E3SMv2',])
# Read E3SMv2 (composite base)
path = '/global/cfs/cdirs/e3sm/www/golaz/E3SMv2/v2.LR.hist-composite-all/e3sm_diags/180x360_aave_cmip6/model_vs_obs_1985-2014/viewer/table-data'
E3SMv2_composite_base = read_e3sm_diags_metrics(path, variables, seasons, names=['E3SMv2 (composite base)',])
# Read E3SMv2 (composite best)
path = '/global/cfs/cdirs/e3sm/www/golaz/E3SMv2/v2.LR.hist-composite-073_017/e3sm_diags/180x360_aave_cmip6/model_vs_obs_1985-2014/viewer/table-data'
E3SMv2_composite_best = read_e3sm_diags_metrics(path, variables, seasons, names=['E3SMv2 (composite best)',])
# Save to csv
write_csv('cmip6.csv', [cmip6, E3SMv2, E3SMv2_composite_base, E3SMv2_composite_best])
# -----------------------------------------------------------------------------
# Create plot: first only with CMIP6, E3SMv1 and v2
fig = plt.figure(figsize=[12,9])
nsx = 4
nsy = 3
nmodels = len(cmip6['models'])
nvariables = len(variables)
nseasons = len(seasons)
for ivariable in range(nvariables):
# CMIP6 data for box and whiskers
data = []
labels = []
for iseason in range(nseasons):
# Identify model with lowest RMSE
ibest = ma.argmin( cmip6['data'][:,ivariable,iseason].compressed() )
print("Best model %s %s %s" % (variables[ivariable]['name'],seasons[iseason],cmip6['models'][ibest]))
# Remove missing data using 'compressed()' function
data.append( cmip6['data'][:,ivariable,iseason].compressed() )
labels.append(seasons[iseason])
cmip6_stats = cbook.boxplot_stats(data,whis=[0,100],labels=labels)
# Plot panel
ax = plt.subplot(nsy, nsx, ivariable+int(ivariable/3)+1)
ax.set_box_aspect(1)
# CMIP6 ensemble
ax.bxp(cmip6_stats)
# E3SMv1
x = np.arange(nseasons)+0.8
iE3SMv1 = cmip6['models'].index('E3SM-1-0')
ax.scatter(x,cmip6['data'][iE3SMv1,ivariable,:],color='b',marker='>',label="E3SMv1 (0101)")
# E3SMv2 (coupled)
x = np.arange(nseasons)+1.2
ax.scatter(x,E3SMv2['data'][0,ivariable,:],color='r',marker='<',label="E3SMv2 (0101)")
## E3SMv2 (amip)
#x = np.arange(nseasons)+1.4
#ax.scatter(x,E3SMv2_amip['data'][0,ivariable,:],color='r',marker="^")
## E3SMv2 (composite base)
#x = np.arange(nseasons)+0.75
#ax.scatter(x,E3SMv2_composite_base['data'][0,ivariable,:],color='r',marker="*",label="Composite base")
## E3SMv2 (composite best)
#x = np.arange(nseasons)+0.75
#ax.scatter(x,E3SMv2_composite_best['data'][0,ivariable,:],color='GoldenRod',marker="*",label='Composite best')
# Customize plot
ax.set_title('('+chr(97+ivariable)+')', loc="left")
ax.set_title(variables[ivariable]['name']+' ('+variables[ivariable]['units']+')', loc="right")
ax.set_xlim([0.4,nseasons+0.9])
fig.subplots_adjust(wspace=0.3,hspace=0.3)
# Legend base on last subplot
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc=(0.76,0.8))
fig.savefig("cmip6.pdf",bbox_inches='tight')
# -----------------------------------------------------------------------------
# Create plot: second CMIP6, E3SMv2 and composite configurations
fig = plt.figure(figsize=[12,9])
nsx = 4
nsy = 3
nmodels = len(cmip6['models'])
nvariables = len(variables)
nseasons = len(seasons)
for ivariable in range(nvariables):
# CMIP6 data for box and whiskers
data = []
labels = []
for iseason in range(nseasons):
# Identify model with lowest RMSE
ibest = ma.argmin( cmip6['data'][:,ivariable,iseason].compressed() )
print("Best model %s %s %s" % (variables[ivariable]['name'],seasons[iseason],cmip6['models'][ibest]))
# Remove missing data using 'compressed()' function
data.append( cmip6['data'][:,ivariable,iseason].compressed() )
labels.append(seasons[iseason])
cmip6_stats = cbook.boxplot_stats(data,whis=[0,100],labels=labels)
# Plot panel
ax = plt.subplot(nsy, nsx, ivariable+int(ivariable/3)+1)
ax.set_box_aspect(1)
# CMIP6 ensemble
ax.bxp(cmip6_stats)
# E3SMv2 (coupled)
x = np.arange(nseasons)+1.2
ax.scatter(x,E3SMv2['data'][0,ivariable,:],color='r',marker='<',label="E3SMv2 (0101)")
# E3SMv2 (composite base)
x = np.arange(nseasons)+0.8
ax.scatter(x,E3SMv2_composite_base['data'][0,ivariable,:],color='r',marker="*",label="Composite base")
# E3SMv2 (composite best)
x = np.arange(nseasons)+0.8
ax.scatter(x,E3SMv2_composite_best['data'][0,ivariable,:],color='GoldenRod',marker="*",label='Composite best')
# Customize plot
ax.set_title('('+chr(97+ivariable)+')', loc="left")
ax.set_title(variables[ivariable]['name']+' ('+variables[ivariable]['units']+')', loc="right")
ax.set_xlim([0.4,nseasons+0.9])
fig.subplots_adjust(wspace=0.3,hspace=0.3)
# Legend base on last subplot
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc=(0.76,0.8))
fig.savefig("cmip6_composite.pdf",bbox_inches='tight')
| [
2,
12327,
284,
1620,
7208,
351,
16477,
4061,
21,
4981,
198,
198,
2,
1550,
399,
4877,
34,
198,
37811,
198,
10459,
1220,
20541,
14,
11321,
14,
43776,
14,
68,
18,
5796,
14,
272,
330,
13533,
62,
268,
14259,
14,
2220,
62,
42861,
62,
68... | 2.223555 | 3,167 |
import numpy as np
def herding_selection(x, m, mean_=None):
"""
Source: https://github.com/PatrickZH/End-to-End-Incremental-Learning/blob/39d6f4e594e805a713aa7a1deedbcb03d1f2c9cc/utils.py#L176
Parameters
----------
x: the features, n * dimension
m: the number of selected exemplars
Returns
----------
pos_s: the position of selected exemplars
"""
pos_s = []
comb = 0
mu = np.mean(x, axis=0, keepdims=False) if mean_ is None else mean_
for k in range(m):
det = mu * (k + 1) - comb
dist = np.zeros(shape=(np.shape(x)[0]))
for i in range(np.shape(x)[0]):
if i in pos_s:
dist[i] = np.inf
else:
dist[i] = np.linalg.norm(det - x[i])
pos = np.argmin(dist)
pos_s.append(pos)
comb += x[pos]
return pos_s | [
11748,
299,
32152,
355,
45941,
198,
198,
4299,
607,
12083,
62,
49283,
7,
87,
11,
285,
11,
1612,
62,
28,
14202,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
220,
220,
220,
220,
8090,
25,
3740,
1378,
12567,
13,
785,
14,
3271... | 1.945534 | 459 |
import sqlalchemy
| [
11748,
44161,
282,
26599,
628,
198
] | 3.333333 | 6 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .commonwebdriver import SeleniumInfoExtractor
from ..utils import (
ExtractorError,
sanitize_filename,
)
import hashlib
import sys
import traceback
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
import traceback
from ratelimit import limits, sleep_and_retry
from backoff import constant, on_exception
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
302,
198,
198,
6738,
764,
11321,
12384,
26230,
1330,
15300,
47477,
12360,
11627,
40450,
198,
6738,
11485,
26791,
1330,
357... | 2.868263 | 167 |
# Definition of a class with some basic behaviour
class Person:
""" An example class to hold a persons name and age"""
p3 = Person('Adam', 21)
print(p3)
p3.birthday()
print(p3)
| [
2,
30396,
286,
257,
1398,
351,
617,
4096,
9172,
198,
198,
4871,
7755,
25,
198,
197,
37811,
1052,
1672,
1398,
284,
1745,
257,
6506,
1438,
290,
2479,
37811,
628,
198,
79,
18,
796,
7755,
10786,
23159,
3256,
2310,
8,
198,
4798,
7,
79,
... | 3.067797 | 59 |
import copy, json, random
import numpy as np
from corerl.core import ScheduledParameter
from corerl.function import KernelRepresentation
import pdb
import pickle
import csv
import matplotlib.pyplot as plt
from enum import Enum
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
| [
11748,
4866,
11,
33918,
11,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4755,
45895,
13,
7295,
1330,
27774,
6309,
36301,
198,
6738,
4755,
45895,
13,
8818,
1330,
32169,
40171,
341,
198,
11748,
279,
9945,
198,
11748,
2298,
293,
1... | 3.51 | 100 |
""""@package config
Contains all config files necessary for simulator
Rignumber body configuration data
"""
import copy
import time
import inspect
from config.protobuf import *
import math
IS_ACCURATE_TIMER = False
U_X = np.array([1, 0, 0])
U_Y = np.array([0, 1, 0])
U_Z = np.array([0, 0, 1])
| [
15931,
15931,
31,
26495,
4566,
198,
4264,
1299,
477,
4566,
3696,
3306,
329,
35375,
198,
49,
570,
4494,
1767,
8398,
1366,
198,
198,
37811,
198,
11748,
4866,
198,
11748,
640,
198,
11748,
10104,
198,
6738,
4566,
13,
11235,
672,
3046,
1330,... | 2.816514 | 109 |
from django.utils.translation import gettext_lazy as _
CHECK_REJECTION_CATEGORY_TEXT_MAPPING = {
'fiu_investigation_id': _('Associated FIU investigation'),
'intelligence_report_id': _('Associated intelligence report (IR)'),
'other_reason': _('Other reason'),
}
CHECK_REJECTION_CATEGORY_BOOLEAN_MAPPING = {
'payment_source_paying_multiple_prisoners': _('Payment source is paying multiple prisoners'),
'payment_source_multiple_cards': _('Payment source is using multiple cards'),
'payment_source_linked_other_prisoners': _('Payment source is linked to other prisoner/s'),
'payment_source_known_email': _('Payment source is using a known email'),
'payment_source_unidentified': _('Payment source is unidentified'),
'prisoner_multiple_payments_payment_sources': _('Prisoner has multiple payments or payment sources')
}
CHECK_DETAIL_RENDERED_MAPPING = dict(
tuple(CHECK_REJECTION_CATEGORY_TEXT_MAPPING.items()) + tuple(CHECK_REJECTION_CATEGORY_BOOLEAN_MAPPING.items())
)
CHECK_DETAIL_FORM_MAPPING = {
'decision_reason': _('Give further details (optional)'),
'auto_accept_reason': _('Give reason for automatically accepting'),
'rejection_reasons': dict(
tuple(CHECK_REJECTION_CATEGORY_TEXT_MAPPING.items()) + tuple(CHECK_REJECTION_CATEGORY_BOOLEAN_MAPPING.items())
)
}
# This is as custom-defined exception within the API service that we match against
CHECK_AUTO_ACCEPT_UNIQUE_CONSTRAINT_ERROR = \
'An existing AutoAcceptRule is present for this DebitCardSenderDetails/PrisonerProfile pair'
SECURITY_FORMS_DEFAULT_PAGE_SIZE = 20
| [
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
651,
5239,
62,
75,
12582,
355,
4808,
628,
198,
50084,
62,
2200,
23680,
2849,
62,
34,
6158,
38,
15513,
62,
32541,
62,
44,
24805,
2751,
796,
1391,
198,
220,
220,
220,
705,
12463,
84,
62,... | 2.903636 | 550 |
"""
--- Day 19: A Series of Tubes ---
Somehow, a network packet got lost and ended up here. It's trying to follow a routing diagram (your puzzle input), but
it's confused about where to go.
Its starting point is just off the top of the diagram. Lines (drawn with |, -, and +) show the path it needs to take,
starting by going down onto the only line connected to the top of the diagram. It needs to follow this path until it
reaches the end (located somewhere within the diagram) and stop there.
Sometimes, the lines cross over each other; in these cases, it needs to continue going the same direction, and only turn
left or right when there's no other option. In addition, someone has left letters on the line; these also don't change
its direction, but it can use them to keep track of where it's been. For example:
|
| +--+
A | C
F---|----E|--+
| | | D
+B-+ +--+
Given this diagram, the packet needs to take the following path:
* Starting at the only line touching the top of the diagram, it must go down, pass through A, and continue onward to the
first +.
* Travel right, up, and right, passing through B in the process.
* Continue down (collecting C), right, and up (collecting D).
* Finally, go all the way left through E and stopping at F.
Following the path to the end, the letters it sees on its path are ABCDEF.
The little packet looks up at you, hoping you can help it find the way. What letters will it see (in the order it would
see them) if it follows the path? (The routing diagram is very wide; make sure you view it without line wrapping.)
Your puzzle answer was LIWQYKMRP.
--- Part Two ---
The packet is curious how many steps it needs to go.
For example, using the same routing diagram from the example above...
|
| +--+
A | C
F---|--|-E---+
| | | D
+B-+ +--+
...the packet would go:
* 6 steps down (including the first line at the top of the diagram).
* 3 steps right.
* 4 steps up.
* 3 steps right.
* 4 steps down.
* 3 steps right.
* 2 steps up.
* 13 steps left (including the F it stops on).
This would result in a total of 38 steps.
How many steps does the packet need to go?
Your puzzle answer was 16764.
Both parts of this puzzle are complete! They provide two gold stars: **
"""
from collections import deque, namedtuple
import re
Ctx = namedtuple("Ctx", "x, y, previous, steps, letters, visited")
if __name__ == "__main__":
with open("19_a_series_of_tubes.txt") as file:
puzzle = file.readlines()
path, steps = walker(puzzle, (0, 89))
print(f"part 1: {path}")
print(f"part 2: {steps}")
| [
37811,
198,
6329,
3596,
678,
25,
317,
7171,
286,
309,
29080,
11420,
198,
198,
4366,
4919,
11,
257,
3127,
19638,
1392,
2626,
290,
4444,
510,
994,
13,
632,
338,
2111,
284,
1061,
257,
28166,
16362,
357,
14108,
15027,
5128,
828,
475,
198,... | 3.219927 | 823 |
import os
import tsl.global_scope
from tsl.global_scope import *
data = LazyLoader('data', globals(), 'tsl.data')
datasets = LazyLoader('datasets', globals(), 'tsl.datasets')
nn = LazyLoader('nn', globals(), 'tsl.nn')
predictors = LazyLoader('predictors', globals(), 'tsl.predictors')
imputers = LazyLoader('imputers', globals(), 'tsl.imputers')
__version__ = '0.1.0'
epsilon = 5e-8
config = Config()
config_file = os.path.join(config.curr_dir, 'tsl_config.yaml')
if os.path.exists(config_file):
config.load_config_file(config_file)
__all__ = [
'__version__',
'config',
'epsilon',
'logger',
'tsl',
'data',
'datasets',
'nn',
'predictors',
'imputers'
]
| [
11748,
28686,
198,
198,
11748,
256,
6649,
13,
20541,
62,
29982,
198,
6738,
256,
6649,
13,
20541,
62,
29982,
1330,
1635,
198,
198,
7890,
796,
406,
12582,
17401,
10786,
7890,
3256,
15095,
874,
22784,
705,
912,
75,
13,
7890,
11537,
198,
... | 2.313531 | 303 |
from .generated.common_model_pb2 import (
CustomerNumber,
MediaType,
)
from .generated.messaging_model_pb2 import (
OutboundMessage,
VoiceCallAction,
RecordSessionCallAction,
RejectCallAction,
PromptMessageReplyAction,
PromptMessageMenuItemBody,
TextToSpeechVoice,
MessagingChannel
)
def get_valid_keys(target, prefix):
"""Lists the valid keys to be used on specific proto"""
keys = map(lambda key: key.replace(f'{prefix}_', ''), target.keys())
return list(filter(lambda item: item != 'UNSPECIFIED', list(keys)))
def get_enum_value(target, key, prefix):
"""Used to get the provider given the enum"""
try:
return target.Value(f"{prefix}_{key.upper()}")
except Exception:
raise RuntimeError(f"Invalid key {key}")
def get_enum_string(target, value, prefix):
"""Used to get the provider value given the enum"""
try:
keys = target.keys()
if value in keys:
return value.replace(f"{prefix}_", "")
raise ValueError
except Exception:
raise RuntimeError(f"Invalid value {value} for {target}")
| [
6738,
764,
27568,
13,
11321,
62,
19849,
62,
40842,
17,
1330,
357,
198,
220,
220,
220,
22092,
15057,
11,
198,
220,
220,
220,
6343,
6030,
11,
198,
8,
198,
6738,
764,
27568,
13,
37348,
3039,
62,
19849,
62,
40842,
17,
1330,
357,
198,
... | 2.715663 | 415 |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image transformation.
"""
import math
import numpy as np
import cv2
from mindarmour.natural_robustness.transform.image.natural_perturb import _NaturalPerturb
from mindarmour.utils._check_param import check_param_multi_types, check_param_type, check_value_non_negative
from mindarmour.utils.logger import LogUtil
LOGGER = LogUtil.get_instance()
TAG = 'Image Transformation'
class Translate(_NaturalPerturb):
"""
Translate an image.
Args:
x_bias (Union[int, float]): X-direction translation, x = x + x_bias*image_width. Suggested value range
in [-0.1, 0.1].
y_bias (Union[int, float]): Y-direction translation, y = y + y_bias*image_length. Suggested value range
in [-0.1, 0.1].
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Example:
>>> img = cv2.imread('1.png')
>>> img = np.array(img)
>>> x_bias = 0.1
>>> y_bias = 0.1
>>> trans = Translate(x_bias, y_bias)
>>> dst = trans(img)
"""
def __call__(self, image):
"""
Transform the image.
Args:
image (numpy.ndarray): Original image to be transformed.
Returns:
numpy.ndarray, transformed image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
h, w = image.shape[:2]
matrix = np.array([[1, 0, self.x_bias * w], [0, 1, self.y_bias * h]], dtype=np.float)
new_img = cv2.warpAffine(image, matrix, (w, h))
new_img = self._original_format(new_img, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
class Scale(_NaturalPerturb):
"""
Scale an image in the middle.
Args:
factor_x (Union[float, int]): Rescale in X-direction, x=factor_x*x. Suggested value range in [0.5, 1] and
abs(factor_y - factor_x) < 0.5.
factor_y (Union[float, int]): Rescale in Y-direction, y=factor_y*y. Suggested value range in [0.5, 1] and
abs(factor_y - factor_x) < 0.5.
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Example:
>>> img = cv2.imread('1.png')
>>> img = np.array(img)
>>> factor_x = 0.7
>>> factor_y = 0.6
>>> trans = Scale(factor_x, factor_y)
>>> dst = trans(img)
"""
def __call__(self, image):
"""
Transform the image.
Args:
image (numpy.ndarray): Original image to be transformed.
Returns:
numpy.ndarray, transformed image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
h, w = image.shape[:2]
matrix = np.array([[self.factor_x, 0, 0], [0, self.factor_y, 0]], dtype=np.float)
new_img = cv2.warpAffine(image, matrix, (w, h))
new_img = self._original_format(new_img, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
class Shear(_NaturalPerturb):
"""
Shear an image, for each pixel (x, y) in the sheared image, the new value is taken from a position
(x+factor_x*y, factor_y*x+y) in the origin image. Then the sheared image will be rescaled to fit original size.
Args:
factor (Union[float, int]): Shear rate in shear direction. Suggested value range in [0.05, 0.5].
direction (str): Direction of deformation. Optional value is 'vertical' or 'horizontal'.
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Example:
>>> img = cv2.imread('1.png')
>>> img = np.array(img)
>>> factor = 0.2
>>> trans = Shear(factor, direction='horizontal')
>>> dst = trans(img)
"""
def __call__(self, image):
"""
Transform the image.
Args:
image (numpy.ndarray): Original image to be transformed.
Returns:
numpy.ndarray, transformed image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
h, w = image.shape[:2]
if self.direction == 'horizontal':
matrix = np.array([[1, self.factor, 0], [0, 1, 0]], dtype=np.float)
nw = int(w + self.factor * h)
nh = h
else:
matrix = np.array([[1, 0, 0], [self.factor, 1, 0]], dtype=np.float)
nw = w
nh = int(h + self.factor * w)
new_img = cv2.warpAffine(image, matrix, (nw, nh))
new_img = cv2.resize(new_img, (w, h))
new_img = self._original_format(new_img, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
class Rotate(_NaturalPerturb):
"""
Rotate an image of counter clockwise around its center.
Args:
angle (Union[float, int]): Degrees of counter clockwise. Suggested value range in [-60, 60].
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Example:
>>> img = cv2.imread('1.png')
>>> img = np.array(img)
>>> angle = 20
>>> trans = Rotate(angle)
>>> dst = trans(img)
"""
def __call__(self, image):
"""
Transform the image.
Args:
image (numpy.ndarray): Original image to be transformed.
Returns:
numpy.ndarray, rotated image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
h, w = image.shape[:2]
center = (w // 2, h // 2)
matrix = cv2.getRotationMatrix2D(center, -self.angle, 1.0)
cos = np.abs(matrix[0, 0])
sin = np.abs(matrix[0, 1])
# Calculate new edge after rotated
nw = int((h * sin) + (w * cos))
nh = int((h * cos) + (w * sin))
# Adjust move distance of rotate matrix.
matrix[0, 2] += (nw / 2) - center[0]
matrix[1, 2] += (nh / 2) - center[1]
rotate = cv2.warpAffine(image, matrix, (nw, nh))
rotate = cv2.resize(rotate, (w, h))
new_img = self._original_format(rotate, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
class Perspective(_NaturalPerturb):
"""
Perform perspective transformation on a given picture.
Args:
ori_pos (list): Four points in original image.
dst_pos (list): The point coordinates of the 4 points in ori_pos after perspective transformation.
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Example:
>>> img = cv2.imread('1.png')
>>> img = np.array(img)
>>> ori_pos = [[0, 0], [0, 800], [800, 0], [800, 800]]
>>> dst_pos = [[50, 0], [0, 800], [780, 0], [800, 800]]
>>> trans = Perspective(ori_pos, dst_pos)
>>> dst = trans(img)
"""
def __call__(self, image):
"""
Transform the image.
Args:
image (numpy.ndarray): Original image to be transformed.
Returns:
numpy.ndarray, transformed image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
h, w = image.shape[:2]
if self.auto_param:
self._set_auto_param(w, h)
matrix = cv2.getPerspectiveTransform(self.ori_pos, self.dst_pos)
new_img = cv2.warpPerspective(image, matrix, (w, h))
new_img = self._original_format(new_img, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
class Curve(_NaturalPerturb):
"""
Curve picture using sin method.
Args:
curves (union[float, int]): Divide width to curves of `2*math.pi`, which means how many curve cycles. Suggested
value range in [0.1. 5].
depth (union[float, int]): Amplitude of sin method. Suggested value not exceed 1/10 of the length of the
picture.
mode (str): Direction of deformation. Optional value is 'vertical' or 'horizontal'.
auto_param (bool): Auto selected parameters. Selected parameters will preserve semantics of image.
Examples:
>>> img = cv2.imread('x.png')
>>> curves =1
>>> depth = 10
>>> trans = Curve(curves, depth, mode='vertical')
>>> img_new = trans(img)
"""
def __call__(self, image):
"""
Curve picture using sin method.
Args:
image (numpy.ndarray): Original image.
Returns:
numpy.ndarray, curved image.
"""
ori_dtype = image.dtype
_, chw, normalized, gray3dim, image = self._check(image)
shape = image.shape
height, width = shape[:2]
if self.mode == 'vertical':
if len(shape) == 3:
image = np.transpose(image, [1, 0, 2])
else:
image = np.transpose(image, [1, 0])
src_x = np.zeros((height, width), np.float32)
src_y = np.zeros((height, width), np.float32)
for y in range(height):
for x in range(width):
src_x[y, x] = x
src_y[y, x] = y + self.depth * math.sin(x / (width / self.curves / 2 / math.pi))
img_new = cv2.remap(image, src_x, src_y, cv2.INTER_LINEAR)
if self.mode == 'vertical':
if len(shape) == 3:
img_new = np.transpose(img_new, [1, 0, 2])
else:
img_new = np.transpose(image, [1, 0])
new_img = self._original_format(img_new, chw, normalized, gray3dim)
return new_img.astype(ori_dtype)
| [
2,
15069,
33160,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 2.267774 | 4,515 |
import copy
from random import randint, seed
from typing import Any, Dict, Iterable, List
from adagio.instances import WorkflowContext, WorkflowResultCache
from fugue.collections.partition import PartitionSpec
from fugue.dataframe import DataFrame
from fugue.dataframe.array_dataframe import ArrayDataFrame
from fugue.dataframe.utils import _df_eq as df_eq
from fugue.exceptions import FugueWorkflowError
from fugue.execution import NativeExecutionEngine
from fugue.extensions.transformer.convert import transformer
from fugue.workflow.workflow import _FugueInteractiveWorkflow, FugueWorkflow
from fugue.workflow._workflow_context import (_FugueInteractiveWorkflowContext,
FugueWorkflowContext)
from pytest import raises
from triad.exceptions import InvalidOperationError
from triad.collections.schema import Schema
@transformer("*,b:int")
# schema: a:int
| [
11748,
4866,
198,
6738,
4738,
1330,
43720,
600,
11,
9403,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
40806,
540,
11,
7343,
198,
198,
6738,
512,
363,
952,
13,
8625,
1817,
1330,
5521,
11125,
21947,
11,
5521,
11125,
23004,
30562,
19... | 3.14433 | 291 |
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial.
# Full text can be found in LICENSE.md
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import copy
import numpy as np
from progress.bar import Bar
import time
import torch
import math
from lib.models.model import create_model, load_model
from lib.utils.image import get_affine_transform, affine_transform
from lib.utils.debugger import Debugger
from lib.utils.pnp.cuboid_pnp_shell import pnp_shell
from lib.utils.tracker import Tracker
from lib.utils.tracker_baseline import Tracker_baseline
from lib.utils.image import draw_umich_gaussian, gaussian_radius, draw_nvidia_gaussian
from sklearn import mixture
import scipy
| [
2,
15069,
357,
66,
8,
33448,
15127,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
770,
670,
318,
11971,
739,
262,
15127,
8090,
6127,
13789,
532,
8504,
12,
36313,
13,
198,
2,
6462,
2420,
460,
307,
1043,
287,
38559,
24290,
13,
9132,
198,
... | 3.559322 | 236 |
#
# Progression of infection within individuals
#
import random
import numpy as np
from collections import defaultdict
import pyEpiabm as pe
from pyEpiabm.core import Parameters, Person
from pyEpiabm.property import InfectionStatus
from pyEpiabm.utility import StateTransitionMatrix, TransitionTimeMatrix
from .abstract_sweep import AbstractSweep
class HostProgressionSweep(AbstractSweep):
    """Class for sweeping through population and updating host infection status
    and time to next infection status change.
    """

    def __init__(self):
        """Initialise parameters to be used in class methods. State
        transition matrix is set where each row of the matrix corresponds
        to a current infection status of a person. The columns of that
        row then indicate the transition probabilities to the remaining
        infection statuses. Number of infection states is set by
        taking the size of the InfectionStatus enum. Transition time matrix
        is also initialised and associated parameters are called from the
        parameters class.

        Infectiousness progression defines an array used to scale a person's
        infectiousness and which depends on time since the start of the
        infection, measured in timesteps (following what is done in Covidsim).
        """
        # Instantiate state transition matrix
        use_ages = Parameters.instance().use_ages
        coefficients = defaultdict(int, Parameters.instance()
                                   .host_progression_lists)
        matrix_object = StateTransitionMatrix(coefficients, use_ages)
        self.state_transition_matrix = matrix_object.matrix

        self.number_of_states = len(InfectionStatus)
        assert self.state_transition_matrix.shape == \
            (self.number_of_states, self.number_of_states),\
            'Matrix dimensions must match number of infection states'

        # Instantiate transmission time matrix
        time_matrix_object = TransitionTimeMatrix()
        self.transition_time_matrix =\
            time_matrix_object.create_transition_time_matrix()
        # Instantiate parameters to be used in update transition time
        # method
        self.latent_to_symptom_delay =\
            pe.Parameters.instance().latent_to_sympt_delay
        # Defining the length of the model time step (in days, can be a
        # fraction of day as well).
        self.model_time_step = 1 / pe.Parameters.instance().time_steps_per_day
        # Delay is expressed in whole time steps, as used in CovidSim.
        self.delay = np.floor(self.latent_to_symptom_delay /
                              self.model_time_step)

        # Infectiousness progression
        # Instantiate parameters to be used in update infectiousness
        infectious_profile = pe.Parameters.instance().infectiousness_prof
        inf_prof_resolution = len(infectious_profile) - 1
        inf_prof_average = np.average(infectious_profile)
        infectious_period = pe.Parameters.instance().asympt_infect_period
        # Extreme case where model time step would be too small
        max_inf_steps = 2550
        # Define number of time steps a person is infectious:
        num_infectious_ts =\
            int(np.ceil(infectious_period / self.model_time_step))
        if num_infectious_ts >= max_inf_steps:
            raise ValueError('Number of timesteps in infectious period exceeds'
                             + ' limit')
        # Initialisation
        infectious_profile[inf_prof_resolution] = 0
        infectiousness_prog = np.zeros(max_inf_steps)
        # Fill infectiousness progression array by doing linear interpolation
        # of infectious_profile
        for i in range(num_infectious_ts):
            t = (((i * self.model_time_step) / infectious_period)
                 * inf_prof_resolution)
            # Infectiousness value associated to infectiousness profile:
            associated_inf_value = int(np.floor(t))
            t -= associated_inf_value
            if associated_inf_value < inf_prof_resolution:
                infectiousness_prog[i] =\
                    (infectious_profile[associated_inf_value] * (1 - t)
                     + infectious_profile[associated_inf_value + 1] * t)
            else:  # limit case where we define infectiousness to 0
                infectiousness_prog[i] =\
                    infectious_profile[inf_prof_resolution]
        # Scaling
        scaling_param = inf_prof_average
        for i in range(num_infectious_ts + 1):
            infectiousness_prog[i] /= scaling_param
        self.infectiousness_progression = infectiousness_prog

    @staticmethod
    def set_infectiousness(person: Person, time: float):
        """Assigns the initial infectiousness of a person for when they go from
        the exposed infection state to the next state, either InfectAsympt,
        InfectMild or InfectGP. Also assigns the infection start time and
        stores it as an attribute of the person.

        Called right after an exposed person has been given its
        new infection status in the call method below.

        This static method is non private as it is also used by the initial
        infected sweep to give new infected individuals an infectiousness.

        Parameters
        ----------
        Person : Person
            Instance of person class with infection status attributes
        time : float
            Current simulation time

        """
        init_infectiousness = np.random.gamma(1, 1)
        if person.infection_status == InfectionStatus.InfectASympt:
            infectiousness = (init_infectiousness *
                              pe.Parameters.instance().asympt_infectiousness)
            person.initial_infectiousness = infectiousness
        elif (person.infection_status == InfectionStatus.InfectMild or
              person.infection_status == InfectionStatus.InfectGP):
            infectiousness = (init_infectiousness *
                              pe.Parameters.instance().sympt_infectiousness)
            person.initial_infectiousness = infectiousness
        person.infection_start_time = time
        if person.infection_start_time < 0:
            raise ValueError('The infection start time cannot be negative')

    def update_next_infection_status(self, person: Person):
        """Assigns next infection status based on current infection status
        and on probabilities of transition to different statuses. Weights
        are taken from row in state transition matrix that corresponds to
        the person's current infection status. Weights are then used in
        random.choices method to select person's next infection status.

        Parameters
        ----------
        Person : Person
            Instance of person class with infection status attributes

        """
        if person.infection_status in [InfectionStatus.Recovered,
                                       InfectionStatus.Dead]:
            person.next_infection_status = None
        else:
            row_index = person.infection_status.name
            weights = self.state_transition_matrix.loc[row_index].to_numpy()
            # Entries may be per-age-group lists; pick this person's group.
            weights = [w[person.age_group] if isinstance(w, list) else w
                       for w in weights]
            outcomes = range(1, self.number_of_states + 1)

            if len(weights) != len(outcomes):
                raise AssertionError('The number of infection statuses must' +
                                     ' match the number of transition' +
                                     ' probabilities')

            next_infection_status_number = random.choices(outcomes, weights)[0]
            next_infection_status =\
                InfectionStatus(next_infection_status_number)

            person.next_infection_status = next_infection_status

    def update_time_status_change(self, person: Person, time: float):
        """Calculates transition time as calculated in CovidSim,
        and updates the time_of_status_change for the given
        Person, given as the time until next infection status
        for a person who has a new infection status. If it is expected that
        the person will not transition again (for example in Recovered or Dead
        statuses), then the time of status change is set to infinity.

        Parameters
        ----------
        Person : Person
            Instance of Person class with :class:`InfectionStatus` attributes
        time : float
            Current simulation time

        """
        # Defines the transition time. If the person will not transition again,
        # the transition time is set to infinity. Else, the transition time is
        # defined using the TransitionTimeMatrix class, with the method
        # `choose` from the InverseCdf class.
        if person.infection_status == InfectionStatus.Susceptible:
            raise ValueError("Method should not be used to infect people")

        if person.infection_status in [InfectionStatus.Recovered,
                                       InfectionStatus.Dead]:
            transition_time = np.inf
        else:
            row_index = person.infection_status.name
            column_index = person.next_infection_status.name
            transition_time_icdf_object =\
                self.transition_time_matrix.loc[row_index, column_index]
            # Checks for susceptible to exposed case
            # where transition time is zero
            try:
                transition_time =\
                    transition_time_icdf_object.icdf_choose_noexp()
            except AttributeError as e:
                if "object has no attribute 'icdf_choose_noexp'" in str(e):
                    transition_time = transition_time_icdf_object
                    assert isinstance(
                        transition_time_icdf_object,
                        (float, int)), \
                        ("Entries of transition time matrix" +
                         " must either be ICDF" + " objects or numbers")
                else:
                    raise

        # Adds delay to transition time for first level symptomatic infection
        # statuses (InfectMild or InfectGP), as is done in CovidSim.
        if person.infection_status in [InfectionStatus.InfectMild,
                                       InfectionStatus.InfectGP]:
            # Use the delay computed once in __init__ -- the original built a
            # brand-new HostProgressionSweep() here, re-reading the parameters
            # and rebuilding both matrices just to obtain the same constant.
            transition_time += self.delay

        # Assigns the time of status change using current time and transition
        # time:
        if transition_time < 0:
            raise ValueError('New transition time must be larger than' +
                             ' or equal to 0')
        person.time_of_status_change = time + transition_time

    def _updates_infectiousness(self, person: Person, time: float):
        """Updates infectiousness. Scales using the initial infectiousness
        if the person is in an infectious state. Updates the infectiousness to
        0 if the person has just been transfered to Recovered or Dead. Doesn't
        do anything if the person was already in Recovered, Dead, Susceptible,
        or Exposed (ie if the infectiousness of the person was 0).

        Parameters
        ----------
        Person : Person
            Instance of Person class with :class:`InfectionStatus`,
            initial infectiousness, and infection start time attributes
        time : float
            Current simulation time

        """
        # Updates infectiousness with scaling if person is infectious:
        if str(person.infection_status).startswith('InfectionStatus.Infect'):
            scale_infectiousness = self.infectiousness_progression
            time_since_infection = (int((time - person.infection_start_time)
                                        / self.model_time_step))
            person.infectiousness = person.initial_infectiousness *\
                scale_infectiousness[time_since_infection]
        # Sets infectiousness to 0 if person just became Recovered or Dead, and
        # sets its infection start time to None again.
        elif person.infectiousness != 0:
            if person.infection_status in [InfectionStatus.Recovered,
                                           InfectionStatus.Dead]:
                person.infectiousness = 0
                person.infection_start_time = None

    def __call__(self, time: float):
        """Sweeps through all people in the population, updates their
        infection status if it is time and assigns them their next infection
        status and the time of their next status change. Also updates their
        infectiousness.

        Parameters
        ----------
        time : float
            Current simulation time

        """
        for cell in self._population.cells:
            for person in cell.persons:
                if person.time_of_status_change is None:
                    assert person.infection_status \
                        in [InfectionStatus.Susceptible]
                    continue  # pragma: no cover
                while person.time_of_status_change <= time:
                    person.update_status(person.next_infection_status)
                    if person.infection_status in \
                            [InfectionStatus.InfectASympt,
                             InfectionStatus.InfectMild,
                             InfectionStatus.InfectGP]:
                        self.set_infectiousness(person, time)
                    self.update_next_infection_status(person)
                    self.update_time_status_change(person, time)
                self._updates_infectiousness(person, time)
| [
2,
198,
2,
1041,
32383,
286,
10280,
1626,
3925,
198,
2,
198,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
12972,
13807,
72,
397,
76,
355,
613,
198,
6738,
12972,
13807,
72,... | 2.419487 | 5,614 |
# Read the rectangle's dimensions from the user, then report its area and
# perimeter.
# NOTE(review): the original prompts were the garbled strings "Do" and
# "Doer"; presumably "Length"/"Width" were intended -- confirm.
length = int(input("Length: "))
width = int(input("Width: "))

area = length * width
perimeter = 2 * (length + width)

# ``print`` is a function in Python 3; the original used the Python 2
# statement form, which is a SyntaxError alongside the Python 3 ``input()``
# calls above.
print("Area: " + str(area))
print("Perimeter:" + str(perimeter))
| [
13664,
796,
493,
7,
15414,
7203,
5211,
48774,
201,
198,
10394,
796,
493,
7,
15414,
7203,
5211,
263,
48774,
201,
198,
20337,
796,
4129,
9,
10394,
201,
198,
525,
16912,
796,
362,
9,
357,
13664,
10,
10394,
8,
201,
198,
4798,
366,
30547... | 2.646154 | 65 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the command line, discovers the appropriate benchmarks, and runs them.

Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and StoryRunner."""

from telemetry import command_line

# DEPRECATED: Clients should directly import from telemetry.command_line
# TODO(crbug.com/981349): remove when no longer used.
# Module-level alias kept so legacy callers of this module's main() keep
# working.
main = command_line.main
| [
2,
15069,
2211,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
37811,
47,... | 3.915493 | 142 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection anchor generator from config."""
from ..utils import multiple_grid_anchor_generator, grid_anchor_generator
def build(anchor_generator_config):
    """Builds an object detection anchor generator from a config name.

    The hyper-parameters used for each generator are fixed below and mirror
    the corresponding protobuf configs of the TensorFlow Object Detection
    API (``ssd_anchor_generator`` and ``grid_anchor_generator`` messages).

    Args:
      anchor_generator_config: a string selecting the anchor generator;
        either 'ssd_anchor_generator' (6-layer SSD anchors, scales in
        [0.2, 0.95], aspect ratios {1.0, 2.0, 0.5, 3.0, 0.3333}) or
        'grid_anchor_generator' (single-grid anchors with 16-pixel strides,
        scales [0.25, 0.5, 1.0, 2.0] and aspect ratios [0.5, 1.0, 2.0]).

    Returns:
      Anchor generator instance built with the fixed hyper-parameters above.

    Raises:
      ValueError: if `anchor_generator_config` does not name a known
        anchor generator.
    """
    if anchor_generator_config == 'ssd_anchor_generator':
        # None strides/offsets let the generator derive them from the
        # feature-map shapes at call time.
        return multiple_grid_anchor_generator.create_ssd_anchors(
            num_layers=6,
            min_scale=0.2,
            max_scale=0.95,
            scales=None,
            aspect_ratios=(1.0, 2.0, 0.5, 3.0, 0.3333),
            interpolated_scale_aspect_ratio=1.0,
            base_anchor_size=None,
            anchor_strides=None,
            anchor_offsets=None,
            reduce_boxes_in_lowest_layer=True)
    if anchor_generator_config == 'grid_anchor_generator':
        scales = [0.25, 0.5, 1.0, 2.0]
        aspect_ratios = [0.5, 1.0, 2.0]
        return grid_anchor_generator.GridAnchorGenerator(
            scales=[float(scale) for scale in scales],
            aspect_ratios=[float(ratio) for ratio in aspect_ratios],
            base_anchor_size=None,
            anchor_stride=[16, 16],
            anchor_offset=None)
    raise ValueError('Empty anchor generator.')
| [
2,
15069,
2177,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
... | 2.239447 | 1,374 |
import asyncio
import math
import time
from statistics import mean
import aiohttp
from mcsniperpy.util import request_manager
from mcsniperpy.util import utils as util
from mcsniperpy.util.logs_manager import Color as color
from mcsniperpy.util.logs_manager import Logger as log
| [
11748,
30351,
952,
198,
11748,
10688,
198,
11748,
640,
198,
6738,
7869,
1330,
1612,
198,
198,
11748,
257,
952,
4023,
198,
6738,
285,
6359,
45554,
9078,
13,
22602,
1330,
2581,
62,
37153,
198,
6738,
285,
6359,
45554,
9078,
13,
22602,
1330... | 3.469136 | 81 |
# Generated by Django 1.11.1 on 2017-10-16 15:16
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
16,
319,
2177,
12,
940,
12,
1433,
1315,
25,
1433,
198,
11748,
42625,
14208,
13,
7295,
13,
12102,
2024,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
198,
6738,
42625... | 3.116667 | 60 |
import os
| [
11748,
28686,
198
] | 3.333333 | 3 |
from flask import Blueprint

auth = Blueprint('auth', __name__, url_prefix='/auth')

# Imported for its side effect of attaching routes to the ``auth`` blueprint.
# ``rest-api`` is not a legal Python identifier, so the original line was a
# SyntaxError; the submodule is assumed to be named ``rest_api`` -- confirm
# against the package layout.
from . import rest_api  # noqa: E402,F401
| [
6738,
42903,
1330,
39932,
198,
18439,
796,
39932,
10786,
18439,
3256,
11593,
3672,
834,
11,
19016,
62,
40290,
11639,
14,
18439,
11537,
198,
198,
6738,
764,
1330,
1334,
12,
15042,
198
] | 3.451613 | 31 |
from openpyxl import Workbook
from openpyxl import load_workbook
# Import necessary style classes
from openpyxl.styles import Font, Color, Alignment, Border, Side, colors

# --- Create a fresh workbook and write a few values -----------------------
wb = Workbook()
# grab the active worksheet
ws = wb.active
# Data can be assigned directly to cells
ws['A1'] = 42
# Rows can also be appended
ws.append([1, 2, 3])
# Save the file
wb.save('experiments/sample.xlsx')

######### Append a file
# Start by opening the spreadsheet and selecting the main sheet
workbook = load_workbook(filename="experiments/hello.xlsx")
workbook.create_sheet(title="My sheet 2")
sheet = workbook["My sheet 2"]
# Write what you want into a specific cell
sheet["C1"] = "writing ;)"
# Save the spreadsheet
workbook.save(filename="experiments/hello.xlsx")

############ Change Style
# Create a few styles
bold_font = Font(bold=True)
big_blue_text = Font(color=colors.BLUE, size=20)
center_aligned_text = Alignment(horizontal="center")
double_border_side = Side(border_style="double")
square_border = Border(top=double_border_side,
                       right=double_border_side,
                       bottom=double_border_side,
                       left=double_border_side)
# NOTE(review): this looks up a sheet named "My sheet", which was never
# created above -- it raises KeyError unless hello.xlsx already contains a
# sheet with that exact name.  Confirm against the workbook on disk.
sheet = workbook["My sheet"]
# Style some cells!
sheet["A2"].font = bold_font
sheet["A3"].font = big_blue_text
sheet["A5"].border = square_border
# Save the spreadsheet (second save of the same workbook, now with styles)
workbook.save(filename="experiments/hello.xlsx")
6738,
1280,
9078,
87,
75,
1330,
5521,
2070,
201,
198,
6738,
1280,
9078,
87,
75,
1330,
3440,
62,
1818,
2070,
201,
198,
2,
17267,
3306,
3918,
6097,
201,
198,
6738,
1280,
9078,
87,
75,
13,
47720,
1330,
24060,
11,
5315,
11,
978,
16747,
... | 2.637024 | 551 |
# Benchmarking Suite
# Copyright 2014-2017 Engineering Ingegneria Informatica S.p.A.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Developed in the ARTIST EU project (www.artist-project.eu) and in the
# CloudPerfect EU project (https://cloudperfect.eu/)
import configparser
import json
import os
import sys
import uuid
from abc import ABC, abstractmethod
from benchsuite.core.model.exception import ControllerConfigurationException
from benchsuite.core.model.execution import ExecutionEnvironmentRequest, ExecutionEnvironment
| [
2,
25187,
4102,
278,
26264,
198,
2,
15069,
1946,
12,
5539,
14044,
554,
469,
70,
1008,
544,
45255,
1512,
64,
311,
13,
79,
13,
32,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
1... | 3.836431 | 269 |
'''
This file defines weapons and armor.
'''
7061,
6,
198,
818,
10564,
263,
7536,
72,
266,
263,
6559,
370,
2001,
268,
3318,
371,
9116,
301,
2150,
268,
2730,
72,
861,
13,
198,
7061,
6
] | 2.296296 | 27 |
# Copyright 2013 OpenStack Foundation. All rights reserved
# Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AgentDeviceDriver(object):
    """Abstract device driver that defines the API required by LBaaS agent."""

    # NOTE(review): the five bare ``@abc.abstractproperty`` lines below all
    # stack onto ``get_name`` together with ``@abc.abstractmethod``.  In the
    # upstream neutron-lbaas source each one decorates its own property,
    # whose bodies appear to have been lost in this extract -- confirm
    # against the original before relying on this class.
    @abc.abstractproperty
    @abc.abstractproperty
    @abc.abstractproperty
    @abc.abstractproperty
    @abc.abstractproperty
    @abc.abstractmethod
    def get_name(self):
        """Returns unique name across all LBaaS device drivers."""
        pass

    @abc.abstractmethod
    def deploy_instance(self, loadbalancer):
        """Fully deploys a loadbalancer instance from a given loadbalancer."""
        pass

    @abc.abstractmethod
    def undeploy_instance(self, loadbalancer_id, **kwargs):
        """Fully undeploys the loadbalancer instance."""
        pass
@six.add_metaclass(abc.ABCMeta)
| [
2,
15069,
2211,
4946,
25896,
5693,
13,
220,
1439,
2489,
10395,
198,
2,
15069,
1853,
37927,
13200,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
... | 3.010373 | 482 |
import ccxt
import pandas as pd
from itertools import combinations
from twilio.rest import Client

from api import *


# All unordered pairs of moving-average windows; a full crossover signal
# requires every pair to have crossed within the look-back window.
avg_pairs = list(combinations(averages, 2))

message = ''
for coin in coins:
    ticker, data = fetch_data(coin)
    df = pd.DataFrame(
        columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'],
        data=data
    )
    for average in averages:
        df.loc[:, average] = df['close'].rolling(window=average).mean()
        # df.loc[:, average] = df['close'].ewm(span=average).mean()

    # Last 24 rows (presumably one day of hourly candles -- confirm the
    # candle interval).  The original ``df.loc[-24:, averages]``
    # label-sliced a RangeIndex from label -24 and therefore selected
    # *every* row, not the last 24.
    daily = df.iloc[-24:][averages]

    averages_crossed = 0
    for fast, slow in avg_pairs:
        comparison = daily[fast] > daily[slow]
        # A pair has "crossed" when the comparison flips inside the window,
        # i.e. the boolean series holds both True and False.  The original
        # ``True and False in (...)`` parsed as ``True and (False in ...)``
        # and, because ``in`` on a Series tests the *index*, never inspected
        # the values at all.
        if comparison.any() and not comparison.all():
            averages_crossed += 1

    sentiment = None
    if averages_crossed == len(avg_pairs):
        # Direction of the latest bar: every average-to-average change
        # negative -> bearish; every one positive -> bullish.
        last_avg = daily.iloc[-1].pct_change().dropna()
        if len(last_avg) == len(last_avg[last_avg < 0]):  # BEARISH
            sentiment = 'BEARISH'
        elif len(last_avg) == len(last_avg[last_avg > 0]):  # BULLISH
            sentiment = 'BULLISH'

    if sentiment is not None:
        message += sentiment + ': ' + ticker + '\n'

# Send the text message
client = Client(account_sid, auth_token)
if len(message) > 0:
    print(message)
    for recipient in recipients:
        client.messages.create(
            from_=twilio_number,
            body=message,
            to=recipient
        )
| [
11748,
36624,
742,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
6738,
665,
346,
952,
13,
2118,
1330,
20985,
198,
6738,
40391,
1330,
1635,
628,
198,
615,
70,
62,
79,
3468,
796,
1351,
7,
24011,
7... | 2.25 | 608 |
import hail
# Google Cloud Storage paths of the PrimateAI v0.2 score VDS files, keyed by
# reference genome build: '37' is native GRCh37; '38' is the GRCh38 liftover
# of the same scores.
PRIMATE_AI_VDS_PATHS = {
    '37': 'gs://seqr-reference-data/GRCh37/primate_ai/PrimateAI_scores_v0.2.vds',
    '38': 'gs://seqr-reference-data/GRCh38/primate_ai/PrimateAI_scores_v0.2.liftover_grch38.vds',
}
| [
11748,
32405,
198,
198,
4805,
3955,
6158,
62,
20185,
62,
53,
5258,
62,
47,
1404,
7998,
796,
1391,
198,
220,
220,
220,
705,
2718,
10354,
705,
14542,
1378,
41068,
81,
12,
35790,
12,
7890,
14,
10761,
1925,
2718,
14,
1050,
1920,
62,
187... | 1.982143 | 112 |
# Reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbone/vision_transformer.py
import math
from typing import Sequence
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import FFN, PatchEmbed
from mmcv.cnn.utils.weight_init import constant_init, trunc_normal_init, \
uniform_init, xavier_init
from mmcv.runner.base_module import BaseModule, ModuleList
from mmcv.utils.parrots_wrapper import _BatchNorm
from openmixup.utils import get_root_logger, print_log
from ..utils import MultiheadAttention, MultiheadAttentionWithRPE, to_2tuple, \
resize_pos_embed, build_2d_sincos_position_embedding
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class TransformerEncoderLayer(BaseModule):
    """Implements one encoder layer in Vision Transformer.

    Args:
        embed_dims (int): The feature dimension.
        num_heads (int): Parallel attention heads
        feedforward_channels (int): The hidden dimension for FFNs
        drop_rate (float): Probability of an element to be zeroed
            after the feed forward layer. Defaults to 0.
        attn_drop_rate (float): The drop out rate for attention output weights.
            Defaults to 0.
        drop_path_rate (float): Stochastic depth rate. Defaults to 0.
        num_fcs (int): The number of fully-connected layers for FFNs.
            Defaults to 2.
        qkv_bias (bool): enable bias for qkv if True. Defaults to True.
        act_cfg (dict): The activation config for FFNs.
            Defaults to ``dict(type='GELU')``.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to ``dict(type='LN')``.
        init_values (float): The init values of gamma. Defaults to 0.0.
        init_cfg (dict, optional): Initialization config dict.
            Defaults to None.
    """

    # NOTE(review): the two bare ``@property`` decorators below are not
    # attached to any function definition -- upstream mmcls defines the
    # ``norm1``/``norm2`` properties here, whose bodies appear to have been
    # lost in this extract.  As written the class body is incomplete;
    # restore the property bodies from the reference implementation.
    @property
    @property
@BACKBONES.register_module()
class VisionTransformer(BaseBackbone):
"""Vision Transformer.
A PyTorch implement of : `An Image is Worth 16x16 Words: Transformers
for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_
Args:
arch (str | dict): Vision Transformer architecture. If use string,
choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small'
and 'deit-base'. If use dict, it should have below keys:
- **embed_dims** (int): The dimensions of embedding.
- **num_layers** (int): The number of transformer encoder layers.
- **num_heads** (int): The number of heads in attention modules.
- **feedforward_channels** (int): The hidden dimensions in
feedforward modules.
Default: 'base'
img_size (int | tuple): The expected input image shape. Because we
support dynamic input shape, just set the argument to the most
common input image shape. Defaults to 224.
patch_size (int | tuple): The patch size in patch embedding.
Defaults to 16.
in_channels (int): The num of input channels. Defaults to 3.
out_indices (Sequence | int): Output from which stages.
Defaults to -1, means the last stage.
use_window (bool): Whether to use relative positional encoding in the
self-attention. Defaults to False.
drop_rate (float): Probability of an element to be zeroed.
Defaults to 0.
drop_path_rate (float): stochastic depth rate. Defaults to 0.
qkv_bias (bool): Whether to add bias for qkv in attention modules.
Defaults to True.
norm_cfg (dict): Config dict for normalization layer.
Defaults to ``dict(type='LN')``.
final_norm (bool): Whether to add a additional layer to normalize
final feature map. Defaults to True.
with_cls_token (bool): Whether concatenating class token into image
tokens as transformer input. Defaults to True.
output_cls_token (bool): Whether output the cls_token. If set True,
``with_cls_token`` must be True. Defaults to True.
interpolate_mode (str): Select the interpolate mode for position
embeding vector resize. Defaults to "bicubic".
init_values (float, optional): The init value of gamma in
TransformerEncoderLayer.
patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict.
layer_cfgs (Sequence | dict): Configs of each transformer layer in
encoder. Defaults to an empty dict.
"""
arch_zoo = {
**dict.fromkeys(
['s', 'small'], {
'embed_dims': 768,
'num_layers': 8,
'num_heads': 8,
'feedforward_channels': 768 * 3,
}),
**dict.fromkeys(
['b', 'base'], {
'embed_dims': 768,
'num_layers': 12,
'num_heads': 12,
'feedforward_channels': 3072
}),
**dict.fromkeys(
['l', 'large'], {
'embed_dims': 1024,
'num_layers': 24,
'num_heads': 16,
'feedforward_channels': 4096
}),
**dict.fromkeys(
['deit-t', 'deit-tiny'], {
'embed_dims': 192,
'num_layers': 12,
'num_heads': 3,
'feedforward_channels': 192 * 4
}),
**dict.fromkeys(
['deit-s', 'deit-small'], {
'embed_dims': 384,
'num_layers': 12,
'num_heads': 6,
'feedforward_channels': 384 * 4
}),
**dict.fromkeys(
['deit-b', 'deit-base'], {
'embed_dims': 768,
'num_layers': 12,
'num_heads': 12,
'feedforward_channels': 768 * 4
}),
**dict.fromkeys(
['mocov3-s', 'mocov3-small'], {
'embed_dims': 384,
'num_layers': 12,
'num_heads': 12,
'feedforward_channels': 1536,
}),
}
# Some structures have multiple extra tokens, like DeiT.
num_extra_tokens = 1 # cls_token
@property
@staticmethod
def resize_pos_embed(*args, **kwargs):
"""Interface for backward-compatibility."""
return resize_pos_embed(*args, **kwargs)
def _freeze_stages(self):
"""Freeze patch_embed layer, some parameters and stages."""
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
self.cls_token.requires_grad = False
self.pos_embed.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = self.layers[i - 1]
m.eval()
for param in m.parameters():
param.requires_grad = False
if i == (self.num_layers) and self.final_norm:
for param in getattr(self, 'norm1').parameters():
param.requires_grad = False
| [
2,
20984,
25,
3740,
1378,
12567,
13,
785,
14,
9654,
12,
3020,
23912,
14,
3020,
4871,
2649,
14,
21048,
14,
9866,
14,
3020,
565,
82,
14,
27530,
14,
1891,
15992,
14,
10178,
62,
7645,
16354,
13,
9078,
198,
11748,
10688,
198,
6738,
19720... | 2.203106 | 3,348 |
#! /usr/bin/env jython
# -
# Copyright (C) 2011 Sun Ning<classicning@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import re
from xml.etree import ElementTree
from string import Template, whitespace
from . import logger, repos_manager, cache_manager
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
474,
7535,
198,
2,
532,
198,
2,
15069,
357,
34,
8,
2813,
3825,
37400,
27,
49421,
768,
31,
14816,
13,
785,
29,
198,
2,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
... | 3.743516 | 347 |
from check50 import *
import os
| [
6738,
2198,
1120,
1330,
1635,
198,
11748,
28686,
198
] | 3.555556 | 9 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Handle the naming conventions for DM pipeline analysis
"""
from __future__ import absolute_import, division, print_function
import sys
import yaml
from fermipy.jobs.utils import is_null, is_not_null
class NameFactory(object):
    """ Helper class to define file names and keys consistently. """

    # Input configuration file
    ttypeconfig_format = 'config/config_{target_type}.yaml'

    # Random target direction configuration
    randconfig_format = 'config/random_{target_type}.yaml'

    # target keys, these are how we specify various files associated with
    # particular targets

    # Directory for a particular target
    targetdir_format = '{target_type}/{target_name}'

    # Directory for simulations for a particular target
    sim_targetdir_format = '{target_type}_sim/sim_{sim_name}/{target_name}'

    # Targetlist file format
    targetfile_format = '{target_type}/{targetlist}'

    # Roster file format (target list for a simulated realization)
    sim_targetfile_format = '{target_type}_sim/sim_{sim_name}/{targetlist}'

    # Information about a particular target profile
    profilefile_format = '{target_type}/{target_name}/profile_{profile}.yaml'

    # Information about a particular simulated target profile
    sim_profilefile_format = '{target_type}_sim/sim_{sim_name}/{target_name}/profile_{profile}.yaml'

    # SED file for a particular target
    sedfile_format = '{target_type}/{target_name}/sed_{profile}.fits'

    # Simulated SED file for a particular target ({seed} = simulation seed)
    sim_sedfile_format = '{target_type}_sim/sim_{sim_name}/{target_name}/sed_{profile}_{seed}.fits'

    # Stamp files from scatter gather jobs
    stamp_format = 'stamps/{linkname}.stamp'

    # Full filepath template: base directory joined with a local path
    fullpath_format = '{basedir}/{localpath}'
def __init__(self, **kwargs):
"""C'tor. Set baseline dictionary used to resolve names
"""
self.base_dict = kwargs.copy()
def update_base_dict(self, yamlfile):
"""Update the values in baseline dictionary used to resolve names
"""
self.base_dict.update(**yaml.safe_load(open(yamlfile)))
def _format_from_dict(self, format_string, **kwargs):
"""Return a formatted file name dictionary components """
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = format_string.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath
def ttypeconfig(self, **kwargs):
"""Return the name of the input configuration file
"""
return self._format_from_dict(NameFactory.ttypeconfig_format, **kwargs)
def randconfig(self, **kwargs):
"""Return the name of the random direction configuration file
"""
return self._format_from_dict(NameFactory.randconfig_format, **kwargs)
def targetdir(self, **kwargs):
"""Return the name for the directory for a particular target
"""
return self._format_from_dict(NameFactory.targetdir_format, **kwargs)
def sim_targetdir(self, **kwargs):
"""Return the name for the directory for a particular target
"""
return self._format_from_dict(NameFactory.sim_targetdir_format, **kwargs)
def targetfile(self, **kwargs):
"""Return the name for the Target list file
"""
return self._format_from_dict(NameFactory.targetfile_format, **kwargs)
def sim_targetfile(self, **kwargs):
"""Return the name for the Target list file for simulation
"""
return self._format_from_dict(NameFactory.sim_targetfile_format, **kwargs)
def profilefile(self, **kwargs):
"""Return the name of the yaml file with information about a partiuclar profile
"""
return self._format_from_dict(NameFactory.profilefile_format, **kwargs)
def sim_profilefile(self, **kwargs):
"""Return the name of the yaml file with information about a partiuclar profile
"""
return self._format_from_dict(NameFactory.sim_profilefile_format, **kwargs)
def sedfile(self, **kwargs):
"""Return the name for the SED file for a particular target
"""
return self._format_from_dict(NameFactory.sedfile_format, **kwargs)
def sim_sedfile(self, **kwargs):
"""Return the name for the simulated SED file for a particular target
"""
if 'seed' not in kwargs:
kwargs['seed'] = 'SEED'
return self._format_from_dict(NameFactory.sim_sedfile_format, **kwargs)
def stamp(self, **kwargs):
"""Return the path for a stamp file for a scatter gather job"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
return NameFactory.stamp_format.format(**kwargs_copy)
def fullpath(self, **kwargs):
"""Return a full path name for a given file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
return NameFactory.fullpath_format.format(**kwargs_copy)
def resolve_targetfile(self, args, require_sim_name=False): # x
"""Get the name of the targetfile based on the job arguments"""
ttype = args.get('ttype')
if is_null(ttype):
sys.stderr.write('Target type must be specified')
return (None, None)
sim = args.get('sim')
if is_null(sim):
if require_sim_name:
sys.stderr.write('Simulation scenario must be specified')
return (None, None)
else:
sim = None
name_keys = dict(target_type=ttype,
targetlist='target_list.yaml',
sim_name=sim,
fullpath=True)
if sim is None:
targetfile = self.targetfile(**name_keys)
else:
targetfile = self.sim_targetfile(**name_keys)
targets_override = args.get('targetfile')
if is_not_null(targets_override):
targetfile = targets_override
return (targetfile, sim)
def resolve_randconfig(self, args):
"""Get the name of the specturm file based on the job arguments"""
ttype = args.get('ttype')
if is_null(ttype):
sys.stderr.write('Target type must be specified')
return None
name_keys = dict(target_type=ttype,
fullpath=True)
randconfig = self.randconfig(**name_keys)
rand_override = args.get('rand_config')
if is_not_null(rand_override):
randconfig = rand_override
return randconfig
| [
2,
49962,
739,
257,
513,
12,
565,
682,
347,
10305,
3918,
5964,
532,
766,
38559,
24290,
13,
81,
301,
198,
37811,
198,
37508,
262,
19264,
21396,
329,
14848,
11523,
3781,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
1... | 2.505453 | 2,659 |
import threading
import json
import re
import time
import slacker
import websocket
import six
from six.moves import _thread, range
class Driver(object):
"""Functional tests driver. It handles the communication with slack api, so that
the tests code can concentrate on higher level logic.
"""
def _websocket_safe_read(self):
"""Returns data if available, otherwise ''. Newlines indicate multiple messages """
data = ''
while True:
try:
data += '{0}\n'.format(self._websocket.recv())
except Exception:
return data.rstrip()
def _start_dm_channel(self):
"""Start a slack direct messages channel with the test bot"""
response = self.slacker.im.open(self.testbot_userid)
self.dm_chan = response.body['channel']['id']
| [
11748,
4704,
278,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
640,
198,
11748,
1017,
10735,
198,
11748,
2639,
5459,
198,
11748,
2237,
198,
6738,
2237,
13,
76,
5241,
1330,
4808,
16663,
11,
2837,
628,
198,
4871,
12434,
7,
15252,
2599,... | 2.680511 | 313 |
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response
from django.contrib.auth.models import User
from django.forms.formsets import formset_factory
from django.template import RequestContext
from vote.models import *
from vote.forms import *
from live.models import *
import datetime, gspread, dateutil.parser
import madhacks.settings
def main_live(request):
'''
Show the screen prompting the user to enter their email address
before they go on with the voting.
'''
currdatetime = datetime.datetime.now()
events = Event.objects.filter(start_date__lt=currdatetime + datetime.timedelta(hours=1)).filter(end_date__gt=currdatetime).order_by('start_date')
mealevent = MealEvent.objects.all()[0]
return render_to_response('live/main.html',
{'events': events,
'mealevent': mealevent,
'form': EntryForm},
RequestContext(request))
def fetch(request):
'''
Populates the DB with events from google spreadsheet
'''
# Login with your Google account
gc = gspread.login(madhacks.settings.DRIVE_LOGIN, madhacks.settings.DRIVE_PASS)
# Open a worksheet from spreadsheet with one shot
wks = gc.open_by_key("1CoJb3Ondc0OS7dOOp7DQkywdS7W7d96D4QodkwCCghg").sheet1
#snag columns from spreadsheet
dates = wks.col_values(1)[1:] # slice away the first row that has the col name
start_times = wks.col_values(2)[1:]
end_times = wks.col_values(3)[1:]
event_names = wks.col_values(5)[1:]
# loop through events, add them as models in the DB
for i in range(len(event_names)):
if event_names[i]:
e = Event(start_date=dateutil.parser.parse(dates[i]+ " "+ start_times[i]),
end_date = dateutil.parser.parse(dates[i]+ " "+ end_times[i]),
title=event_names[i],
description="")
e.save()
currdatetime = datetime.datetime.now()
events = Event.objects.filter(start_date__lt=currdatetime + datetime.timedelta(hours=1)).filter(end_date__gt=currdatetime)
mealevents = MealEvent.objects.filter()
return render_to_response('live/fetch.html',
{'events': events,
'mealevents': mealevents,
'form': EntryForm},
RequestContext(request))
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
7738,
1060,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
673... | 2.272975 | 1,099 |
# -*- coding: utf-8 -*-
"""
© Michael Widrich, Markus Hofmarcher, 2017
"""
import time
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
16224,
3899,
24801,
7527,
11,
46013,
37745,
3876,
2044,
11,
2177,
198,
198,
37811,
198,
11748,
640,
628
] | 2.617647 | 34 |
__all__ = ["JONSWAP_gamma", "gaussian_gamma", "JONSWAP_gamma", "shape_models"]
#import stormrecon as stormrecon
#import tools as tools
#import io as io
#import plotters as plotters
| [
834,
439,
834,
796,
14631,
41,
1340,
17887,
2969,
62,
28483,
2611,
1600,
366,
4908,
31562,
62,
28483,
2611,
1600,
220,
366,
41,
1340,
17887,
2969,
62,
28483,
2611,
1600,
366,
43358,
62,
27530,
8973,
198,
198,
2,
11748,
6388,
260,
1102... | 2.731343 | 67 |
#!/usr/bin/env python3
from collections import defaultdict, deque
if __name__ == '__main__':
print(part_1())
print(part_2())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3601,
7,
3911,
62,
16,
28955,
198,
220... | 2.615385 | 52 |
"""
This will prompt a user to enter a command for a Tello drone.
Tello SDK v1.0.0 commands = "https://dl-cdn.ryzerobotics.com/downloads/tello/0228/Tello+SDK+Readme.pdf"
Tello SDK v1.3.0 commands = "https://terra-1-g.djicdn.com/2d4dce68897a46b19fc717f3576b7c6a/Tello%20编程相关/For%20Tello/Tello%20SDK%20Documentation%20EN_1.3_1122.pdf"
Tello SDK v2.0 User Guide = "https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20SDK%202.0%20User%20Guide.pdf"
Mission Pad User Guide v1.0 = "https://dl-cdn.ryzerobotics.com/downloads/Tello/Tello%20Mission%20Pad%20User%20Guide.pdf"
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_addr = ("192.168.10.1", 8889)
sock.bind(("", 9000))
while True:
try:
msg = input("Enter a command: ")
if not msg:
break
if "end" in msg:
sock.close()
break
msg = msg.encode()
print(msg)
sent = sock.sendto(msg, tello_addr)
response = sock.recv(1024)
print(response)
except Exception as err:
print(err)
sock.close()
break
| [
37811,
198,
1212,
481,
6152,
257,
2836,
284,
3802,
257,
3141,
329,
257,
309,
11109,
12170,
13,
198,
198,
51,
11109,
26144,
410,
16,
13,
15,
13,
15,
9729,
796,
366,
5450,
1378,
25404,
12,
32341,
13,
563,
9107,
672,
23891,
13,
785,
... | 2.069288 | 534 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx_bsl.sketches.MisraGriesSketch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pyarrow as pa
import six.moves.cPickle as pickle # Pybind only supports cPickle for Python2.7
from tfx_bsl.sketches import MisraGriesSketch
from absl.testing import absltest
from absl.testing import parameterized
_NUM_BUCKETS = 128
if __name__ == "__main__":
absltest.main()
| [
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
2,
921,
743,
733... | 3.389439 | 303 |
# coding=utf-8
from . import livechat_channel
from . import reply_about_models
from . import menu_about_models
from . import wx_user
from . import wx_corpuser
from . import wx_autoreply_model
from . import wx_config_model
from . import res_partner
from . import wxuser_uuid
from . import corpuser_uuid
from . import wx_confirm_wizard
from . import mail_message
from . import wx_userodoouser
from . import wx_par_config
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
764,
1330,
2107,
17006,
62,
17620,
198,
6738,
764,
1330,
10971,
62,
10755,
62,
27530,
198,
6738,
764,
1330,
6859,
62,
10755,
62,
27530,
198,
6738,
764,
1330,
266,
87,
62,
7220,
198,
6738,
764,... | 2.978723 | 141 |
"""Config flow for SenseME."""
import ipaddress
import voluptuous as vol
from aiosenseme import async_get_device_by_ip_address, discover_all
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
from .const import CONF_HOST_MANUAL, CONF_INFO, DOMAIN
DISCOVER_TIMEOUT = 5
class SensemeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle SenseME discovery config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self) -> None:
"""Initialize the SenseME config flow."""
self._discovered_devices = None
async def _async_entry_for_device(self, device):
"""Create a config entry for a device."""
await self.async_set_unique_id(device.uuid)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=device.name,
data={CONF_INFO: device.get_device_info},
)
async def async_step_manual(self, user_input=None):
"""Handle manual entry of an ip address."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
try:
ipaddress.ip_address(host)
except ValueError:
errors[CONF_HOST] = "invalid_host"
else:
device = await async_get_device_by_ip_address(host)
if device is not None:
return await self._async_entry_for_device(device)
errors[CONF_HOST] = "cannot_connect"
return self.async_show_form(
step_id="manual",
data_schema=vol.Schema({vol.Required(CONF_HOST): str}),
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
# start discovery the first time through
if self._discovered_devices is None:
self._discovered_devices = await discover_all(DISCOVER_TIMEOUT)
current_ids = self._async_current_ids()
device_selection = [
device.name
for device in self._discovered_devices
if device.uuid not in current_ids
]
if not device_selection:
return await self.async_step_manual(user_input=None)
device_selection.append(CONF_HOST_MANUAL)
if user_input is not None:
if user_input[CONF_HOST] == CONF_HOST_MANUAL:
return await self.async_step_manual()
for device in self._discovered_devices:
if device == user_input[CONF_HOST]:
return await self._async_entry_for_device(device)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(CONF_HOST, default=device_selection[0]): vol.In(
device_selection
)
}
),
)
| [
37811,
16934,
5202,
329,
24956,
11682,
526,
15931,
198,
11748,
20966,
21975,
198,
198,
11748,
2322,
37623,
5623,
355,
2322,
198,
6738,
257,
4267,
1072,
1326,
1330,
30351,
62,
1136,
62,
25202,
62,
1525,
62,
541,
62,
21975,
11,
7073,
62,
... | 2.115113 | 1,416 |
import cv2
import sys
print("Hello")
target_path = '/Users/Leonardo/Desktop/logo-coritiba.jpg'
reduce_by = 11
img = cv2.imread(target_path)
shape = img.shape
new0 = int(shape[0]/reduce_by)
new1 = int(shape[1]/reduce_by)
print(f"Orginal Shape = {shape}; New Shape ({new0}, {new1}, {shape[2]})")
try:
redu = cv2.resize(img, dsize=(new1, new0), interpolation=cv2.INTER_CUBIC)
except Exception as e:
print(f'Failed to reduce {target_path} by {reduce_by}')
print(e)
sys.exit(1)
out_path = '/Users/Leonardo/Desktop/logo-coritiba_reduced.jpg'
try:
cv2.imwrite(out_path, redu)
except Exception as e:
print(f'Failed to save reduced verison of {target_path}')
print(e)
sys.exit(1)
print(f"Reduced version of {target_path} by {reduce_by}, generated {out_path}")
print("End") | [
11748,
269,
85,
17,
198,
11748,
25064,
198,
198,
4798,
7203,
15496,
4943,
198,
16793,
62,
6978,
796,
31051,
14490,
14,
36185,
13109,
14,
36881,
14,
6404,
78,
12,
10215,
270,
23718,
13,
9479,
6,
198,
445,
7234,
62,
1525,
796,
1367,
2... | 2.34593 | 344 |
import numpy as np
from lenstronomy.PointSource.Types.base_ps import PSBase, _expand_to_array
__all__ = ['LensedPositions']
class LensedPositions(PSBase):
"""
class of a a lensed point source parameterized as the (multiple) observed image positions
Name within the PointSource module: 'LENSED_POSITION'
parameters: ra_image, dec_image, point_amp
If fixed_magnification=True, than 'source_amp' is a parameter instead of 'point_amp'
"""
# def __init__(self, lens_model=None, fixed_magnification=False, additional_image=False):
# super(LensedPositions, self).__init__(lens_model=lens_model, fixed_magnification=fixed_magnification,
# additional_image=additional_image)
def image_position(self, kwargs_ps, kwargs_lens=None, magnification_limit=None, kwargs_lens_eqn_solver=None):
"""
on-sky image positions
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation
solver
:param magnification_limit: float >0 or None, if float is set and additional images are computed, only those
images will be computed that exceed the lensing magnification (absolute value) limit
:param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver
see LensEquationSolver() class for details
:return: image positions in x, y as arrays
"""
if self._additional_image is True:
if kwargs_lens_eqn_solver is None:
kwargs_lens_eqn_solver = {}
ra_source, dec_source = self.source_position(kwargs_ps, kwargs_lens)
ra_image, dec_image = self._solver.image_position_from_source(ra_source, dec_source, kwargs_lens,
magnification_limit=magnification_limit,
**kwargs_lens_eqn_solver)
else:
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
return np.array(ra_image), np.array(dec_image)
def source_position(self, kwargs_ps, kwargs_lens=None):
"""
original source position (prior to lensing)
:param kwargs_ps: point source keyword arguments
:param kwargs_lens: lens model keyword argument list (required to ray-trace back in the source plane)
:return: x, y position (as numpy arrays)
"""
ra_image = kwargs_ps['ra_image']
dec_image = kwargs_ps['dec_image']
x_source, y_source = self._lens_model.ray_shooting(ra_image, dec_image, kwargs_lens)
x_source = np.mean(x_source)
y_source = np.mean(y_source)
return np.array(x_source), np.array(y_source)
def image_amplitude(self, kwargs_ps, kwargs_lens=None, x_pos=None, y_pos=None, magnification_limit=None,
kwargs_lens_eqn_solver=None):
"""
image brightness amplitudes
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), only used when requiring the lens equation
solver
:param x_pos: pre-computed image position (no lens equation solver applied)
:param y_pos: pre-computed image position (no lens equation solver applied)
:param magnification_limit: float >0 or None, if float is set and additional images are computed, only those
images will be computed that exceed the lensing magnification (absolute value) limit
:param kwargs_lens_eqn_solver: keyword arguments specifying the numerical settings for the lens equation solver
see LensEquationSolver() class for details
:return: array of image amplitudes
"""
if self._fixed_magnification:
if x_pos is not None and y_pos is not None:
ra_image, dec_image = x_pos, y_pos
else:
ra_image, dec_image = self.image_position(kwargs_ps, kwargs_lens,
magnification_limit=magnification_limit,
kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)
mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['source_amp'] * np.abs(mag)
else:
point_amp = kwargs_ps['point_amp']
if x_pos is not None:
point_amp = _expand_to_array(point_amp, len(x_pos))
return np.array(point_amp)
def source_amplitude(self, kwargs_ps, kwargs_lens=None):
"""
intrinsic brightness amplitude of point source
When brightnesses are defined in magnified on-sky positions, the intrinsic brightness is computed as the mean
in the magnification corrected image position brightnesses.
:param kwargs_ps: keyword arguments of the point source model
:param kwargs_lens: keyword argument list of the lens model(s), used when brightness are defined in
magnified on-sky positions
:return: brightness amplitude (as numpy array)
"""
if self._fixed_magnification:
source_amp = kwargs_ps['source_amp']
else:
ra_image, dec_image = kwargs_ps['ra_image'], kwargs_ps['dec_image']
mag = self._lens_model.magnification(ra_image, dec_image, kwargs_lens)
point_amp = kwargs_ps['point_amp']
source_amp = np.mean(np.array(point_amp) / np.array(np.abs(mag)))
return np.array(source_amp)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
18896,
301,
1313,
9145,
13,
12727,
7416,
13,
31431,
13,
8692,
62,
862,
1330,
6599,
14881,
11,
4808,
11201,
392,
62,
1462,
62,
18747,
198,
198,
834,
439,
834,
796,
37250,
43,
15385,
21604,
1756... | 2.29175 | 2,509 |