content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import numpy as np
from collections import deque
from blist import blist
from pyBreakDown import explanation as e
class Explainer:
"""
Explainer object.
Parameters
----------
clf : np.array
Sklearn predicition model (regression or classification).
data : np.array
Baseline dataset for algorithm.
colnames : np.array
Dataset feature names.
"""
def explain (self, observation, direction, useIntercept = False, baseline=0):
"""
Make explanation for given observation and dataset.
Method works with any sklearn prediction model
Parameters
----------
observation : np.array
Observation to explain.
direction : str
Could be "up" or "down". Decides the direction of algorithm.
useIntercept : bool
If set, baseline argument will be ignored and baseline will be set to intercept.
baseline : float
Baseline of explanation.
Returns
-------
Explanation
Object that contains influences and descriptions of each relevant attribute.
"""
data = np.copy(self.data)
assert direction in ["up","down"]
observation = self._transform_observation(observation) #expand dims from 1D to 2D if necessary
assert len(self.colnames) == observation.shape[1]
if direction=="up":
exp = self._explain_up(observation, baseline, data)
if direction=="down":
exp = self._explain_down(observation, baseline, data)
mean_prediction = np.mean(self.clf.predict(data))
if useIntercept:
baseline = mean_prediction
bcont = 0
else:
bcont = mean_prediction - baseline
exp.add_intercept(bcont)
exp.add_baseline(baseline)
exp.make_final_prediction()
return exp
| [
11748,
299,
32152,
355,
45941,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
698,
396,
1330,
698,
396,
198,
6738,
12972,
31737,
8048,
1330,
7468,
355,
304,
198,
198,
4871,
5905,
10613,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220... | 2.43179 | 799 |
import matplotlib.pyplot as plt
import jax
import jax.numpy as np
import numpy as onp
import jax.random as random
import tigercontrol
from tigercontrol.utils.random import set_key, generate_key
from tigercontrol.environments import Environment
from tigercontrol.controllers import Controller
from jax import grad,jit
from system_id import SystemID
class BPC_SystemID(Controller):
"""
Description: BPC algorithm that simultaneously learns the system dynamics A, B
"""
def initialize(self, n, m, H, K, T_0, delta, x, sys_id=None, initial_lr=1.0):
"""
Description: Initialize the dynamics of the model
Args:
n (float/numpy.ndarray): dimension of the state
m (float/numpy.ndarray): dimension of the controls
H (postive int): history of the controller
K (float/numpy.ndarray): optimal controller
T_0 (postive int): number of steps to do system identification before BPC
delta (float): gradient estimator parameter
x (numpy.ndarray): initial state
sys_id (sys id obj): instance of system id class
initial_lr (float): initial learning rate
"""
self.initialized = True
self._generate_uniform = _generate_uniform
self.eps = self._generate_uniform((H, H, m, n))
self.K = np.zeros((m, n)) ## compute it...
self.x = x
self.u = np.zeros(m)
self.n = n ## dimension of the state x
self.m = m ## dimension of the control u
self.H = H ## how many control matrices
self.T_0 = T_0
self.delta = delta
self.sys_id = SystemID() if sys_id is None else sys_id
self.sys_id.initialize(n, m, K, k=0.1*T_0, T_0=T_0)
## internal parmeters to the class
self.T = 0 ## keep track of iterations, for the learning rate
self.learning_rate = initial_lr
self.M = self._generate_uniform((H, m, n), norm = 1-delta) ## CANNOT BE SET TO ZERO
self.w_past = np.zeros((H, n)) ## this are the previous perturbations, from most recent [0] to latest [HH-1]
def update(self, c_t, x_new):
"""
Description: Updates internal parameters
Args:
c_t (float): loss at time t
x_new (array): next state
Returns:
Estimated optimal action
"""
self.T += 1
# update noise
next_norm = np.sqrt(1 - np.sum(self.eps[1:] **2))
next_eps = self._generate_uniform((self.H, self.m, self.n), norm=next_norm)
self.eps = np.roll(self.eps, -(self.H * self.m * self.n))
self.eps = jax.ops.index_update(self.eps, -1, next_eps)
#set current state
self.x = x_new
# system identification
if self.T < self.T_0:
self.sys_id.update(x_new)
return # no gradient update step during system identification
if self.T == self.T_0:
self.sys_id.update(x_new) # we need one extra step
self.A, self.B = self.sys_id.system_id()
#get new noise
w_new = x_new - np.dot(self.A , self.x) - np.dot(self.B , self.u)
#update past noises
self.w_past = np.roll(self.w_past, -self.n)
self.w_past = jax.ops.index_update(self.w_past, -1, w_new)
# gradient estimate and update
g_t = (self.m * self.n * self.H / self.delta) * c_t * np.sum(self.eps, axis = 0)
lr = self.learning_rate / self.T**0.75 # eta_t = O(t^(-3/4))
self.M = (self.M - lr * g_t)
curr_norm = np.linalg.norm(self.M)
if curr_norm > (1-self.delta):
self.M *= (1-self.delta) / curr_norm
return self.u
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
45941,
198,
11748,
299,
32152,
355,
319,
79,
198,
11748,
474,
897,
13,
25120,
355,
4738,
198,
11748,
256,
328,
... | 2.138826 | 1,772 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .code_change_trend_item import CodeChangeTrendItem
from .language_statistics import LanguageStatistics
from .project_activity_metrics import ProjectActivityMetrics
from .project_language_analytics import ProjectLanguageAnalytics
from .repository_language_analytics import RepositoryLanguageAnalytics
__all__ = [
'CodeChangeTrendItem',
'LanguageStatistics',
'ProjectActivityMetrics',
'ProjectLanguageAnalytics',
'RepositoryLanguageAnalytics',
]
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 5 | 205 |
# A demonstration of a simple optimization problem in TACS: minimize the
# mass of the CRM model subject to a global stress aggregate constraint
# enforcing that the maximum stress at any quadrature point is less than
# a specified upper bound.
from __future__ import print_function
# Import necessary libraries
import numpy as np
from mpi4py import MPI
from tacs import TACS, elements, constitutive, functions
from paropt import ParOpt
class uCRM_VonMisesMassMin(ParOpt.pyParOptProblem):
'''
Mass minimization with a von Mises stress constraint
'''
def getVarsAndBounds(self, x, lb, ub):
'''Set the values of the bounds'''
xvals = np.zeros(self.nvars, TACS.dtype)
self.assembler.getDesignVars(xvals)
x[:] = self.thickness_scale*xvals
xlb = np.zeros(self.nvars, TACS.dtype)
xub = np.zeros(self.nvars, TACS.dtype)
self.assembler.getDesignVarRange(xlb, xub)
lb[:] = self.thickness_scale*xlb
ub[:] = self.thickness_scale*xub
return
def evalObjCon(self, x):
'''Evaluate the objective and constraint'''
# Evaluate the objective and constraints
fail = 0
con = np.zeros(1)
# Set the new design variable values
self.assembler.setDesignVars(x[:]/self.thickness_scale)
# Assemble the Jacobian and factor the matrix
alpha = 1.0
beta = 0.0
gamma = 0.0
self.assembler.zeroVariables()
self.assembler.assembleJacobian(alpha, beta, gamma,
self.res, self.mat)
self.pc.factor()
# Solve the linear system and set the varaibles into TACS
self.gmres.solve(self.forces, self.ans)
self.assembler.setVariables(self.ans)
# Evaluate the function
fvals = self.assembler.evalFunctions(self.funcs)
# Set the mass as the objective
fobj = self.mass_scale*fvals[0]
# Set the KS function (the approximate maximum ratio of the
# von Mises stress to the design stress) so that
# it is less than or equal to 1.0
con[0] = 1.0 - fvals[1] # ~= 1.0 - max (sigma/design) >= 0
return fail, fobj, con
def evalObjConGradient(self, x, g, A):
'''Evaluate the objective and constraint gradient'''
fail = 0
# Evaluate the derivative of the mass and place it in the
# objective gradient
gx = np.zeros(self.nvars, TACS.dtype)
self.assembler.evalDVSens(self.funcs[0], gx)
g[:] = self.mass_scale*gx/self.thickness_scale
# Compute the total derivative w.r.t. material design variables
dfdx = np.zeros(self.nvars, TACS.dtype)
product = np.zeros(self.nvars, TACS.dtype)
# Compute the derivative of the function w.r.t. the state
# variables
self.assembler.evalDVSens(self.funcs[1], dfdx)
self.assembler.evalSVSens(self.funcs[1], self.dfdu)
self.gmres.solve(self.dfdu, self.adjoint)
# Compute the product of the adjoint with the derivative of the
# residuals
self.assembler.evalAdjointResProduct(self.adjoint, product)
# Set the constraint gradient
A[0][:] = -(dfdx - product)/self.thickness_scale
# Write out the solution file every 10 iterations
if self.iter_count % 10 == 0:
self.f5.writeToFile('ucrm_iter%d.f5'%(self.iter_count))
self.iter_count += 1
return fail
# Load structural mesh from BDF file
tacs_comm = MPI.COMM_WORLD
bdf_name = 'CRM_box_2nd.bdf'
crm_opt = uCRM_VonMisesMassMin(tacs_comm, bdf_name)
# Set up the optimization problem
max_lbfgs = 5
opt = ParOpt.pyParOpt(crm_opt, max_lbfgs, ParOpt.BFGS)
opt.setOutputFile('crm_opt.out')
# Set optimization parameters
opt.checkGradients(1e-6)
# Set optimization parameters
opt.setArmijoParam(1e-5)
opt.optimize()
# Get the optimized point
x, z, zw, zl, zu = opt.getOptimizedPoint()
| [
2,
317,
13646,
286,
257,
2829,
23989,
1917,
287,
309,
2246,
50,
25,
17775,
262,
198,
2,
2347,
286,
262,
8740,
44,
2746,
2426,
284,
257,
3298,
5503,
19406,
32315,
198,
2,
26587,
326,
262,
5415,
5503,
379,
597,
15094,
81,
1300,
966,
... | 2.335891 | 1,694 |
import os
import uuid
from mlflow.entities.experiment import Experiment
from mlflow.entities.metric import Metric
from mlflow.entities.param import Param
from mlflow.entities.run import Run
from mlflow.entities.run_data import RunData
from mlflow.entities.run_info import RunInfo
from mlflow.entities.run_status import RunStatus
from mlflow.store.abstract_store import AbstractStore
from mlflow.utils.env import get_env
from mlflow.utils.file_utils import (is_directory, list_subdirs, mkdir, exists,
write_yaml, read_yaml, find, read_file,
list_files, build_path, write_to, append_to)
from mlflow.utils.search_utils import does_run_match_clause
_TRACKING_DIR_ENV_VAR = "MLFLOW_TRACKING_DIR"
| [
11748,
28686,
198,
198,
11748,
334,
27112,
198,
198,
6738,
285,
1652,
9319,
13,
298,
871,
13,
23100,
3681,
1330,
29544,
198,
6738,
285,
1652,
9319,
13,
298,
871,
13,
4164,
1173,
1330,
3395,
1173,
198,
6738,
285,
1652,
9319,
13,
298,
... | 2.438486 | 317 |
from typing import Dict
import torch
import torch.nn as nn
from transformers import AutoConfig, AutoModel
| [
6738,
19720,
1330,
360,
713,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
6121,
364,
1330,
11160,
16934,
11,
11160,
17633,
628
] | 3.857143 | 28 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dataproc.v1",
manifest={
"Component",
"FailureAction",
"RuntimeConfig",
"EnvironmentConfig",
"ExecutionConfig",
"SparkHistoryServerConfig",
"PeripheralsConfig",
"RuntimeInfo",
"GkeClusterConfig",
"KubernetesClusterConfig",
"KubernetesSoftwareConfig",
"GkeNodePoolTarget",
"GkeNodePoolConfig",
},
)
class Component(proto.Enum):
r"""Cluster components that can be activated."""
COMPONENT_UNSPECIFIED = 0
ANACONDA = 5
DOCKER = 13
DRUID = 9
FLINK = 14
HBASE = 11
HIVE_WEBHCAT = 3
JUPYTER = 1
PRESTO = 6
RANGER = 12
SOLR = 10
ZEPPELIN = 4
ZOOKEEPER = 8
class FailureAction(proto.Enum):
r"""Actions in response to failure of a resource associated with
a cluster.
"""
FAILURE_ACTION_UNSPECIFIED = 0
NO_ACTION = 1
DELETE = 2
class RuntimeConfig(proto.Message):
r"""Runtime configuration for a workload.
Attributes:
version (str):
Optional. Version of the batch runtime.
container_image (str):
Optional. Optional custom container image for
the job runtime environment. If not specified, a
default container image will be used.
properties (Sequence[google.cloud.dataproc_v1.types.RuntimeConfig.PropertiesEntry]):
Optional. A mapping of property names to
values, which are used to configure workload
execution.
"""
version = proto.Field(proto.STRING, number=1,)
container_image = proto.Field(proto.STRING, number=2,)
properties = proto.MapField(proto.STRING, proto.STRING, number=3,)
class EnvironmentConfig(proto.Message):
r"""Environment configuration for a workload.
Attributes:
execution_config (google.cloud.dataproc_v1.types.ExecutionConfig):
Optional. Execution configuration for a
workload.
peripherals_config (google.cloud.dataproc_v1.types.PeripheralsConfig):
Optional. Peripherals configuration that
workload has access to.
"""
execution_config = proto.Field(proto.MESSAGE, number=1, message="ExecutionConfig",)
peripherals_config = proto.Field(
proto.MESSAGE, number=2, message="PeripheralsConfig",
)
class ExecutionConfig(proto.Message):
r"""Execution configuration for a workload.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
service_account (str):
Optional. Service account that used to
execute workload.
network_uri (str):
Optional. Network URI to connect workload to.
This field is a member of `oneof`_ ``network``.
subnetwork_uri (str):
Optional. Subnetwork URI to connect workload
to.
This field is a member of `oneof`_ ``network``.
network_tags (Sequence[str]):
Optional. Tags used for network traffic
control.
kms_key (str):
Optional. The Cloud KMS key to use for
encryption.
"""
service_account = proto.Field(proto.STRING, number=2,)
network_uri = proto.Field(proto.STRING, number=4, oneof="network",)
subnetwork_uri = proto.Field(proto.STRING, number=5, oneof="network",)
network_tags = proto.RepeatedField(proto.STRING, number=6,)
kms_key = proto.Field(proto.STRING, number=7,)
class SparkHistoryServerConfig(proto.Message):
r"""Spark History Server configuration for the workload.
Attributes:
dataproc_cluster (str):
Optional. Resource name of an existing Dataproc Cluster to
act as a Spark History Server for the workload.
Example:
- ``projects/[project_id]/regions/[region]/clusters/[cluster_name]``
"""
dataproc_cluster = proto.Field(proto.STRING, number=1,)
class PeripheralsConfig(proto.Message):
r"""Auxiliary services configuration for a workload.
Attributes:
metastore_service (str):
Optional. Resource name of an existing Dataproc Metastore
service.
Example:
- ``projects/[project_id]/locations/[region]/services/[service_id]``
spark_history_server_config (google.cloud.dataproc_v1.types.SparkHistoryServerConfig):
Optional. The Spark History Server
configuration for the workload.
"""
metastore_service = proto.Field(proto.STRING, number=1,)
spark_history_server_config = proto.Field(
proto.MESSAGE, number=2, message="SparkHistoryServerConfig",
)
class RuntimeInfo(proto.Message):
r"""Runtime information about workload execution.
Attributes:
endpoints (Sequence[google.cloud.dataproc_v1.types.RuntimeInfo.EndpointsEntry]):
Output only. Map of remote access endpoints
(such as web interfaces and APIs) to their URIs.
output_uri (str):
Output only. A URI pointing to the location
of the stdout and stderr of the workload.
diagnostic_output_uri (str):
Output only. A URI pointing to the location
of the diagnostics tarball.
"""
endpoints = proto.MapField(proto.STRING, proto.STRING, number=1,)
output_uri = proto.Field(proto.STRING, number=2,)
diagnostic_output_uri = proto.Field(proto.STRING, number=3,)
class GkeClusterConfig(proto.Message):
r"""The cluster's GKE config.
Attributes:
gke_cluster_target (str):
Optional. A target GKE cluster to deploy to. It must be in
the same project and region as the Dataproc cluster (the GKE
cluster can be zonal or regional). Format:
'projects/{project}/locations/{location}/clusters/{cluster_id}'
node_pool_target (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget]):
Optional. GKE NodePools where workloads will
be scheduled. At least one node pool must be
assigned the 'default' role. Each role can be
given to only a single NodePoolTarget. All
NodePools must have the same location settings.
If a nodePoolTarget is not specified, Dataproc
constructs a default nodePoolTarget.
"""
gke_cluster_target = proto.Field(proto.STRING, number=2,)
node_pool_target = proto.RepeatedField(
proto.MESSAGE, number=3, message="GkeNodePoolTarget",
)
class KubernetesClusterConfig(proto.Message):
r"""The configuration for running the Dataproc cluster on
Kubernetes.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
kubernetes_namespace (str):
Optional. A namespace within the Kubernetes
cluster to deploy into. If this namespace does
not exist, it is created. If it exists, Dataproc
verifies that another Dataproc VirtualCluster is
not installed into it. If not specified, the
name of the Dataproc Cluster is used.
gke_cluster_config (google.cloud.dataproc_v1.types.GkeClusterConfig):
Required. The configuration for running the
Dataproc cluster on GKE.
This field is a member of `oneof`_ ``config``.
kubernetes_software_config (google.cloud.dataproc_v1.types.KubernetesSoftwareConfig):
Optional. The software configuration for this
Dataproc cluster running on Kubernetes.
"""
kubernetes_namespace = proto.Field(proto.STRING, number=1,)
gke_cluster_config = proto.Field(
proto.MESSAGE, number=2, oneof="config", message="GkeClusterConfig",
)
kubernetes_software_config = proto.Field(
proto.MESSAGE, number=3, message="KubernetesSoftwareConfig",
)
class KubernetesSoftwareConfig(proto.Message):
r"""The software configuration for this Dataproc cluster running
on Kubernetes.
Attributes:
component_version (Sequence[google.cloud.dataproc_v1.types.KubernetesSoftwareConfig.ComponentVersionEntry]):
The components that should be installed in
this Dataproc cluster. The key must be a string
from the KubernetesComponent enumeration. The
value is the version of the software to be
installed.
At least one entry must be specified.
properties (Sequence[google.cloud.dataproc_v1.types.KubernetesSoftwareConfig.PropertiesEntry]):
The properties to set on daemon config files.
Property keys are specified in ``prefix:property`` format,
for example ``spark:spark.kubernetes.container.image``. The
following are supported prefixes and their mappings:
- spark: ``spark-defaults.conf``
For more information, see `Cluster
properties <https://cloud.google.com/dataproc/docs/concepts/cluster-properties>`__.
"""
component_version = proto.MapField(proto.STRING, proto.STRING, number=1,)
properties = proto.MapField(proto.STRING, proto.STRING, number=2,)
class GkeNodePoolTarget(proto.Message):
r"""GKE NodePools that Dataproc workloads run on.
Attributes:
node_pool (str):
Required. The target GKE NodePool. Format:
'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
roles (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget.Role]):
Required. The types of role for a GKE
NodePool
node_pool_config (google.cloud.dataproc_v1.types.GkeNodePoolConfig):
Optional. The configuration for the GKE
NodePool.
If specified, Dataproc attempts to create a
NodePool with the specified shape. If one with
the same name already exists, it is verified
against all specified fields. If a field
differs, the virtual cluster creation will fail.
If omitted, any NodePool with the specified name
is used. If a NodePool with the specified name
does not exist, Dataproc create a NodePool with
default values.
"""
class Role(proto.Enum):
r"""``Role`` specifies whose tasks will run on the NodePool. The roles
can be specific to workloads. Exactly one GkeNodePoolTarget within
the VirtualCluster must have 'default' role, which is used to run
all workloads that are not associated with a NodePool.
"""
ROLE_UNSPECIFIED = 0
DEFAULT = 1
CONTROLLER = 2
SPARK_DRIVER = 3
SPARK_EXECUTOR = 4
node_pool = proto.Field(proto.STRING, number=1,)
roles = proto.RepeatedField(proto.ENUM, number=2, enum=Role,)
node_pool_config = proto.Field(
proto.MESSAGE, number=3, message="GkeNodePoolConfig",
)
class GkeNodePoolConfig(proto.Message):
r"""The configuration of a GKE NodePool used by a `Dataproc-on-GKE
cluster <https://cloud.google.com/dataproc/docs/concepts/jobs/dataproc-gke#create-a-dataproc-on-gke-cluster>`__.
Attributes:
config (google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodeConfig):
Optional. The node pool configuration.
locations (Sequence[str]):
Optional. The list of Compute Engine
`zones <https://cloud.google.com/compute/docs/zones#available>`__
where NodePool's nodes will be located.
**Note:** Currently, only one zone may be specified.
If a location is not specified during NodePool creation,
Dataproc will choose a location.
autoscaling (google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAutoscalingConfig):
Optional. The autoscaler configuration for
this NodePool. The autoscaler is enabled only
when a valid configuration is present.
"""
class GkeNodeConfig(proto.Message):
r"""Parameters that describe cluster nodes.
Attributes:
machine_type (str):
Optional. The name of a Compute Engine `machine
type <https://cloud.google.com/compute/docs/machine-types>`__.
preemptible (bool):
Optional. Whether the nodes are created as `preemptible VM
instances <https://cloud.google.com/compute/docs/instances/preemptible>`__.
local_ssd_count (int):
Optional. The number of local SSD disks to attach to the
node, which is limited by the maximum number of disks
allowable per zone (see `Adding Local
SSDs <https://cloud.google.com/compute/docs/disks/local-ssd>`__).
accelerators (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig]):
Optional. A list of `hardware
accelerators <https://cloud.google.com/compute/docs/gpus>`__
to attach to each node.
min_cpu_platform (str):
Optional. `Minimum CPU
platform <https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform>`__
to be used by this instance. The instance may be scheduled
on the specified or a newer CPU platform. Specify the
friendly names of CPU platforms, such as "Intel Haswell"\`
or Intel Sandy Bridge".
"""
machine_type = proto.Field(proto.STRING, number=1,)
preemptible = proto.Field(proto.BOOL, number=10,)
local_ssd_count = proto.Field(proto.INT32, number=7,)
accelerators = proto.RepeatedField(
proto.MESSAGE,
number=11,
message="GkeNodePoolConfig.GkeNodePoolAcceleratorConfig",
)
min_cpu_platform = proto.Field(proto.STRING, number=13,)
class GkeNodePoolAcceleratorConfig(proto.Message):
r"""A GkeNodeConfigAcceleratorConfig represents a Hardware
Accelerator request for a NodePool.
Attributes:
accelerator_count (int):
The number of accelerator cards exposed to an
instance.
accelerator_type (str):
The accelerator type resource namename (see
GPUs on Compute Engine).
"""
accelerator_count = proto.Field(proto.INT64, number=1,)
accelerator_type = proto.Field(proto.STRING, number=2,)
class GkeNodePoolAutoscalingConfig(proto.Message):
r"""GkeNodePoolAutoscaling contains information the cluster
autoscaler needs to adjust the size of the node pool to the
current cluster usage.
Attributes:
min_node_count (int):
The minimum number of nodes in the NodePool. Must be >= 0
and <= max_node_count.
max_node_count (int):
The maximum number of nodes in the NodePool. Must be >=
min_node_count. **Note:** Quota must be sufficient to scale
up the cluster.
"""
min_node_count = proto.Field(proto.INT32, number=2,)
max_node_count = proto.Field(proto.INT32, number=3,)
config = proto.Field(proto.MESSAGE, number=2, message=GkeNodeConfig,)
locations = proto.RepeatedField(proto.STRING, number=13,)
autoscaling = proto.Field(
proto.MESSAGE, number=4, message=GkeNodePoolAutoscalingConfig,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
33160,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2... | 2.468559 | 6,695 |
"""
This Package responsible for the logs in the WP-exploiter.
"""
try:
import enum
import asyncio
from datetime import datetime
except BaseException as e:
print("Import Error: ", e)
class LoggerMode(enum.Enum):
"""
:DEVELOP: Will output all the logs to the screen also.
:DEBUG: Will output only Exceptions to the screen also.
:DEPLOY: Will not output nothing to the screen.
"""
DEVELOP = 0
DEBUG = 1
DEPLOY = 2
class Logger(object):
"""
This is a singleton class that handle the logs of the project.
:method save_log: save the log
"""
instance = None
class __Logger:
"""
Private class - for the singleton.
"""
file_lock = asyncio.Lock()
def __init__(self, output_file, log_level=LoggerMode.DEPLOY):
"""
:param output_file: the file will use for output the messages.
:param log_level: LoggerMode
"""
if not Logger.instance:
Logger.instance = Logger.__Logger(output_file, log_level)
else:
Logger.instance.logfile = output_file
def save_log(self, log, error_msg=None):
"""
This method are responsible to ouput the logs to the output_file.
:param log: the log line to store, can be anything that have to_string method.
:param error_msg: a special message will be print to the log file after the original log.
:return: None
"""
try:
with open(self.instance.logfile, "a+") as file:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
file.write("{}: {}\n".format(current_time,log))
file.close()
if self.instance.log_level == LoggerMode.DEVELOP:
print(log)
elif self.instance.log_level == LoggerMode.DEBUG and type(log) is Exception:
print(log)
if error_msg is not None:
self.save_log(error_msg)
except BaseException as e:
print("Error while printing to logs file.")
print(e)
finally:
file.close()
| [
37811,
198,
1212,
15717,
4497,
329,
262,
17259,
287,
262,
28993,
12,
20676,
78,
2676,
13,
198,
37811,
198,
198,
28311,
25,
198,
220,
220,
220,
1330,
33829,
198,
220,
220,
220,
1330,
30351,
952,
198,
220,
220,
220,
422,
4818,
8079,
1... | 2.272919 | 949 |
from config import user_list
from flask import g
from app.helpers.render import render_json, render_error
from app.models.User import User
from app.instances import db
def get_my_profile():
"""
Returns the logged in user's profile or a JSON with `unauthorized: true`
"""
if isinstance(g.user, User):
return render_json(g.user.to_json(own=True))
else:
return render_json({'unauthorized': True})
def get_profile(user_id):
"""
Returns a user's user_id
"""
user = User.query.filter_by(id=user_id).first()
if user is None:
return render_error('user not found'), 400
else:
return render_json(user.to_json(bio=True))
def follow(source_user_id, target_user_id):
"""
Makes 1st param follow the 2nd param
"""
if source_user_id == target_user_id:
return render_error('cannot follow oneself'), 400
source_user = User.query.filter_by(id=source_user_id).first()
target_user = User.query.filter_by(id=target_user_id).first()
if not isinstance(g.user, User):
return render_error('Unauthorized'), 401
if source_user is None or target_user is None:
return render_error('source user or target user doesn\'t exist'), 400
if source_user.id != g.user.id:
return render_error('Forbidden'), 403
source_user.follow(target_user)
db.session.commit()
return render_json({ 'following': True })
def unfollow(source_user_id, target_user_id):
"""
Makes 1st param unfollow the 2nd param
"""
if source_user_id == target_user_id:
return render_error('cannot follow oneself'), 400
source_user = User.query.filter_by(id=source_user_id).first()
target_user = User.query.filter_by(id=target_user_id).first()
if not isinstance(g.user, User):
return render_error('Unauthorized'), 401
if source_user is None or target_user is None:
return render_error('source user or target user doesn\'t exist'), 400
if source_user.id != g.user.id:
return render_error('Forbidden'), 403
source_user.unfollow(target_user)
db.session.commit()
return render_json({ 'following': False })
| [
6738,
4566,
1330,
2836,
62,
4868,
198,
6738,
42903,
1330,
308,
198,
198,
6738,
598,
13,
16794,
364,
13,
13287,
1330,
8543,
62,
17752,
11,
8543,
62,
18224,
198,
6738,
598,
13,
27530,
13,
12982,
1330,
11787,
198,
6738,
598,
13,
8625,
... | 2.67033 | 819 |
"""empty message
Revision ID: 77219ff4194a
Revises: d0605c00c98e
Create Date: 2019-11-18 18:22:03.915031
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "77219ff4194a"
down_revision = "d0605c00c98e"
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
767,
4761,
1129,
487,
19,
22913,
64,
198,
18009,
2696,
25,
288,
15,
32417,
66,
405,
66,
4089,
68,
198,
16447,
7536,
25,
13130,
12,
1157,
12,
1507,
1248,
25,
1828,
25,
3070,
13,
... | 2.56391 | 133 |
import json
from rest_framework.views import APIView
from rest_framework import permissions
from rest_framework.authentication import TokenAuthentication
from django.http import HttpResponse
from django.utils import timezone
from datetime import timedelta
from app.models import Game, Winner
| [
11748,
33918,
198,
6738,
1334,
62,
30604,
13,
33571,
1330,
3486,
3824,
769,
198,
6738,
1334,
62,
30604,
1330,
21627,
198,
6738,
1334,
62,
30604,
13,
41299,
3299,
1330,
29130,
47649,
3299,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29... | 4.138889 | 72 |
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 22:59:10 2020
@author: Christopher Cheng
"""
# The value is the function mapped
areas = {"sq": square, "ci": circle, "eqtri": equal_tri}
# When accessing the dictionary keys, you can pass a parameter if it's a function
n = 2
print(areas["sq"](n))
print(areas["ci"](n))
print(areas["eqtri"](n)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3825,
1737,
838,
2534,
25,
3270,
25,
940,
12131,
198,
198,
31,
9800,
25,
12803,
27692,
198,
37811,
198,
198,
2,
383,
1988,
318,
262,
2163,
2766... | 2.761905 | 126 |
from __future__ import division
from builtins import range
from past.utils import old_div
from collections import Counter
import numpy as np
def propensity(y, A=0.55, B=1.5):
"""
Computes propensity scores based on ys
"""
N, Nl, ml = metrics(y)
C = (np.log(N) - 1) * (B + 1) ** A
weights = []
for i in range(ml):
weights.append(1 + C * (Nl.get(i, 0) + B) ** -A)
return np.array(weights, dtype='float32')
| [
6738,
11593,
37443,
834,
1330,
7297,
201,
198,
6738,
3170,
1040,
1330,
2837,
201,
198,
6738,
1613,
13,
26791,
1330,
1468,
62,
7146,
201,
198,
6738,
17268,
1330,
15034,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
4299,
... | 2.326633 | 199 |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os
from os import path
from bes.key_value.key_value_list import key_value_list
from bes.common.check import check
from bes.system.log import log
from .vfs_cli_command import vfs_cli_command
from .vfs_list_options import vfs_list_options
| [
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
26,
4235,
25,
29412,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
26,
269,
12,
35487,
12,
28968,
25,
362,
26,
7400,
12,
10394,
25,
362,
532,
9,
12,
198,
198,
11748,
28686,
198,
6738,
2... | 2.772358 | 123 |
from argparse import Namespace
from torchvision.datasets import CIFAR10
import numpy as np
import torch
import utils
# Default hyper-parameters for the CIFAR-10 experiments; consumers read
# these as attributes (e.g. ``DEFAULT_ARGS.batch_size``).
_DEFAULTS = {
    'dataset': 'cifar10',
    'data': 'data',
    'cutout': None,
    'cutout_length': None,
    'train_portion': 0.9,
    'batch_size': 256,
    'evaluate_batch_size': 256,
}
DEFAULT_ARGS = Namespace(**_DEFAULTS)
| [
6738,
1822,
29572,
1330,
28531,
10223,
198,
198,
6738,
28034,
10178,
13,
19608,
292,
1039,
1330,
327,
5064,
1503,
940,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
3384,
4487,
198,
198,
7206,
38865,
62,
1503,
14313,
... | 2.471074 | 121 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 2 10:21:16 2021
@author: fabian
"""
import pypsa
import pytest
import os
import pandas as pd
from numpy.testing import assert_array_almost_equal as equal
from pandas import IndexSlice as idx
from pypsa.descriptors import get_activity_mask
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
5979,
220,
362,
838,
25,
2481,
25,
1433,
33448,
198,
198,
31,
9800,
25,
7843,
666,... | 2.675862 | 145 |
import intentparser as ip
__author__ = 'Nonthakon Jitchiranant'
# BUGFIX: this region contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> origin/master), which is a SyntaxError.
# Kept the origin/master branch: it matches the ``import intentparser as ip``
# alias above, uses the named ip.REQUIRE/ip.OPTIONAL constants instead of
# bare magic numbers, and guards the demo behind ``__main__``.
if __name__ == "__main__":
    intent = ip.intentParser({
        'description' : {
            "type" : 'FavMusicIntent',
            "args" : [(ip.OPTIONAL, "musics_types")],
            "keyword" : [
                (ip.REQUIRE, "musics_keyword"),
                (ip.OPTIONAL, "musics_types")
            ]},
        'musics_keyword' : ['is', 'are', 'music', 'favourite', 'genre'],
        'musics_types' : [
            "pop",
            "rock",
            "jazz",
            "country",
            "reggae"
        ]
    })
    intent.teachWords(["I love Reggae music.", "Rock is my favourite.", "I love Country music genre."])
    print(intent.getResult("I love Rock music."))
    print(intent.getResult("Jazz is my favourite."))
| [
11748,
6824,
48610,
355,
20966,
198,
198,
834,
9800,
834,
796,
705,
15419,
400,
461,
261,
449,
2007,
343,
272,
415,
6,
198,
198,
16791,
16791,
16791,
27,
39837,
198,
48536,
796,
6824,
48610,
13,
48536,
46677,
15090,
198,
220,
220,
220... | 1.873536 | 854 |
from cx_Freeze import setup, Executable
import os
# cx_Freeze build script for the dpow client.
# Extra packages / modules to force-include in the frozen build (none yet).
additional_packages = []
additional_mods = []
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__)) # directory where this file is in
DIST_DIR = os.path.join(PROJECT_DIR, 'release\\bin') # directory where all the final builds will be found
BUILD_DIR = os.path.join(PROJECT_DIR, 'build') # directory where all work will be done
# NOTE(review): BUILD_DIR is defined but never passed to setup() -- confirm
# whether a 'build' option was intended or the constant can be dropped.
setup(
    name = "dpow_client" ,
    version = "0.1" ,
    description = " " ,
    executables = [Executable("dpow_client.py", targetName="dpow_client.exe")],
    options = {'build_exe': {'packages':additional_packages, 'includes': additional_mods, 'build_exe': DIST_DIR}}
)
6738,
43213,
62,
11146,
2736,
1330,
9058,
11,
8393,
18187,
201,
198,
11748,
28686,
201,
198,
201,
198,
2860,
1859,
62,
43789,
796,
17635,
201,
198,
2860,
1859,
62,
24122,
796,
17635,
201,
198,
201,
198,
31190,
23680,
62,
34720,
796,
2... | 2.740891 | 247 |
luck_desc = [
{
"charaid": ["1", "2"],
"_name": "凯露",
"type": [
{
"good-luck": 10,
"content": "仅限今天而已!别想多了!!"
},
{
"good-luck": 8,
"content": "宜抽卡﹁十连五彩不是梦﹂"
},
{
"good-luck": 7,
"content": "宜刷本﹁碎片装备一大堆!你是托吧﹂"
},
{
"good-luck": 6,
"content": "还不学习吗,殺すわよっ!"
},
{
"good-luck": -6,
"content": "忌抽卡﹁这个池子不行的,没有3x啊﹂"
},
{
"good-luck": -9,
"content": "ヤバイわよ!钱包又没了"
},
{
"good-luck": 5,
"content": "双场排名飞升"
},
{
"good-luck": -7,
"content": "伊莉亚又暴击了︵对自己"
},
{
"good-luck": -8,
"content": "害学习呢,你被梯了"
},
{
"good-luck": -10,
"content": "遇到困难,睡大觉"
},
{
"good-luck": 24,
"content": "金运不错,所以你能借钱给我吗?"
}
]
},
{
"charaid": ["3", "4"],
"_name": "初音",
"type": [
{
"good-luck": 10,
"content": "Kira☆今天运气绝佳♫"
},
{
"good-luck": 9,
"content": "睡个午觉可以变清爽一点☆"
},
{
"good-luck": 4,
"content": "醒醒你体力满了!"
},
{
"good-luck": -6,
"content": "梦到熬夜赶ddl"
},
{
"good-luck": -9,
"content": "用超能力的话,运气会不会好一点呢"
},
{
"good-luck": 20,
"content": "ハツネちゃんの应援だよ!一切都会好起来的"
},
{
"good-luck": 25,
"content": "叫上栞栞一起学习吧"
}
]
},
{
"charaid": ["5", "6"],
"_name": "杏奈",
"type": [
{
"good-luck": 8,
"content": "氷狂凍獄絶零破!︵宜JJC"
},
{
"good-luck": 7,
"content": "我ら煉獄に舞いし一陣の疾風︵宜现充"
},
{
"good-luck": 4,
"content": "就到此为止了吗…︵宜摸鱼"
},
{
"good-luck": -6,
"content": "冥 空 天 升︵会被演"
},
{
"good-luck": -8,
"content": "会被提起黑历史"
},
{
"good-luck": 21,
"content": "邪王真眼、我会追上你的︵宜表白"
}
]
},
{
"charaid": ["7", "8"],
"_name": "优妮",
"type": [
{
"good-luck": 7,
"content": "宜学习﹁今天去图书馆自习吗﹂"
},
{
"good-luck": 5,
"content": "宜击剑﹁讨论人性与哲学光辉﹂"
},
{
"good-luck": 4,
"content": "论文的事,需要帮忙吗"
},
{
"good-luck": -6,
"content": "会思考怎么拆对手到超时"
},
{
"good-luck": -7,
"content": "会忘记每日十连"
},
{
"good-luck": -9,
"content": "忌表白﹁啊啊,浓咖啡真好喝﹂"
},
{
"good-luck": 26,
"content": "見たまえ同志!これこそわ﹁ユニちゃんズ﹂!"
}
]
},
{
"charaid": ["9", "10"],
"_name": "琪爱儿",
"type": [
{
"good-luck": 9,
"content": "切噜☆♪"
},
{
"good-luck": 6,
"content": "宜水群﹁你就是龙王切噜♪﹂"
},
{
"good-luck": 4,
"content": "末小吉就是说你今天也没什么运气切噜噜"
},
{
"good-luck": -6,
"content": "忌装弱﹁因为先辈你本来就很弱切噜♪﹂"
},
{
"good-luck": -9,
"content": "切噜噜噜噜♪困难本0碎切噜!"
},
{
"good-luck": 27,
"content": "总是萌混过关是交不到朋友的切噜☆"
}
]
},
{
"charaid": ["11", "12"],
"_name": "克萝依",
"type": [
{
"good-luck": 10,
"content": "哦|偶尔也会有这样的日子呢,偶尔就是了"
},
{
"good-luck": 6,
"content": "JJC啊,没几个人打你的"
},
{
"good-luck": 5,
"content": "小心拖延症"
},
{
"good-luck": -7,
"content": "会跨服聊天"
},
{
"good-luck": -8,
"content": "水群会冷场"
},
{
"good-luck": 23,
"content": "工作总是遇到奇怪的人"
}
]
},
{
"charaid": ["13", "14"],
"_name": "静流",
"type": [
{
"good-luck": 9,
"content": "会十连出碎片"
},
{
"good-luck": 8,
"content": "宜熬夜爆肝"
},
{
"good-luck": 5,
"content": "宜出刀﹁希望弟弟暴击﹂"
},
{
"good-luck": -6,
"content": "抽卡会+19"
},
{
"good-luck": -8,
"content": "没什么精神,要姐姐的头槌清醒下吗♡"
},
{
"good-luck": 21,
"content": "偷跑才是王道!︵冬马&静流"
}
]
},
{
"charaid": ["15", "16"],
"_name": "惠理子",
"type": [
{
"good-luck": 10,
"content": "诸事皆宜"
},
{
"good-luck": 7,
"content": "宜现充﹁会遇到命 运 之 人﹂"
},
{
"good-luck": -7,
"content": "你就是记仇碎钻回刺人"
},
{
"good-luck": -8,
"content": "忌dd﹁别的女人就这么吸引你吗﹂"
},
{
"good-luck": -10,
"content": "今天……一直会在你身后的"
},
{
"good-luck": 22,
"content": "爱情也需要调味料︵电脑配件"
}
]
},
{
"charaid": ["17", "18"],
"_name": "优花梨",
"type": [
{
"good-luck": 8,
"content": "宜社畜﹁你就是戒酒后的优花梨﹂"
},
{
"good-luck": 7,
"content": "宜摸鱼﹁我就再喝亿杯﹂"
},
{
"good-luck": 4,
"content": "宜追番﹁守护最好的二刺螈﹂"
},
{
"good-luck": -7,
"content": "忌现充﹁现充爆炸吧﹂"
},
{
"good-luck": -8,
"content": "優花梨,为什么不喝呢~♫"
},
{
"good-luck": 20,
"content": "运气用在了没用的地方"
}
]
},
{
"charaid": ["19", "20"],
"_name": "空花",
"type": [
{
"good-luck": 9,
"content": "宜摸鱼﹁空花的妄想停不下来﹂"
},
{
"good-luck": 7,
"content": "宜瑟图﹁群涩批时不时来暴露xp﹂"
},
{
"good-luck": -6,
"content": "忌社畜﹁会被当成抖M﹂"
},
{
"good-luck": -7,
"content": "忌水群﹁会被放置PLAY﹂"
},
{
"good-luck": -8,
"content": "忌瑟图﹁哦淦老兄你xp好怪啊﹂"
},
{
"good-luck": 26,
"content": "摩多摩多摩多~"
}
]
},
{
"charaid": ["21", "22"],
"_name": "忍",
"type": [
{
"good-luck": 8,
"content": "父亲说这个占卜一点都不准。。。"
},
{
"good-luck": 6,
"content": "宜DD﹁骷髅老爹也想栞栞!﹂"
},
{
"good-luck": 4,
"content": "宜对线﹁你就是送爸人﹂"
},
{
"good-luck": -7,
"content": "忌抽卡﹁父亲的占卜罢了,你身后是有挺多怨灵的﹂"
},
{
"good-luck": -9,
"content": "忌表白﹁会得骷髅老爹真传﹂"
},
{
"good-luck": 20,
"content": "今天你身边也是有各种各样的幽灵在呢"
}
]
},
{
"charaid": ["23", "24"],
"_name": "真步",
"type": [
{
"good-luck": 10,
"content": "みらくるもほりん、くるりんぱ"
},
{
"good-luck": 7,
"content": "宜刷本﹁这个是真步真步N3魔法!﹂"
},
{
"good-luck": 5,
"content": "宜膜佬﹁?这真步河里﹂"
},
{
"good-luck": -6,
"content": "宜复读﹁真步真步教○脑开始﹂"
},
{
"good-luck": -7,
"content": "忌竞技场﹁那么,答案就只有一个了!﹂"
},
{
"good-luck": 20,
"content": "咕噜灵波~"
}
]
},
{
"charaid": ["25", "26"],
"_name": "茜里",
"type": [
{
"good-luck": 6,
"content": "宜出游﹁诶诶要在外面吗﹂"
},
{
"good-luck": 5,
"content": "宜运动﹁这样的、太激烈了嗯﹂"
},
{
"good-luck": 4,
"content": "那里,已经满满的了︵指体力"
},
{
"good-luck": -7,
"content": "明明还是早上就要从后面。。。︵打回前排"
},
{
"good-luck": -9,
"content": "忌瑟图﹁是谁害想着开车呢爪巴﹂"
},
{
"good-luck": 21,
"content": "这种事,就是两个人一起会变舒服︵心情上︶的对吧?"
}
]
},
{
"charaid": ["27", "28"],
"_name": "镜华",
"type": [
{
"good-luck": 7,
"content": "宜水群﹁今天你就是喷水龙王﹂"
},
{
"good-luck": 5,
"content": "宜社畜﹁就算很讨厌,今天我也努力吃了一片胡萝卜﹂"
},
{
"good-luck": 6,
"content": "宜出刀﹁虽然希望可以暴击変態桑而不是王﹂"
},
{
"good-luck": -6,
"content": "忌装弱﹁优妮就算了,你也想叫妈妈?﹂"
},
{
"good-luck": 26,
"content": "全体运会不会是能吃到纸杯蛋糕呢"
},
{
"good-luck": 27,
"content": "変態変態変態変態!"
}
]
},
{
"charaid": ["29", "30"],
"_name": "美美",
"type": [
{
"good-luck": 8,
"content": "天兔霸断剑,要试试吗"
},
{
"good-luck": 7,
"content": "宜出游﹁会遇到小萝莉﹂"
},
{
"good-luck": 4,
"content": "宜刷本﹁お兄ちゃん見ていて、いっぱい箱!﹂"
},
{
"good-luck": -7,
"content": "忌出刀﹁美美的狮鹫长大了也会伤害它吗﹂"
},
{
"good-luck": -8,
"content": "这个月会战摸了"
},
{
"good-luck": 27,
"content": "镜华和未奏希也会吵架,但最后还是会和好的"
}
]
},
{
"charaid": ["31", "32"],
"_name": "未奏希",
"type": [
{
"good-luck": 8,
"content": "宜刷本﹁成功避开攻略烟雾弹﹂"
},
{
"good-luck": 5,
"content": "宜摸鱼﹁每天三刀甜心刀,大家一起睡饱饱﹂"
},
{
"good-luck": 5,
"content": "小 心 熊 孩 子"
},
{
"good-luck": -6,
"content": "忌出刀﹁刚跟镜华吵完架,现在是0暴镜华﹂"
},
{
"good-luck": -8,
"content": "忌作死﹁自己挖的陷阱,自己踩下去﹂"
},
{
"good-luck": 25,
"content": "哎呀,学不会啊!"
}
]
},
{
"charaid": ["33", "34"],
"_name": "怜",
"type": [
{
"good-luck": 7,
"content": "宜钓鱼﹁剑圣之意不在鱼,在乎::﹂"
},
{
"good-luck": 5,
"content": "宜击剑﹁要刺得快!刺得狠!﹂"
},
{
"good-luck": 4,
"content": "宜现充﹁一起特训到晚上吧﹂"
},
{
"good-luck": -8,
"content": "今天你就是老醋坛︵士条怜︶了"
},
{
"good-luck": -10,
"content": "忌摸鱼﹁当你偷偷快乐的时候,优衣正在凝视着你﹂"
},
{
"good-luck": 23,
"content": "遇到困难的时候,男?朋友总会帮你︵雪乃&怜"
}
]
},
{
"charaid": ["35", "36"],
"_name": "优衣",
"type": [
{
"good-luck": 10,
"content": "诸事皆宜!今、今后也请::︵宕机"
},
{
"good-luck": 6,
"content": "宜现充﹁朋?友都会顾及关心你﹂"
},
{
"good-luck": -6,
"content": "说着对不起实际却偷吃的屑都不配被我奶"
},
{
"good-luck": -7,
"content": "忌表白﹁诶诶诶我怎么又被对不起了﹂"
},
{
"good-luck": -10,
"content": "忌抽卡﹁又井了呢,骑士君﹂"
},
{
"good-luck": 21,
"content": "不许说对不起!"
}
]
},
{
"charaid": ["37", "38"],
"_name": "日和莉",
"type": [
{
"good-luck": 9,
"content": "宜竞技场﹁欧拉欧拉欧拉欧拉欧拉!﹂"
},
{
"good-luck": 8,
"content": "日行一善会有好事发生"
},
{
"good-luck": 6,
"content": "宜对线﹁那就对他使用炎拳吧﹂"
},
{
"good-luck": -7,
"content": "忌偏见﹁你以为我是猫,其实我是老虎哒﹂"
},
{
"good-luck": -8,
"content": "忌表白﹁游戏也好,相遇也好,明明是我先来的﹂"
},
{
"good-luck": 27,
"content": "开朗活泼想要保持他人关系,到底哪里出错了︵团子&日和莉"
}
]
},
{
"charaid": ["39", "40"],
"_name": "贪吃佩可",
"type": [
{
"good-luck": 10,
"content": "んま~い!ごはんは命のエネルギー☆"
},
{
"good-luck": 7,
"content": "宜现充﹁おいっす~☆﹂"
},
{
"good-luck": 5,
"content": "宜二刺猿﹁真男人就该?高达﹂"
},
{
"good-luck": -7,
"content": "今日性能降低﹁因为好吃就是高兴嘛﹂"
},
{
"good-luck": -10,
"content": "忌水群﹁群霸瞳篡改群员记忆导致冷场﹂"
},
{
"good-luck": 20,
"content": "ヤバイですね‼"
}
]
},
{
"charaid": ["41", "42"],
"_name": "可可萝",
"type": [
{
"good-luck": 10,
"content": "今天也做得很好、主さま"
},
{
"good-luck": 9,
"content": "宜抽卡﹁爱梅斯大人说今天又会遇到新的女生﹂"
},
{
"good-luck": 8,
"content": "宜刷本﹁已经让密涅瓦偷偷改过数据了﹂"
},
{
"good-luck": -8,
"content": "果然又是主人认识的女生,以后叫わんわんの主さま好了"
},
{
"good-luck": -9,
"content": "忌半途而废﹁第几次转生了,DD﹂"
},
{
"good-luck": 26,
"content": "诸事皆宜"
}
]
},
{
"charaid": ["43", "44"],
"_name": "璃乃",
"type": [
{
"good-luck": 10,
"content": "宜抽卡!3x会像射出的箭雨一样多"
},
{
"good-luck": 8,
"content": "宜学习﹁这次就不会再落榜了!﹂"
},
{
"good-luck": 4,
"content": "宜出游﹁只是监视哥哥的任务罢了﹂"
},
{
"good-luck": -6,
"content": "忌熬夜﹁不是每个人醒来后会有心上人的膝枕的﹂"
},
{
"good-luck": -9,
"content": "忌表白﹁会被别人︵静流进行失忆头槌﹂"
},
{
"good-luck": 20,
"content": "各方面都要注意,大意吃津粥"
}
]
},
{
"charaid": ["45", "46"],
"_name": "克里斯蒂娜",
"type": [
{
"good-luck": 8,
"content": "宜现充﹁遵守誓约会留下好印象﹂"
},
{
"good-luck": 7,
"content": "宜出刀﹁你就是无情的南瓜大菠萝机器﹂"
},
{
"good-luck": 6,
"content": "宜竞技场﹁就这就这,太弱了﹂"
},
{
"good-luck": -6,
"content": "忌社畜﹁会作死顶撞到上司﹂"
},
{
"good-luck": 23,
"content": "就像我现在是经纪人一样,换个环境也未尝不可"
},
{
"good-luck": 22,
"content": "年龄可算不上是问题"
}
]
},
{
"charaid": ["47", "48"],
"_name": "栞",
"type": [
{
"good-luck": 9,
"content": "宜学习﹁新书买来了,要一起看吗?﹂"
},
{
"good-luck": 5,
"content": "宜撸猫﹁小喵特是个乖孩子呢﹂"
},
{
"good-luck": 4,
"content": "宜出游﹁多晒晒太阳有利身体健康﹂"
},
{
"good-luck": -6,
"content": "注意健康﹁抱歉::是我把感冒传给你了吗﹂"
},
{
"good-luck": -7,
"content": "忌社畜﹁不要太过勉强自己,适时寻求他人帮助﹂"
},
{
"good-luck": 27,
"content": "今天好像状态不错,要叫上姐姐一起出去走走吗"
}
]
},
{
"charaid": ["49", "50"],
"_name": "铃奈",
"type": [
{
"good-luck": 7,
"content": "宜推图﹁我暴击弓一箭一个铁乌龟︵雾﹂"
},
{
"good-luck": 6,
"content": "宜现充﹁好想变成跟望一样人气爆棚﹂"
},
{
"good-luck": 6,
"content": "宜黄油﹁这个gal不是那个gal啊!﹂"
},
{
"good-luck": -7,
"content": "会健忘﹁7×7等于多少来着?﹂"
},
{
"good-luck": -9,
"content": "镜暴唯连空花都杀不死"
},
{
"good-luck": 25,
"content": "好难啊!秀才教交我!"
}
]
},
{
"charaid": ["51", "52"],
"_name": "咲恋",
"type": [
{
"good-luck": 10,
"content": "投入努力会获得回报"
},
{
"good-luck": 8,
"content": "宜助人﹁被叫咲恋妈妈其实有点开心的﹂"
},
{
"good-luck": 5,
"content": "宜访友﹁真正的青梅竹马就算世界重置了也还记得对方﹂"
},
{
"good-luck": -7,
"content": "忌氪金﹁这些钱拿来买3080不香吗﹂"
},
{
"good-luck": -9,
"content": "忌水群﹁会被群员掏出傻逼.gif进行嘲笑﹂"
},
{
"good-luck": 23,
"content": "为了救济院的孩子们,遇到困难也能坚持下去"
}
]
},
{
"charaid": ["53", "54"],
"_name": "露娜",
"type": [
{
"good-luck": 7,
"content": "宜现充﹁可以跟我做朋友吗︵迫真﹂"
},
{
"good-luck": -6,
"content": "宜摸鱼﹁工作是不可能的,朋友又交不到,只好摸了﹂"
},
{
"good-luck": -7,
"content": "忌出刀﹁会长我xcw暴击了﹂"
},
{
"good-luck": -10,
"content": "会长我不想再出挂树春黑刀了"
},
{
"good-luck": 27,
"content": "明明想交很多朋友的,为什么都避着我"
}
]
},
{
"charaid": ["55", "56"],
"_name": "霞",
"type": [
{
"good-luck": 8,
"content": "唔姆,今天也要拜托你了"
},
{
"good-luck": 6,
"content": "冷静分析会得出问题的最优解"
},
{
"good-luck": 5,
"content": "宜打?﹁小问号,你是不是有很多朋友﹂"
},
{
"good-luck": -6,
"content": "为人际关系烦恼﹁让我调查看看是谁伤害了优衣﹂"
},
{
"good-luck": -8,
"content": "会被误解﹁是杜宾不是驴!﹂"
},
{
"good-luck": 20,
"content": "助手くん,有问题的话尽管交给王都名侦探我吧!"
}
]
},
{
"charaid": ["57", "58"],
"_name": "真琴",
"type": [
{
"good-luck": 10,
"content": "ごめんユイ‼"
},
{
"good-luck": 7,
"content": "宜熬夜﹁要跟我一起夜间巡逻吗﹂"
},
{
"good-luck": 6,
"content": "宜学习﹁又要数学测试了,上课不能睡着了啊﹂"
},
{
"good-luck": -8,
"content": "会出糗﹁小、小熊胖次怎么了!﹂"
},
{
"good-luck": -9,
"content": "忌出刀﹁会不小心卡掉狼吼﹂"
},
{
"good-luck": 21,
"content": "伤害优衣的家伙在哪啊!"
}
]
},
{
"charaid": ["59", "60"],
"_name": "香织",
"type": [
{
"good-luck": 10,
"content": "一切都会暴击的!"
},
{
"good-luck": 5,
"content": "宜抽卡﹁精神统一切入中线拿下up一气呵成﹂"
},
{
"good-luck": 4,
"content": "宜吃瓜﹁你也来一点?︵苦︶瓜很好吃的!﹂"
},
{
"good-luck": -6,
"content": "忌出刀﹁重要的东西在树上﹂"
},
{
"good-luck": -10,
"content": "忌竞技场﹁狗拳又砸幽灵布丁上了﹂"
},
{
"good-luck": 20,
"content": "一切都会好起来的!"
}
]
},
{
"charaid": ["61", "62"],
"_name": "亚里莎",
"type": [
{
"good-luck": 7,
"content": "宜出游﹁咖啡店的女仆制服想看看吗﹂"
},
{
"good-luck": 6,
"content": "宜访友﹁不知道萝赛莉亚现在怎么样了﹂"
},
{
"good-luck": 5,
"content": "宜瑟图﹁会不停射爆﹂"
},
{
"good-luck": -7,
"content": "MISS﹁我在兰德索尔也会变成地心少女吗﹂"
},
{
"good-luck": -8,
"content": "会变成星际玩家"
},
{
"good-luck": 26,
"content": "找到四叶草了!看来是会有好运"
}
]
},
{
"charaid": ["63", "64"],
"_name": "望",
"type": [
{
"good-luck": 9,
"content": "宜单推﹁演唱会,要来看吗﹂"
},
{
"good-luck": 8,
"content": "宜表白﹁这是为了写出歌词的约会练习﹂"
},
{
"good-luck": 4,
"content": "宜运动﹁偶像活动也需要锻炼身体!﹂"
},
{
"good-luck": -6,
"content": "吃得太多又变胖了"
},
{
"good-luck": -9,
"content": "忌装弱﹁会被群友识破伪装﹂"
},
{
"good-luck": 21,
"content": "偶像也有偶像的烦恼,好想谈一场普通的恋爱啊"
}
]
},
{
"charaid": ["65"],
"_name": "晶",
"type": [
{
"good-luck": 10,
"content": "今后也请作为我的公主骑士努力吧"
},
{
"good-luck": 7,
"content": "宜游戏﹁你就是GM﹂"
},
{
"good-luck": 5,
"content": "宜摸鱼﹁重要人物只有关键时刻才会出场﹂"
},
{
"good-luck": -7,
"content": "忌下厨﹁纳豆奶油可丽饼好像不错啊﹂"
},
{
"good-luck": 24,
"content": "可丽饼卖不出去啊,是哪里出问题了"
}
]
},
{
"charaid": ["66"],
"_name": "花凛",
"type": [
{
"good-luck": 10,
"content": "ヤバイですね☆幸运物是饭团!吃得饱饱的,继续抽下去吧!"
},
{
"good-luck": 7,
"content": "幸运动物是熊!或许能像真正家人一样守护你︵不会吃井"
},
{
"good-luck": 5,
"content": "宜抽卡﹁十连一定会出金,出金一定会吃井﹂"
},
{
"good-luck": -9,
"content": "要小心突发事件!出门可能会被当成「吃井的人」……"
},
{
"good-luck": -10,
"content": "来了来了,这是你要的+19"
},
{
"good-luck": 20,
"content": "你醒啦,我帮你调低3x概率了"
}
]
}
] | [
46708,
62,
20147,
796,
685,
198,
220,
220,
220,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
10641,
1698,
1298,
14631,
16,
1600,
366,
17,
33116,
198,
220,
220,
220,
220,
220,
220,
220,
45434,
3672,
1298,
366,
49035,
107,
165,
... | 1.083815 | 23,540 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
from torch.utils.data import DataLoader
from sklearn.preprocessing import StandardScaler
from models import GRU, rmse_loss
from Dataset import BikeDataSet
if __name__ == '__main__':
    # Train on the bike dataset, tracking the best mean epoch loss and
    # early-stopping after 50 epochs without improvement.
    device = torch.device('cuda:1')
    dataset = BikeDataSet()
    dataloader = DataLoader(dataset, num_workers=1, batch_size=64)
    # NOTE(review): ``Trainer`` is neither defined nor imported in this file's
    # visible imports (only GRU / rmse_loss are) -- confirm where it comes from.
    trainer = Trainer(device)
    best_loss = 10000  # sentinel larger than any expected epoch loss
    best_epoch = -1
    epoch_list = []  # NOTE(review): appears unused -- confirm before removing
    for epoch in range(1000):
        iter_loss_list = []
        for iter, (x, y) in enumerate(dataloader):
            x = x.float().to(device)
            y = y.float().to(device)
            l = trainer.train(x, y)
            iter_loss_list.append(l)
        # mean training loss over the epoch's iterations
        epoch_loss = np.mean(iter_loss_list)
        if epoch_loss < best_loss:
            best_epoch = epoch
            best_loss = epoch_loss
        elif best_epoch + 50 < epoch:
            # early stop: no improvement for 50 epochs (note: exits before
            # the final progress print below)
            break
        print("Epoch:", epoch, "loss:", epoch_loss, "Best Epoch:", best_epoch, "Best Loss:", best_loss)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
... | 2.293617 | 470 |
# -.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.
#
# File Name : tardis_minimal_model.py
#
# Purpose :
#
# Creation Date : 18-02-2016
#
# Last Modified : Tue 02 Aug 2016 15:09:26 CEST
#
# Created By :
#
# _._._._._._._._._._._._._._._._._._._._._.
"""This module provides an interface object which holds the essential
information of a Tardis run to do all the diagnostic tasks for which the
tardisanalysis repository provides tools. Relying on this interface model
objects is a temporary solution until the model storage capability of Tardis
has reached a mature state.
"""
from __future__ import print_function
import pandas as pd
import astropy.units as units
import os
from tardis.model import Radial1DModel
def store_data_for_minimal_model(simulation, buffer_or_fname="minimal_model.hdf5",
                                 path="", mode="virtual"):
    """Simple helper routine to dump all information which are required to
    perform extensive diagnostics with the tardisanalysis tools to an HDF5
    file.

    Parameters
    ----------
    simulation : tardis simulation object
        source of the run data
    buffer_or_fname : str or pd.HDFStore
        name of the hdf5 file or an already-open store
        (default 'minimal_model.hdf5')
    path : str
        location of the data within the HDF5 file (default '', i.e. its root)
    mode : str
        "virtual" (default), "real" or "both"; store the properties of the
        virtual or the real packet population

    Raises
    ------
    ValueError
        on an unknown mode, or mode "virtual" with virtual-packet logging off
    IOError
        if buffer_or_fname is neither a filename nor an HDFStore
    """
    def _save_spectrum_real(key, path, hdf_store):
        """save the real packet spectrum"""
        wave = simulation.runner.spectrum.wavelength.value
        flux = simulation.runner.spectrum.luminosity_density_lambda.value

        luminosity_density = \
            pd.DataFrame.from_dict(dict(wave=wave, flux=flux))
        luminosity_density.to_hdf(hdf_store, os.path.join(path, key))

    def _save_spectrum_virtual(key, path, hdf_store):
        """save the virtual packet spectrum"""
        wave = simulation.runner.spectrum_virtual.wavelength.value
        flux = \
            simulation.runner.spectrum_virtual.luminosity_density_lambda.value

        luminosity_density_virtual = pd.DataFrame.from_dict(dict(wave=wave,
                                                                 flux=flux))
        luminosity_density_virtual.to_hdf(hdf_store, os.path.join(path, key))

    def _save_configuration_dict(key, path, hdf_store):
        """save some information from the basic configuration of the run. For
        now only the time of the simulation, the photospheric radius and the
        inner temperature are stored
        """
        configuration_dict = dict(
            time_of_simulation=simulation.runner.time_of_simulation,
            R_photosphere=(simulation.model.time_explosion *
                           simulation.model._velocity[0]).to("cm"),
            t_inner=simulation.model.t_inner)
        configuration_dict_path = os.path.join(path, 'configuration')
        pd.Series(configuration_dict).to_hdf(hdf_store,
                                             configuration_dict_path)

    possible_modes = ["real", "virtual", "both"]
    if mode not in possible_modes:
        # validate explicitly rather than via assert (asserts vanish under -O)
        raise ValueError(
            "Wrong mode - possible_modes are {:s}".format(
                ", ".join(possible_modes)))

    if mode == "virtual" and simulation.runner.virt_logging == 0:
        raise ValueError(
            "Virtual packet logging is switched off - cannot store the "
            "properties of the virtual packet population")

    # Per-section include maps: value None means "plain attribute, save via
    # _save_model_property"; a callable is invoked to do the saving itself.
    include_from_runner_ = {}
    include_from_spectrum_ = {}
    if mode == "virtual" or mode == "both":
        include_from_runner_.update(
            {'virt_packet_last_interaction_type': None,
             'virt_packet_last_line_interaction_in_id': None,
             'virt_packet_last_line_interaction_out_id': None,
             'virt_packet_last_interaction_in_nu': None,
             'virt_packet_nus': None,
             'virt_packet_energies': None})
        include_from_spectrum_.update(
            {'luminosity_density_virtual': _save_spectrum_virtual})
    if mode == "real" or mode == "both":
        include_from_runner_.update(
            {'last_interaction_type': None,
             'last_line_interaction_in_id': None,
             'last_line_interaction_out_id': None,
             'last_interaction_in_nu': None,
             'output_nu': None,
             'output_energy': None})
        include_from_spectrum_.update(
            {'luminosity_density': _save_spectrum_real})

    include_from_atom_data_ = {'lines': None}

    # BUGFIX: the original mapped 'atom_data' to the undefined name
    # ``_save_atom_data`` (NameError on every call); the nested include map
    # built above is what the save loop below expects, and matches the
    # "/atom_data/lines" layout that minimal_model.from_hdf5 reads back.
    include_from_model_in_hdf5 = {'runner': include_from_runner_,
                                  'atom_data': include_from_atom_data_,
                                  'spectrum': include_from_spectrum_,
                                  'configuration_dict':
                                  _save_configuration_dict,
                                  }

    if isinstance(buffer_or_fname, basestring):
        # NOTE(review): ``basestring`` is Python-2 only; port to ``str``
        # when this module moves to Python 3.
        hdf_store = pd.HDFStore(buffer_or_fname)
    elif isinstance(buffer_or_fname, pd.HDFStore):
        hdf_store = buffer_or_fname
    else:
        raise IOError('Please specify either a filename or an HDFStore')
    print('Writing to path %s' % path)

    for key in include_from_model_in_hdf5:
        if include_from_model_in_hdf5[key] is None:
            # plain attribute of the simulation object
            # NOTE(review): _save_model_property is expected to be defined at
            # module level elsewhere in this file -- confirm.
            _save_model_property(getattr(simulation, key), key, path, hdf_store)
        elif callable(include_from_model_in_hdf5[key]):
            include_from_model_in_hdf5[key](key, path, hdf_store)
        else:
            # nested section: iterate the sub-attributes of simulation.<key>
            try:
                for subkey in include_from_model_in_hdf5[key]:
                    print(subkey)
                    if include_from_model_in_hdf5[key][subkey] is None:
                        _save_model_property(getattr(getattr(simulation, key),
                                                     subkey), subkey,
                                             os.path.join(path, key),
                                             hdf_store)
                    elif callable(include_from_model_in_hdf5[key][subkey]):
                        include_from_model_in_hdf5[key][subkey](
                            subkey, os.path.join(path, key), hdf_store)
                    else:
                        print('Can not save %s' % str(os.path.join(path, key, subkey)))
            except Exception as exc:
                # BUGFIX: was a bare ``except:`` that also trapped
                # KeyboardInterrupt/SystemExit and hid the actual error.
                print('An error occurred while dumping %s to HDF: %r'
                      % (str(os.path.join(path, key)), exc))

    hdf_store.flush()
    hdf_store.close()
class minimal_model(object):
    """Interface object used in many tardisanalysis tools. It holds the
    essential diagnostics information for either the real or the virtual packet
    population of a run.

    This interface object may be filled from an existing Tardis radial1dmodel
    object (for example during the interactive ipython use of Tardis), or
    filled from an HDF5 file (generated by store_data_for_minimal_model).

    Parameters
    ----------
    mode : str
        "real" (default) or "virtual"; defines which packet population is
        stored in the interface object.
    """
    def __init__(self, mode="real"):
        # BUGFIX: the class documented a ``mode`` parameter but defined no
        # constructor, so every method's ``self.mode`` access raised
        # AttributeError; ``readin`` likewise had no initial value.
        self.mode = mode
        # flips to True once from_interactive/from_hdf5 has populated the model
        self.readin = False

    def from_interactive(self, simulation):
        """fill the minimal_model from an existing simulation object

        Parameters
        ----------
        simulation : Simulation
            Tardis simulation object holding the run
        """
        self.time_of_simulation = simulation.runner.time_of_simulation
        self.lines = \
            simulation.plasma.atomic_data.lines.reset_index().set_index(
                'line_id')
        self.R_phot = (simulation.model._velocity[0] *
                       simulation.model.time_explosion).to("cm")
        self.t_inner = simulation.model.t_inner

        if self.mode == "virtual":
            self.last_interaction_type = \
                simulation.runner.virt_packet_last_interaction_type
            self.last_line_interaction_in_id = \
                simulation.runner.virt_packet_last_line_interaction_in_id
            self.last_line_interaction_out_id = \
                simulation.runner.virt_packet_last_line_interaction_out_id
            self.last_interaction_in_nu = \
                simulation.runner.virt_packet_last_interaction_in_nu
            self.packet_nus = \
                simulation.runner.virt_packet_nus * units.Hz
            self.packet_energies = \
                simulation.runner.virt_packet_energies * units.erg
            self.spectrum_wave = \
                simulation.runner.spectrum_virtual.wavelength
            self.spectrum_luminosity = \
                simulation.runner.spectrum_virtual.luminosity_density_lambda
        elif self.mode == "real":
            # keep only escaped packets (non-negative output energy)
            esc_mask = simulation.runner.output_energy >= 0

            self.last_interaction_type = \
                simulation.runner.last_interaction_type[esc_mask]
            self.last_line_interaction_in_id = \
                simulation.runner.last_line_interaction_in_id[esc_mask]
            self.last_line_interaction_out_id = \
                simulation.runner.last_line_interaction_out_id[esc_mask]
            self.last_interaction_in_nu = \
                simulation.runner.last_interaction_in_nu[esc_mask]
            self.packet_nus = \
                simulation.runner.output_nu[esc_mask]
            self.packet_energies = \
                simulation.runner.output_energy[esc_mask]
            self.spectrum_wave = \
                simulation.runner.spectrum.wavelength
            self.spectrum_luminosity = \
                simulation.runner.spectrum.luminosity_density_lambda
        else:
            raise ValueError("mode must be 'real' or 'virtual'")
        self.last_interaction_in_nu = self.last_interaction_in_nu * units.Hz
        self.readin = True

    def from_hdf5(self, buffer_or_fname):
        """Fill minimal_model from an HDF5 file, which was created by using
        store_data_for_minimal_model.

        Parameters
        ----------
        buffer_or_fname : str, file stream
            name of file object containing the essential Tardis run information
        """
        if isinstance(buffer_or_fname, basestring):
            # NOTE(review): ``basestring`` is Python-2 only; port to ``str``
            # together with the rest of the module.
            hdf_store = pd.HDFStore(buffer_or_fname)
        elif isinstance(buffer_or_fname, pd.HDFStore):
            hdf_store = buffer_or_fname
        else:
            raise IOError('Please specify either a filename or an HDFStore')

        self.time_of_simulation = \
            hdf_store["/configuration"].time_of_simulation
        self.lines = hdf_store["/atom_data/lines"].reset_index().set_index(
            'line_id')
        self.R_phot = hdf_store["/configuration"].R_photosphere
        self.t_inner = hdf_store["/configuration"].t_inner
        if self.mode == "virtual":
            self.last_interaction_type = \
                hdf_store["/runner/virt_packet_last_interaction_type"]
            self.last_line_interaction_in_id = \
                hdf_store["/runner/virt_packet_last_line_interaction_in_id"]
            self.last_line_interaction_out_id = \
                hdf_store["/runner/virt_packet_last_line_interaction_out_id"]
            self.last_interaction_in_nu = \
                hdf_store["/runner/virt_packet_last_interaction_in_nu"]
            self.packet_nus = \
                hdf_store["/runner/virt_packet_nus"]
            self.packet_energies = \
                hdf_store["/runner/virt_packet_energies"]
            self.spectrum_wave = \
                hdf_store["/spectrum/luminosity_density_virtual"]["wave"]
            self.spectrum_luminosity = \
                hdf_store["/spectrum/luminosity_density_virtual"]["flux"]
        elif self.mode == "real":
            # keep only escaped packets (non-negative output energy)
            esc_mask = hdf_store["/runner/output_energy"] >= 0

            self.last_interaction_type = \
                hdf_store["/runner/last_interaction_type"][esc_mask]
            self.last_line_interaction_in_id = \
                hdf_store["/runner/last_line_interaction_in_id"][esc_mask]
            self.last_line_interaction_out_id = \
                hdf_store["/runner/last_line_interaction_out_id"][esc_mask]
            self.last_interaction_in_nu = \
                hdf_store["/runner/last_interaction_in_nu"][esc_mask]
            self.packet_nus = \
                hdf_store["/runner/output_nu"][esc_mask]
            self.packet_energies = \
                hdf_store["/runner/output_energy"][esc_mask]
            self.spectrum_wave = \
                hdf_store["/spectrum/luminosity_density"]["wave"]
            self.spectrum_luminosity = \
                hdf_store["/spectrum/luminosity_density"]["flux"]
        else:
            raise ValueError("mode must be 'real' or 'virtual'")

        # unwrap pandas objects to plain arrays and attach units
        self.last_interaction_type = self.last_interaction_type.values
        self.last_line_interaction_in_id = \
            self.last_line_interaction_in_id.values
        self.last_line_interaction_out_id = \
            self.last_line_interaction_out_id.values
        self.last_interaction_in_nu = \
            self.last_interaction_in_nu.values * units.Hz
        self.packet_nus = self.packet_nus.values * units.Hz
        self.packet_energies = self.packet_energies.values * units.erg
        self.spectrum_wave = self.spectrum_wave.values
        self.spectrum_luminosity = self.spectrum_luminosity.values
        self.readin = True
| [
2,
532,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
7874,
13,
198,
2,
198,
2,
220,
9220,
6530,
1058,
256,
446,
271,
62,
1084,
4402,
62,
19849,
13,
9078,
198,
2,
... | 2.116491 | 6,258 |
import copy
import glob
import mss
import os
import pygame
import random
import sys
from PIL import Image, ImageOps
from threading import Timer
from core.device_manager import HandJointsFrame
from core.kinect_manager import KinectManager
# Asset lookup tables resolved once at import time.
# NOTE(review): resource_path is not defined or imported in this chunk --
# confirm it is provided elsewhere in the module.
CRACK_PATHS = glob.glob(os.path.join(resource_path(), "assets", "cracks", "*.png"))
PUNCH_PATH = resource_path(os.path.join("assets", "punch.png"))
CRACK_SOUNDS = glob.glob(os.path.join(resource_path(), "assets", "audio", "*.wav"))
| [
11748,
4866,
198,
11748,
15095,
198,
11748,
285,
824,
198,
11748,
28686,
198,
11748,
12972,
6057,
198,
11748,
4738,
198,
11748,
25064,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
41472,
198,
6738,
4704,
278,
1330,
5045,
263,
198,
198,
6... | 3.012658 | 158 |
import marso
import pytest
| [
11748,
1667,
568,
198,
198,
11748,
12972,
9288,
628
] | 3.222222 | 9 |
from django.shortcuts import render
# Create your views here.
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import UserRegisterForm,UserUpdateForm,ProfileUpdateForm
# Create your views here.
@login_required
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
... | 3.72093 | 86 |
from rest_framework import serializers
from uploader.models import UploadFileModel
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
9516,
263,
13,
27530,
1330,
36803,
8979,
17633,
198
] | 4.611111 | 18 |
import sys
import json
import mock
import requests
import requests_mock
import unittest2 as unittest
from drf_client.connection import Api
from drf_client.exceptions import HttpClientError, HttpServerError
| [
11748,
25064,
198,
11748,
33918,
198,
11748,
15290,
198,
11748,
7007,
198,
11748,
7007,
62,
76,
735,
198,
11748,
555,
715,
395,
17,
355,
555,
715,
395,
198,
198,
6738,
1553,
69,
62,
16366,
13,
38659,
1330,
5949,
72,
198,
6738,
1553,
... | 3.483333 | 60 |
import logging
import colorlog
| [
11748,
18931,
201,
198,
11748,
3124,
6404,
201
] | 4 | 8 |
import base64
import hashlib
import datetime
| [
11748,
2779,
2414,
198,
11748,
12234,
8019,
198,
11748,
4818,
8079,
198
] | 3.75 | 12 |
from datetime import datetime, date
import json
import urllib
from bottle import request, response
import bauble.db as db
from bauble.model import Model, SystemModel
class ArgsPlugin(object):
    """
    Plugin to add an args property to every request that contains the url_args for the route.
    """
    # Bottle plugin identification: unique plugin name and plugin API version.
    name = 'args'
    api = 2
    # NOTE(review): a Bottle plugin normally implements apply(); none is
    # visible here -- confirm it exists elsewhere or the plugin is a no-op.
class OptionsPlugin(object):
    """
    Plugin to add an OPTIONS http method request handler for ever route defined
    in the app. This plugin should be installed after all the routes have been
    setup.
    """
    # Bottle plugin identification: unique plugin name and plugin API version.
    name = 'options'
    api = 2

    def apply(self, callback, route):
        """
        This method doesn't do anything but it's required to be implemented by the
        Bottle plugin system
        """
        # Returning the callback unchanged leaves the route's behavior intact.
        return callback
class CORSPlugin(object):
    """
    Bottle.py plugin to add CORS headers to each request.
    """
    # Bottle plugin identification: unique plugin name and plugin API version.
    name = 'cors'
    api = 2
    # NOTE(review): no apply() is visible in this chunk, so no CORS headers
    # are actually added here -- confirm the implementation lives elsewhere.
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
3128,
198,
11748,
33918,
198,
11748,
2956,
297,
571,
198,
198,
6738,
9294,
1330,
2581,
11,
2882,
198,
198,
11748,
26605,
26664,
13,
9945,
355,
20613,
198,
6738,
26605,
26664,
13,
19849,
1330,
9104... | 2.967846 | 311 |
# encoding: latin1
"""spatial lag of a variable
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2010-11 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['spatialLag']
import numpy
def spatialLag(data,w):
"""
This method recives a dictionary of variables an
return a copy of the dictionary with variables
spatially lagged.
:param data: data dictionary to be lagged
:type data: dictionary
:rtype: dictionary (Y dictionary with the lag of vars)
"""
data = [data[x] for x in data]
data = numpy.matrix(data)
data = data.transpose()
w = numpy.matrix(w)
data = data*w
data = data.transpose()
y = {}
for nd, d in enumerate(data):
y[nd] = d.tolist()[0]
return y
| [
2,
21004,
25,
3042,
259,
16,
198,
37811,
2777,
34961,
19470,
286,
257,
7885,
198,
37811,
198,
834,
9800,
834,
796,
366,
41,
7258,
327,
13,
10343,
4188,
11,
9300,
47983,
5147,
1192,
15666,
1,
198,
834,
66,
20696,
834,
796,
366,
15269... | 2.507205 | 347 |
import tkinter as tk
from tkinter import ttk
from matplotlib.pyplot import close
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg,
NavigationToolbar2Tk)
from matplotlib.mathtext import math_to_image
from io import BytesIO
from PIL import ImageTk, Image
from sympy import latex
from math import pi, cos, sin
from sgraph import *
from braid import *
from col_perm import *
from pres_mat import *
from visualization import *
from casson_gordon import *
from typing import List, Tuple, Callable, Dict
from math import log10, floor
font_style = "Calibri"
font_size = 25
# Function for rounding eigenvalues
# Class for main window
# Compute invariants with defaults
# Processing Link Info style inputs
# Processing comma separated inputs
# Processing space separated inputs
# Command for computing the cycle decomposition and generating the braid
# Command for computing the cycle decomposition and generating the braid
# Print latex
# Save the seifert matrices to a file
# Command for computing and displaying invariants
# Command to view the braid
# Command to view the C-Complex
# Class for invariants
# Renders latex as a label and places it on the grid
# Class for strand inputs
# Make a braid and return error messages
# Class for color inputs
# Make a colored braid and return error messages
# Command for getting the coloured braid
# Makes the graph for the colored braid derived from the color inputs
# Class for signature inputs
# Get the signature input and return error messages
# Class for Casson Gordon inputs
# Executing everything
if __name__ == "__main__":
root = tk.Tk()
root.title("Clasper")
# Get the screen dimension
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# Find the center point
center_x = int(screen_width/2)
center_y = int(screen_height/2)
window_width = screen_width
window_height = screen_height
# Set the position of the window to the center of the screen
root.geometry(f'{window_width}x{window_height}+{center_x}+{0}')
root.state('zoomed')
clasper_canvas = tk.Canvas(root)
hbar = tk.Scrollbar(root, orient='horizontal',
command=clasper_canvas.xview)
scrollbar = tk.Scrollbar(root, orient='vertical',
command=clasper_canvas.yview)
hbar.pack(side="bottom", fill="both")
clasper_canvas.pack(side="left", fill="both", expand=True, padx=10, pady=10)
scrollbar.pack(side="right", fill="both")
clasper_canvas['yscrollcommand'] = scrollbar.set
clasper_canvas['xscrollcommand'] = hbar.set
clasper = Clasper(clasper_canvas)
clasper_canvas.create_window(0, 0,
height=2800,
width=3000,
window=clasper, anchor="nw", tags="frame")
clasper_canvas.bind("<Configure>", onCanvasConfigure)
clasper_canvas.configure(scrollregion=clasper_canvas.bbox("all"))
clasper_canvas.itemconfig('frame',
height=2800,
width=3000)
root.bind_all("<MouseWheel>", on_mousewheel)
root.bind_all("<Shift-MouseWheel>", on_shift_mousewheel)
root.bind('<Return>', clasper.compute_with_defaults)
try:
from ctypes import windll
windll.shcore.SetProcessDpiAwareness(1)
finally:
root.mainloop()
# Setting up the entry for strands
"""ttk.Label(
self, text='Number of Strands:',
font=(font_style, font_size)).grid(column=0, row=2, pady=10)
self.strand_str = tk.StringVar()
ttk.Entry(self, textvariable=self.strand_str,
font=(font_style, font_size)).grid(
column=1, row=2, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for the colour list
"""ttk.Label(self, text='Colours (start from 0, BFD):',
font=(font_style, font_size)).grid(
column=0, row=5, pady=10)
self.colour_list = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_list,
font=(font_style, font_size)).grid(
column=1, row=5, padx=0, pady=10, sticky='W', columnspan=3)"""
# Set up entry for orientations of colours
"""ttk.Label(self, text='Orientations (+1/-1, BFD):',
font=(font_style, font_size)).grid(
column=0, row=6, pady=10)
self.colour_signs = tk.StringVar()
ttk.Entry(self, textvariable=self.colour_signs,
font=(font_style, font_size)).grid(
column=1, row=6, padx=0, pady=10, sticky='W', columnspan=3)
"""
# Set up entry for complex tuple
"""ttk.Label(self, text='Signature input,'+
'space sep\n (1/3 means 2*pi/3, BFD):',
font=(font_style, font_size)).grid(
column=0, row=7, pady=10)
self.cplx_tuple = tk.StringVar()
ttk.Entry(self, textvariable=self.cplx_tuple,
font=(font_style, font_size)).grid(
column=1, row=7, padx=0, pady=10, sticky='W', columnspan=2)"""
| [
11748,
256,
74,
3849,
355,
256,
74,
198,
6738,
256,
74,
3849,
1330,
256,
30488,
198,
6738,
2603,
29487,
8019,
13,
9078,
29487,
1330,
1969,
198,
6738,
2603,
29487,
8019,
13,
26875,
1330,
11291,
198,
6738,
2603,
29487,
8019,
13,
1891,
2... | 2.501471 | 2,040 |
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.chrome.options import Options
import re
import time
start = time.perf_counter()
PATH = "/home/ahmad/Templates/chrome-driver/chromedriver"
serv = Service(PATH)
'''
The three commented lines are needed if you want to run Selenium in a headless mode.
'''
# options = Options()
# options.headless = True
# webdriver.Chrome(service=serv, options=options)
driver = webdriver.Chrome(service=serv)
driver.get("https://10fastfingers.com/typing-test/english")
try:
wordsContainer = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "row1"))
)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "highlight"))
)
spans = re.findall(r"<span wordnr[^<]*", driver.page_source)
words = map(lambda span : span.split(">")[1], spans)
words = list(words)
inputField = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "inputfield"))
)
for word in words:
inputField.send_keys(word + " ")
timer = driver.find_element(By.ID, "timer")
timer = timer.text[2:]
timer = int(timer)
print(f"{(len(words) * 60) / (60 - timer)} WPM")
except Exception:
print("An error occured!")
driver.quit()
end = time.perf_counter()
print(f"The program took {end - start} seconds") | [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
15271,
1330,
4809,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
32103,
21321,
198,
6738,
384,
11... | 2.693603 | 594 |
import bottle
import model
ID_IGRE_COOKIE_NAME = 'id_igre'
COOKIE_SECRET = 'my_very_special - secret key'
vislice = model.Vislice()
vislice.preberi_iz_datoteke()
@bottle.get('/')
@bottle.post('/nova_igra/')
@bottle.get('/igra/')
@bottle.post('/igra/')
bottle.run(reloader=True, debug=True)
| [
11748,
9294,
198,
11748,
2746,
628,
198,
2389,
62,
3528,
2200,
62,
34,
15308,
10008,
62,
20608,
796,
705,
312,
62,
328,
260,
6,
198,
34,
15308,
10008,
62,
23683,
26087,
796,
705,
1820,
62,
548,
62,
20887,
532,
3200,
1994,
6,
198,
... | 2.179856 | 139 |
import base64
import datetime
import re
import os
def parse_irc_msg(s):
"""Breaks a message from an IRC server into its tags, prefix, command, and arguments.
"""
if not s:
raise IRCBadMessage("Empty IRC line.")
tags = {}
if s.startswith("@"):
s = s[1:]
tags_str, s = s.split(" ", 1)
tags = deconstruct_irc_tags(tags_str)
prefix = ''
trailing = []
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return tags, prefix, command, args
IRC_TAG_VALUE_ESCAPE_TRANSLATION = {";": "\\:", " ": "\\s", "\r": "\\r", "\n": "\\n", "\\": "\\\\"} | [
11748,
2779,
2414,
198,
11748,
4818,
8079,
198,
11748,
302,
198,
11748,
28686,
198,
198,
4299,
21136,
62,
1980,
62,
19662,
7,
82,
2599,
198,
220,
220,
220,
37227,
12679,
4730,
257,
3275,
422,
281,
30039,
4382,
656,
663,
15940,
11,
212... | 2.168937 | 367 |
from stretchme import Structure
from simulation_data import data_partial
from os import remove, replace
import sys
import numpy as np
directory = '/Users/pawel/PycharmProjects/Rozciaganie/data/'
proteins = {'5wyr': 248, 'trmd-no-knot': 240, 'trmd': 240, 'tm1570': 193, 'fuzja': 432}
missing = {'5wyr': 14, 'trmd-no-knot': 14, 'trmd': 14, 'tm1570': 14, 'fuzja': 28}
means = {('trmd', 'exp'): [45, 52, 87],
('trmd', 'kexp'): [40, 48, 82],
('tm1570', 'exp'): [10, 20, 25, 30, 35, 40, 45, 52, 57, 62, 69],
('tm1570', 'kexp'): [10, 20, 25, 30, 35, 40, 45, 52, 57, 62, 69],
('fuzja', 'exp'): None,
('fuzja', 'kexp'): None}
statess = {'exp': 'unknotted', 'kexp': 'knotted', 'pexp': 'partially'}
# protein, model = sys.argv[1], sys.argv[2]
to_c = [('exp', 'trmd'), ('kexp', 'tm1570'), ('exp', 'tm1570'), ('kexp', 'fuzja'), ('exp', 'fuzja'), ('pexp', 'fuzja'),
('cc', 'trmd'), ('cd', 'trmd'), ('ce', 'trmd'), ('pa', 'trmd'), ('cb', 'trmd'), ('pc', 'trmd'), ('pd', 'trmd'),
('sa', 'trmd'), ('sb', 'trmd'), ('sc', 'trmd'), ('sd', 'trmd'), ('ca', 'trmd-no-knot'), ('cb', 'trmd-no-knot'),
('cc', 'trmd-no-knot'), ('pa', 'trmd-no-knot'), ('pb', 'trmd-no-knot'), ('pc', 'trmd-no-knot'),
('sa', 'trmd-no-knot'), ('sb', 'trmd-no-knot'), ('sc', 'trmd-no-knot'), ('ca', '5wyr'), ('cb', '5wyr'),
('cc', '5wyr'), ('pa', '5wyr'), ('pb', '5wyr'), ('pc', '5wyr'), ('sa', '5wyr'), ('sb', '5wyr'), ('sc', '5wyr')]
# for model, protein in [('exp', 'trmd'), ('kexp', 'tm1570'), ('exp', 'tm1570'), ('kexp', 'fuzja'), ('exp', 'fuzja'), ('pexp', 'fuzja')]:
protein = 'trmd'
for model in ['exp']:
print(model, protein)
residues = proteins[protein]
name_type = protein + '_' + model
data_list = data_partial[name_type]
run_whole_experiment(protein)
# for model in ['aa']:
# print(model)
# name_type = 'trmd' + '_' + model
# data_list = data_partial[name_type]
# run_whole('trmd', model)
# replace('latex.info', model + '_whole_latex.info')
# replace('data.info', model + '_whole_data.info')
| [
6738,
7539,
1326,
1330,
32522,
198,
6738,
18640,
62,
7890,
1330,
1366,
62,
47172,
198,
6738,
28686,
1330,
4781,
11,
6330,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
34945,
796,
31051,
14490,
14,
79,
707,
417,
14,
... | 2.139059 | 978 |
# Generated by Django 3.1 on 2020-08-13 08:17
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
319,
12131,
12,
2919,
12,
1485,
8487,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.892857 | 28 |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from assertpy import assert_that
from common.utils import EventType, Host, UpdateEvent
from sqswatcher.plugins.slurm import _update_gres_node_lists, _update_node_lists
# Input: existing gres_node_list, events to be processed.
# Expected results: updated gres_node_list
@pytest.mark.parametrize(
"gres_node_list, events, expected_result",
[
(
["NodeName=ip-10-0-000-111 Name=gpu Type=tesla File=/dev/nvidia[0-15]\n"],
[
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
],
[
"NodeName=ip-10-0-000-111 Name=gpu Type=tesla File=/dev/nvidia[0-15]\n",
"NodeName=ip-10-0-000-222 Name=gpu Type=tesla File=/dev/nvidia[0-15]\n",
],
),
(
["NodeName=ip-10-0-000-111 Name=gpu Type=tesla File=/dev/nvidia[0-15]\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 16)),
],
[],
),
(
# GPU files should be updated after remove/add sequence
["NodeName=ip-10-0-000-111 Name=gpu Type=tesla File=/dev/nvidia[0-1]\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
],
["NodeName=ip-10-0-000-111 Name=gpu Type=tesla File=/dev/nvidia[0-15]\n"],
),
],
ids=["repetitive_add", "remove_nonexisting_node", "reusing_nodename"],
)
# Input: existing gpu_node_list, events to be processed.
# Expected results: (updated node_list, nodes_to_restart)
@pytest.mark.parametrize(
"gpu_node_list, events, expected_result",
[
(
["NodeName=ip-10-0-000-111 CPUs=32 Gres=gpu:tesla:16 State=UNKNOWN\n"],
[
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
],
(
[
"NodeName=ip-10-0-000-111 CPUs=32 Gres=gpu:tesla:16 State=UNKNOWN\n",
"NodeName=ip-10-0-000-222 CPUs=32 Gres=gpu:tesla:16 State=UNKNOWN\n",
],
# Note nodes_to_restart list is expected to be repetitive because we want to restart with every ADD
["ip-10-0-000-111", "ip-10-0-000-222", "ip-10-0-000-111"],
),
),
(
["NodeName=ip-10-0-000-111 CPUs=32 Gres=gpu:tesla:16 State=UNKNOWN\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 16)),
],
([], []),
),
(
# CPU/GPU information should be updated after remove/add sequence
["NodeName=ip-10-0-000-111 CPUs=8 Gres=gpu:tesla:1 State=UNKNOWN\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 16)),
],
(["NodeName=ip-10-0-000-111 CPUs=32 Gres=gpu:tesla:16 State=UNKNOWN\n"], ["ip-10-0-000-111"]),
),
],
ids=["repetitive_add", "remove_nonexisting_node", "reusing_nodename"],
)
# Input: existing gpu_node_list, events to be processed.
# Expected results: (updated node_list, nodes_to_restart)
@pytest.mark.parametrize(
"node_list, events, expected_result",
[
(
["NodeName=ip-10-0-000-111 CPUs=32 State=UNKNOWN\n"],
[
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 0)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 0)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 0)),
],
(
[
"NodeName=ip-10-0-000-111 CPUs=32 State=UNKNOWN\n",
"NodeName=ip-10-0-000-222 CPUs=32 State=UNKNOWN\n",
],
# Note nodes_to_restart list is expected to be repetitive because we want to restart with every ADD
["ip-10-0-000-111", "ip-10-0-000-222", "ip-10-0-000-111"],
),
),
(
["NodeName=ip-10-0-000-111 CPUs=32 State=UNKNOWN\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 0)),
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-222", "32", 0)),
],
([], []),
),
(
# CPU information should be updated after remove/add sequence
["NodeName=ip-10-0-000-111 CPUs=8 State=UNKNOWN\n"],
[
UpdateEvent(EventType.REMOVE, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 0)),
UpdateEvent(EventType.ADD, "some message", Host("i-0c1234567", "ip-10-0-000-111", "32", 0)),
],
(["NodeName=ip-10-0-000-111 CPUs=32 State=UNKNOWN\n"], ["ip-10-0-000-111"]),
),
],
ids=["repetitive_add", "remove_nonexisting_node", "reusing_nodename"],
)
| [
2,
15069,
13130,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
11074,
921,
743,
407,
779,
428,
2393,
2845,
28... | 2.005427 | 3,317 |
import math
from decimal import Decimal, getcontext
getcontext().prec = 30
if __name__ == '__main__':
v1 = Vector([8.218, -9.341])
v2 = Vector([-1.129, 2.111])
print('Add:', v1 + v2)
v3 = Vector([7.119, 8.215])
v4 = Vector([-8.223, 0.878])
print('Minus:', v3 - v4)
v5 = Vector([1.671, -1.012, -.318])
a = 7.41
print('Scalling:', v5 * a)
v6 = Vector([-0.221, 7.437])
print('Magnitude:', v6.magnitude())
v7 = Vector([5.581, -2.136])
print('Unit Vector:', v7.unit_vector())
v8 = Vector([-5.955, -4.904, -1.874])
v9 = Vector([-4.496, -8.755, 7.103])
print(v8.dot_product(v9))
v10 = Vector([7.35, 0.221, 5.188])
v11 = Vector([2.751, 8.259, 3.985])
print(v10.angle(v11, 'degree'))
v12 = Vector([-2.328, -7.284, -1.214])
v13 = Vector([-1.821, 1.072, -2.94])
print(v12.is_parallel_to(v13))
print(v12.is_orthogonal_to(v13))
v14 = Vector([3.009, -6.172, 3.692, -2.51])
v15 = Vector([6.404, -9.144, 2.759, 8.718])
print(v14.projection_on(v15))
print(v14.orthogonal_on(v15))
v16 = Vector([8.462, 7.893, -8.187])
v17 = Vector([6.984, -5.975, 4.778])
print(v16.cross_product(v17))
v16 = Vector([-8.987, -9.838, 5.031])
v17 = Vector([-4.268, -1.861, -8.866])
print(v16.parallelogram_spanned_with(v17))
v16 = Vector([1.5, 9.547, 3.691])
v17 = Vector([-6.007, 0.124, 5.772])
print(v16.triangle_spanned_with(v17))
v18 = Vector([1.671, -1.012, -.318])
a = 7.41
print('Scalling:', a * v18) | [
11748,
10688,
198,
6738,
32465,
1330,
4280,
4402,
11,
651,
22866,
198,
198,
1136,
22866,
22446,
3866,
66,
796,
1542,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
410,
16,
796,
20650,
26933,
2... | 1.938287 | 794 |
BinaryOperators = {'/' : lambda a, b: '\\frac{' + a + '}{' + b + '}'}
UnaryOperators = {'sqrt' : lambda a: '\\sqrt{'+ a + '}'}
read(set('2 / 4 * 3'))
| [
33,
3219,
18843,
2024,
796,
1391,
26488,
6,
1058,
37456,
257,
11,
275,
25,
705,
6852,
31944,
90,
6,
1343,
257,
1343,
705,
18477,
6,
1343,
275,
1343,
705,
92,
6,
92,
198,
3118,
560,
18843,
2024,
796,
1391,
6,
31166,
17034,
6,
1058,... | 2.04 | 75 |
# file: parser.py
# vim:fileencoding=utf-8:ft=python
# Copyright © 2014-2021 R.F. Smith <rsmith@xs4all.nl>. All rights reserved.
# SPDX-License-Identifier: BSD-2-Clause
# Created: 2014-02-21 21:35:41 +0100
# Last modified: 2022-01-21T17:09:55+0100
"""Parser for lamprop files."""
import copy
import logging
from .core import fiber, resin, lamina, laminate
msg = logging.getLogger("parser")
def parse(filename):
"""
Parse a lamprop file.
Arguments:
filename: The name of the file to parse.
Returns
A list of types.laminate.
"""
try:
rd, fd, ld = _directives(filename)
except IOError:
msg.warning("cannot read '{}'.".format(filename))
return []
fdict = _get_components(fd, fiber)
msg.info("found {} fibers in '{}'".format(len(fdict), filename))
rdict = _get_components(rd, resin)
msg.info("found {} resins in '{}'".format(len(rdict), filename))
boundaries = [j for j in range(len(ld)) if ld[j][1][0] == "t"] + [len(ld)]
bpairs = [(a, b) for a, b in zip(boundaries[:-1], boundaries[1:])]
msg.info("found {} possible laminates in '{}'".format(len(bpairs), filename))
laminates = []
for a, b in bpairs:
current = ld[a:b]
lam = _laminate(current, rdict, fdict)
if lam:
laminates.append(lam)
msg.info("found {} laminates in '{}'".format(len(laminates), filename))
return laminates
def _directives(filename):
"""
Read the directives from a lamprop file.
Arguments:
filename: The name of the file to parse.
Returns:
A 3-tuple (resin directives, fiber directives, laminate directives)
"""
with open(filename, encoding="utf-8") as df:
data = [ln.strip() for ln in df]
# Filter out lines with directives.
directives = [
(num, ln)
for num, ln in enumerate(data, start=1)
if len(ln) > 1 and ln[1] == ":" and ln[0] in "tmlscfr"
]
msg.info("found {} directives in '{}'".format(len(directives), filename))
rd = [(num, ln) for num, ln in directives if ln[0] == "r"]
fd = [(num, ln) for num, ln in directives if ln[0] == "f"]
ld = [(num, ln) for num, ln in directives if ln[0] in "tmlsc"]
return rd, fd, ld
def _get_numbers(directive):
"""
Retrieve consecutive floating point numbers from a directive.
Arguments:
directive: A 2-tuple (int, str).
Returns:
A tuple of floating point numbers and the remainder of the string.
"""
num, line = directive
numbers = []
items = line.split()[1:]
for j in items:
try:
numbers.append(float(j))
except ValueError:
break
newitems = line.split(maxsplit=len(numbers)+1)[1:]
if len(newitems) > len(numbers):
remain = newitems[-1]
else:
remain = ""
return tuple(numbers), remain
def _laminate(ld, resins, fibers):
"""
Parse a laminate definition.
This must be a t-directive, followed by an m-directive, followed by one or
more l-directives and optionally finished by an s-directive.
Arguments:
ld: A sequence of (number, line) tuples describing a laminate.
resins: A dictionary of resins, keyed by their names.
fibers: A dictionary of fibers, keyed by their names.
Returns:
A laminate dictionary, or None.
"""
sym = False
if ld[0][1].startswith("t"):
lname = ld[0][1][2:].strip()
if lname == "":
msg.warning("no laminate name on line {}".format(ld[0][0]))
return None
else:
msg.warning("no 't' directive on line {}".format(ld[0][0]))
return None
try:
if not ld[1][1].startswith("m"):
raise ValueError
common_vf, rname = ld[1][1][2:].split(maxsplit=1)
common_vf = float(common_vf)
if rname not in resins:
msg.warning("unknown resin '{}' on line {}".format(rname, ld[1][0]))
raise ValueError
except ValueError:
msg.warning("no valid 'm' directive on line {}".format(ld[1][0]))
return None
if ld[-1][1].startswith("s"):
sym = True
del ld[-1]
llist = []
for directive in ld[2:]:
if directive[1].startswith("c"): # Comment line.
llist.append(directive[1][2:].strip())
continue
lamina = _get_lamina(directive, fibers, resins[rname], common_vf)
if lamina:
llist.append(lamina)
if not llist:
msg.warning("empty laminate '{}'".format(lname))
return None
if sym:
msg.info("laminate '{}' is symmetric".format(lname))
llist = llist + _extended(llist)
return laminate(lname, llist)
def _extended(original):
"""
Create the extension to the `original` list to make the laminate symmetric.
The position of the comments is taken into account.
"""
if sum(1 for la in original if isinstance(la, str)) == 0:
return original[::-1]
layers = copy.deepcopy(original)
if not isinstance(layers[-1], str):
layers.append("__")
if not isinstance(layers[0], str):
layers.insert(0, "unknown")
idx = [n for n, v in enumerate(layers) if isinstance(v, str)]
pairs = list(zip(idx[:-1], idx[1:]))[::-1]
extension = []
for s, e in pairs:
if layers[s] == "__":
extension += layers[s + 1 : e][::-1] # noqa
else:
extension += [layers[s]] + layers[s + 1 : e][::-1] # noqa
return extension
def _get_components(directives, tp):
"""
Parse fiber and resin lines.
Arguments:
directives: A sequence of (number, line) tuples describing fibers/resins.
tp: The conversion function to use. Either core.fiber or core.resin
Returns:
A list of fiber dictionaries
"""
rv = []
names = []
tname = tp.__name__
w1 = "expected 4 numbers for a {} on line {}, found {}; skipping."
w2 = 'duplicate {} "{}" on line {} ignored.'
w3 = "{} must be >0 on line {}; skipping."
w4 = "Poisson's ratio on line {} should be >0 and <0.5; skipping."
w5 = 'missing {} name on line {}; skipping.'
for directive in directives:
ln = directive[0]
numbers, name = _get_numbers(directive)
count = len(numbers)
if count != 4:
msg.warning(w1.format(tname, ln, count))
continue
if len(name) == 0:
msg.warning(w5.format(tname, ln))
continue
if name in names:
msg.warning(w2.format(tname, name, ln))
continue
E, ν, α, ρ = numbers
if E < 0:
msg.warning(w3.format("Young's modulus", ln))
continue
if ρ < 0:
msg.warning(w3.format("Density", ln))
continue
if ν < 0 or ν >= 0.5:
msg.warning(w4.format(ln))
continue
rv.append(tp(*numbers, name))
return {comp.name: comp for comp in rv}
def _get_lamina(directive, fibers, resin, vf):
"""
Parse a lamina line.
Arguments:
directive: A 2-tuple (int, str) that contains the line number and
a lamina line.
resins: A dictionary of resins, keyed by their names.
fibers: A dictionary of fibers, keyed by their names.
vf: The global fiber volume fraction as a floating point number
between 0 and 1.
Returns:
A lamina dictionary, or None.
"""
w1 = "invalid lamina line {}, '{}'"
w2 = "unknown fiber '{}' on line {}"
ln, line = directive
numbers, fname = _get_numbers(directive)
if len(numbers) == 2:
numbers = numbers + (vf,)
elif len(numbers) != 3:
msg.warning(w1.format(ln, line))
return None
if fname not in fibers:
msg.warning(w2.format(fname, ln))
return None
return lamina(fibers[fname], resin, *numbers)
| [
2,
2393,
25,
30751,
13,
9078,
198,
2,
43907,
25,
7753,
12685,
7656,
28,
40477,
12,
23,
25,
701,
28,
29412,
198,
2,
15069,
10673,
1946,
12,
1238,
2481,
371,
13,
37,
13,
4176,
1279,
3808,
22947,
31,
34223,
19,
439,
13,
21283,
28401,... | 2.250711 | 3,518 |
import conversion as conv
| [
11748,
11315,
355,
3063,
198
] | 5.2 | 5 |
import sys, os, cv2
import numpy as np
import time, datetime
import logging
import copy, random, itertools
from prettytable import PrettyTable
import pickle
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import config
import constants
from config import args, parse_args, ConfigContext
from models import build_model,build_teacher_model
from models.balanced_dataparallel import DataParallel
from utils import *
from utils.projection import vertices_kp3d_projection
from utils.train_utils import justify_detection_state
from evaluation import compute_error_verts, compute_similarity_transform, compute_similarity_transform_torch, \
batch_compute_similarity_transform_torch, compute_mpjpe, \
determ_worst_best, reorganize_vis_info
from dataset.mixed_dataset import MixedDataset,SingleDataset
from visualization.visualization import Visualizer
if args().model_precision=='fp16':
from torch.cuda.amp import autocast, GradScaler
| [
11748,
25064,
11,
28686,
11,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
11,
4818,
8079,
198,
11748,
18931,
198,
11748,
4866,
11,
4738,
11,
340,
861,
10141,
198,
6738,
2495,
11487,
1330,
20090,
10962,
198,
11748,
... | 3.197015 | 335 |
from pyminion.expansions.base import estate, smithy, vassal, village
from pyminion.game import Game
from pyminion.players import Human
| [
6738,
12972,
1084,
295,
13,
11201,
504,
507,
13,
8692,
1330,
7964,
11,
895,
342,
88,
11,
410,
562,
282,
11,
7404,
198,
6738,
12972,
1084,
295,
13,
6057,
1330,
3776,
198,
6738,
12972,
1084,
295,
13,
32399,
1330,
5524,
628,
628,
628,
... | 3.227273 | 44 |
#!/usr/bin/env python
"""
Solution to Project Euler Problem 3
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
Find the largest prime factor of the given number
"""
from factorization import factors
TARGET = 600851475143
if __name__ == "__main__":
test()
run()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
46344,
284,
4935,
412,
18173,
20647,
513,
198,
4023,
1378,
16302,
68,
18173,
13,
3262,
14,
198,
198,
1525,
5949,
282,
6081,
1279,
499,
282,
6081,
31,
14816,
13,
785,
... | 2.79845 | 129 |
#!/usr/bin/env python3
"""
Classes for PWMing fans
"""
__author__ = "<Daniel Casner <www.danielcasner.org>"
def DiscreteFan(object):
"A simple fan controller with discrete PWM speeds"
def __init__(self, pi, gpio, speeds=(0, 4095), phase=0):
"Setup the fan"
self.pi = pi
self.gpio = gpio
self.speeds = speeds
self.phase = phase
self.set(0)
def set(self, speed_index):
"Sets the fan to the speed given by index"
self.pi.set_PWM_dutycycle(self.gpio, self.speeds[speed_index], self.phase)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
9487,
274,
329,
350,
22117,
278,
3296,
198,
37811,
198,
834,
9800,
834,
796,
33490,
19962,
11294,
1008,
1279,
2503,
13,
67,
6321,
34004,
1008,
13,
2398,
24618,
198,
198... | 2.302041 | 245 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
import glob
import time
import zipfile
import signal
import ConfigParser
import Mobigen.Common.Log as Log; Log.Init()
SHUTDOWN = False
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT , handler)
signal.signal(signal.SIGHUP , handler)
signal.signal(signal.SIGPIPE, handler)
if __name__ == "__main__" :
if (len(sys.argv) < 3) : usage()
fileMon = FileCompressor(sys.argv[1], sys.argv[2])
fileMon.MakeIdxFile()
fileMon.run()
__LOG__.Trace("PROCESS END...\n")
#sys.stderr.write("LOG://PROCESS END...\n")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
640,
198,
11748,
19974,
7753,
198,
... | 2.440945 | 254 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" distribute- and pip-enabled setup.py for simffa """
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
import re
setup(
name='simffa',
version='dev',
include_package_data=True,
install_requires=parse_requirements('requirements.txt'),
dependency_links=parse_dependency_links('requirements.txt'),
test_suite="nose.collector",
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
14983,
12,
290,
7347,
12,
25616,
9058,
13,
9078,
329,
985,
487,
64,
37227,
198,
198,
6738,
14983,
62,
404... | 2.8125 | 160 |
import networkx as nx
import codecs
import json
import pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from string import ascii_lowercase
histogram = None
for char in ['a', 'b', 'c', 'd']:
for c in ascii_lowercase:
filename = "hist"+char+c
print("Reading:", filename)
with open(filename, "rb") as fp:
hist = pickle.load(fp)
hist = np.array(hist)
if histogram is None:
histogram = hist
else:
if(histogram.shape[0] < hist.shape[0]):
histogram.resize(hist.shape)
else:
hist.resize(histogram.shape)
histogram = histogram + hist
with open('histogram', 'wb') as f:
pickle.dump(histogram, f)
print(histogram) | [
11748,
3127,
87,
355,
299,
87,
198,
11748,
40481,
82,
198,
11748,
33918,
198,
11748,
2298,
293,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
46384,
11537,
198,
11748,
2603,
... | 2.05379 | 409 |
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from api import views
urlpatterns = [
path('studentdb',views.StudentDbDetail.as_view()),
path('add',views.AddStudentAPIView.as_view()),
path('studentdetail/<student_number>',views.GetStudentAPIView.as_view()),
path('studentupdate/<student_number>',views.UpdateStudentAPIView.as_view()),
path('studentdelete/<student_number>',views.ClearStudentAPIView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
6738,
1334,
62,
30604,
13,
6371,
33279,
82,
1330,
5794,
62,
37333,
844,
62,
33279,
82,
198,
6738,
40391,
1330,
5009,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
1... | 2.869565 | 184 |
# Generated by Django 3.1.2 on 2020-12-18 03:43
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
17,
319,
12131,
12,
1065,
12,
1507,
7643,
25,
3559,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
import logging
import pandas as pd
from library.file_utils import file_md5sum
| [
11748,
18931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
5888,
13,
7753,
62,
26791,
1330,
2393,
62,
9132,
20,
16345,
628
] | 3.24 | 25 |
import hashlib
import uuid
import appserversdao
import constants
from pymongo import MongoClient
# Single module-level MongoDB connection; URI/database/collection names come
# from constants.py.
mongo = MongoClient(constants.MONGO_URI)
mongodb = mongo[constants.MONGO_DATABASE]
mongocollection = mongodb[constants.MONGO_USERS_COLLECTION]
# Accumulator for user documents — presumably filled further down this
# module; confirm against the rest of the file.
users = list()
| [
11748,
12234,
8019,
198,
11748,
334,
27112,
198,
11748,
598,
2655,
690,
67,
5488,
198,
11748,
38491,
198,
6738,
279,
4948,
25162,
1330,
42591,
11792,
198,
198,
76,
25162,
796,
42591,
11792,
7,
9979,
1187,
13,
27857,
11230,
62,
47269,
8,... | 2.847826 | 92 |
import pytest
import joblib
from pathlib import Path
@pytest.fixture(scope='session')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='session')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='session')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module') | [
198,
11748,
12972,
9288,
198,
11748,
1693,
8019,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
29891,
11537,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
21412,
11537,
628,
19... | 2.708134 | 209 |
import sys, os
# sys.path.append(os.path.join(os.path.dirname(__file__)) #TODO: Is this a good idea? Dunno! It works!
# print(os.path.join(os.path.dirname(__file__)))
import argparse
import markov_pilot.environment.properties as prp
from markov_pilot.environment.environment import NoFGJsbSimEnv_multi, JsbSimEnv_multi
from markov_pilot.wrappers.episodePlotterWrapper import EpisodePlotterWrapper_multi
from markov_pilot.wrappers.varySetpointsWrapper import VarySetpointsWrapper
from markov_pilot.tasks.tasks import SingleChannel_FlightTask, SingleChannel_MinimumProps_Task
from reward_funcs import _make_base_reward_components, make_angular_integral_reward_components, make_sideslip_angle_reward_components
from markov_pilot.agents.AgentTrainer import DDPG_AgentTrainer, PID_AgentTrainer, PidParameters, MADDPG_AgentTrainer
from markov_pilot.agents.agent_container import AgentContainer, AgentSpec
from markov_pilot.agents.train import perform_training
from markov_pilot.helper.lab_journal import LabJournal
from markov_pilot.helper.load_store import restore_agent_container_from_journal, restore_env_from_journal, save_test_run
from markov_pilot.testbed.evaluate_training import evaluate_training
## define the initial setpoints
# Reference values the trained controllers should track (degrees / knots).
target_path_angle_gamma_deg = -6.5
target_kias = 92
target_roll_angle_phi_deg = -15
target_sideslip_angle_beta_deg = 0
if __name__ == '__main__':
    # NOTE(review): parse_args(), setup_env() and setup_container() are not
    # defined in this part of the file — presumably defined above; confirm.
    arglist = parse_args()
    # LabJournal records run metadata so runs can later be restored from disk.
    lab_journal = LabJournal(arglist.base_dir, arglist)
    # # uncomment the following lines when trying to restore from disk
    # restore_lines = [3463, 3488, 3489]
    # testing_env = restore_env_from_journal(lab_journal, restore_lines[0])
    # # if needed, change to FlightGear enabled environment
    # # testing_env = restore_env_from_journal(lab_journal, restore_lines[0], target_environment='FG')
    # #alternatively, use setup_env() to create a new testin_env
    # # testing_env = setup_env(arglist)
    # # if needed, apply VarySetpointsWrapper to see wild action:
    # # testing_env = VarySetpointsWrapper(testing_env, prp.roll_deg, (-30, 30), (10, 120), (5, 30), (0.05, 0.1))
    # # testing_env = VarySetpointsWrapper(testing_env, prp.flight_path_deg, (-9, -5.5), (10, 120), (5, 30), (0.05, 0.1))
    # agent_container = restore_agent_container_from_journal(lab_journal, restore_lines)
    # # normally, we don't save the test runs restored from disk
    # # save_test_run(testing_env, agent_container, lab_journal, arglist) #use the testing_env here to have the save_path available in the evaluation
    # evaluate_training(agent_container, testing_env, lab_journal=lab_journal) #run the standardized test on the test_env
    # # if FligthGear rendering is desired, use this alternative
    # # evaluate_training(agent_container, testing_env, lab_journal=None, render_mode = 'flightgear') #run the standardized test on the test_env
    # # when restoring form disk, exit now.
    # exit(0)
    # Separate envs for training and evaluation.
    training_env = setup_env(arglist)
    testing_env = setup_env(arglist)
    #apply Varyetpoints to the training to increase the variance of training data
    training_env = VarySetpointsWrapper(training_env, prp.roll_deg, (-30, 30), (10, 30), (5, 30), (0.05, 0.5))
    training_env = VarySetpointsWrapper(training_env, prp.flight_path_deg, (-10, -5.5), (10, 45), (5, 30), (0.05, 0.5))
    training_env = VarySetpointsWrapper(training_env, prp.sideslip_deg, (-2, 2), (10, 45), (5, 30), (0.05, 0.5))
    agent_container = setup_container(training_env.task_list, arglist)
    save_test_run(testing_env, agent_container, lab_journal, arglist) #use the testing_env here to have the save_path available in the evaluation
    perform_training(training_env, testing_env, agent_container, lab_journal, arglist)
    training_env.close()
    testing_env.close()
| [
11748,
25064,
11,
28686,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
2,
25064,
13,
6978,
13,
33295,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
1303,
51,
3727,
46... | 2.873123 | 1,332 |
from selenium import webdriver
import time
import os
from urllib.parse import urlparse, parse_qs
from pprint import pprint
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# EXECUTABLE PATH FOR PHANTOMJS(HEADLESS BROWSER)
EXE_PHANTOM = os.path.join(BASE_DIR, 'webdrivers/phantomjs')
def get_params(url):
    """Return the query-string parameters of *url* as a dict of value lists."""
    parsed = urlparse(url)
    return parse_qs(parsed.query)
def get_sel(id, hl):
    """Scrape the app-detail blocks for app *id* (locale *hl*) via Selenium + PhantomJS."""
    url = f'https://play.google.com/store/apps/details?id={id}&hl={hl}'
    browser = webdriver.PhantomJS(executable_path=EXE_PHANTOM)
    browser.get(url)
    # Link that expands the "additional information" section of the page.
    expand_xpath = '/html/body/div[1]/div[4]/c-wiz/div/div[2]/div/div[1]/div/c-wiz[3]/div/div[2]/div/c-wiz/div/span/div/span/div/a'
    expander = browser.find_element_by_xpath(expand_xpath)
    # Scroll the link into view, then click it; failures are non-fatal.
    for script in ("arguments[0].scrollIntoView();", "arguments[0].click()"):
        try:
            browser.execute_script(script, expander)
        except Exception as err:
            # Todo: implement logging functionality
            print(err)
    time.sleep(3)
    detail = browser.find_element_by_xpath('/html/body/div[4]/div/div[2]/content/c-wiz/div')
    # Each <ul> parent becomes one entry: first text line is the key, the
    # remaining lines are its values.
    results = {}
    for section in detail.find_elements_by_xpath('.//ul/..'):
        parts = section.text.split('\n')
        results[parts[0]] = parts[1:]
    browser.quit()
    return results
if __name__ == '__main__':
    import argparse

    arg_parser = argparse.ArgumentParser(description="Parse Google Play Store")
    arg_parser.add_argument("-id", "--id", required=True, help="Application ID")
    # Bug fix: `vars(parse_args())` always contains the "language" key (value
    # None when the flag is omitted), so the old `except KeyError` fallback to
    # "en" never ran and `hl=None` was passed downstream.  Declaring the
    # default here makes argparse supply "en" directly.
    arg_parser.add_argument("-ln", "--language", default="en", help="Language")
    args = vars(arg_parser.parse_args())
    pprint(get_sel(args["id"], args["language"]))
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
19016,
29572,
11,
21136,
62,
48382,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
33,
11159,
62,
34720,
796,
... | 2.39625 | 800 |
# LICENSE
#
# _This file is Copyright 2018 by the Image Processing and Analysis Group (BioImage Suite Team). Dept. of Radiology & Biomedical Imaging, Yale School of Medicine._
#
# BioImage Suite Web is licensed under the Apache License, Version 2.0 (the "License");
#
# - you may not use this software except in compliance with the License.
# - You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
#
# __Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.__
#
# ENDLICENSE
import biswebpython.core.bis_basemodule as bis_basemodule
import biswebpython.core.bis_baseutils as bis_baseutils
import biswebpython.core.bis_objects as bis_objects
| [
2,
38559,
24290,
198,
2,
220,
198,
2,
4808,
1212,
2393,
318,
15069,
2864,
416,
262,
7412,
28403,
290,
14691,
4912,
357,
42787,
5159,
26264,
4816,
737,
28786,
13,
286,
5325,
12371,
1222,
8436,
35914,
48656,
11,
19681,
3961,
286,
11558,
... | 3.530249 | 281 |
#!/usr/bin/env python
from yaml import load, dump
from collections import OrderedDict
import time, os
from visualize import visualize
DIV = '\n'+ '='*64 + '\n'
start_time = time.time()
# Iterate over all conversation threads
# Generates insight data on a single conversation thread
# Execute one at a time in order
#summarize()
#conversation_separator()
insight()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
331,
43695,
1330,
3440,
11,
10285,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
11748,
640,
11,
28686,
198,
198,
6738,
38350,
1330,
38350,
198,
198,
33569,
796,
705,
5... | 3.151261 | 119 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseRedirect
from session_csrf import anonymous_csrf
from ..models import Config
from ..forms import SettingsForm
@anonymous_csrf
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
9515,
13921,
3673,
3109,
396,
198,
198,
6738,
42625,
... | 3.755814 | 86 |
import logging
import typing
import warnings
from typing import Dict, Text, Any, List, Optional
from abc import ABC
from rasa_sdk import utils
from rasa_sdk.events import SlotSet, EventType
from rasa_sdk.interfaces import Action
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING: # pragma: no cover
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.types import DomainDict
# this slot is used to store information needed
# to do the form handling
REQUESTED_SLOT = "requested_slot"
LOOP_INTERRUPTED_KEY = "is_interrupted"
ACTION_VALIDATE_SLOT_MAPPINGS = "action_validate_slot_mappings"
class ValidationAction(Action, ABC):
    """A helper class for slot validations and extractions of custom slots."""

    def name(self) -> Text:
        """Unique identifier of this simple action."""
        return ACTION_VALIDATE_SLOT_MAPPINGS

    async def run(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: "DomainDict",
    ) -> List[EventType]:
        """Runs the custom actions. Please see the docstring of the parent class."""
        extraction_events = await self.get_extraction_events(
            dispatcher, tracker, domain
        )
        tracker.add_slots(extraction_events)
        # NOTE(review): `_extract_validation_events` and `_extract_slot` (used
        # below) are not defined in this part of the file — confirm they exist
        # elsewhere in the class hierarchy.
        validation_events = await self._extract_validation_events(
            dispatcher, tracker, domain
        )
        # Validation events include events for extracted slots
        return validation_events

    async def required_slots(
        self,
        domain_slots: List[Text],
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: "DomainDict",
    ) -> List[Text]:
        """Returns slots which the validation action should fill.

        Args:
            domain_slots: Names of slots of this form which were mapped in
                the domain.
            dispatcher: the dispatcher which is used to
                send messages back to the user.
            tracker: the conversation tracker for the current user.
            domain: the bot's domain.

        Returns:
            A list of slot names.
        """
        return domain_slots

    async def get_extraction_events(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: "DomainDict",
    ) -> List[EventType]:
        """Extracts custom slots using available `extract_<slot name>` methods.

        Uses the information from `self.required_slots` to gather which slots should
        be extracted.

        Args:
            dispatcher: the dispatcher which is used to
                send messages back to the user. Use
                `dispatcher.utter_message()` for sending messages.
            tracker: the state tracker for the current
                user. You can access slot values using
                `tracker.get_slot(slot_name)`, the most recent user message
                is `tracker.latest_message.text` and any other
                `rasa_sdk.Tracker` property.
            domain: the bot's domain.

        Returns:
            `SlotSet` for any extracted slots.
        """
        custom_slots = {}
        slots_to_extract = await self.required_slots(
            self.domain_slots(domain), dispatcher, tracker, domain
        )
        for slot in slots_to_extract:
            extraction_output = await self._extract_slot(
                slot, dispatcher, tracker, domain
            )
            custom_slots.update(extraction_output)
            # for sequential consistency, also update tracker
            # to make changes visible to subsequent extract_{slot_name}
            tracker.slots.update(extraction_output)
        return [SlotSet(slot, value) for slot, value in custom_slots.items()]

    async def get_validation_events(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: "DomainDict",
    ) -> List[EventType]:
        """Validate slots by calling a validation function for each slot.

        Args:
            dispatcher: the dispatcher which is used to
                send messages back to the user.
            tracker: the conversation tracker for the current user.
            domain: the bot's domain.

        Returns:
            `SlotSet` events for every validated slot.
        """
        slots_to_validate = await self.required_slots(
            self.domain_slots(domain), dispatcher, tracker, domain
        )
        slots: Dict[Text, Any] = tracker.slots_to_validate()
        for slot_name, slot_value in list(slots.items()):
            if slot_name not in slots_to_validate:
                slots.pop(slot_name)
                continue
            # Slot "foo-bar" is validated by a `validate_foo_bar` method.
            method_name = f"validate_{slot_name.replace('-','_')}"
            validate_method = getattr(self, method_name, None)
            if not validate_method:
                logger.warning(
                    f"Skipping validation for `{slot_name}`: there is no validation "
                    f"method specified."
                )
                continue
            validation_output = await utils.call_potential_coroutine(
                validate_method(slot_value, dispatcher, tracker, domain)
            )
            if isinstance(validation_output, dict):
                slots.update(validation_output)
                # for sequential consistency, also update tracker
                # to make changes visible to subsequent validate_{slot_name}
                tracker.slots.update(validation_output)
            else:
                warnings.warn(
                    f"Cannot validate `{slot_name}`: make sure the validation method "
                    f"returns the correct output."
                )
        return [SlotSet(slot, value) for slot, value in slots.items()]

    def global_slots(self, domain: "DomainDict") -> List[Text]:
        """Returns all slots that contain no form condition."""
        # Bug fix: this method was previously decorated with `@staticmethod`
        # while still taking `self`.  The call `self.global_slots(domain)` in
        # `domain_slots` then bound `domain` to the `self` parameter and raised
        # `TypeError: global_slots() missing 1 required positional argument`,
        # and `self._is_mapped_to_form` could never resolve.  It is a regular
        # instance method.
        all_slots = domain.get("slots", {})
        return [k for k, v in all_slots.items() if not self._is_mapped_to_form(v)]

    def domain_slots(self, domain: "DomainDict") -> List[Text]:
        """Returns slots which were mapped in the domain.

        Args:
            domain: The current domain.

        Returns:
            Slot names mapped in the domain which do not include
            a mapping with an active loop condition.
        """
        return self.global_slots(domain)
class FormValidationAction(ValidationAction, ABC):
    """A helper class for slot validations and extractions of custom slots in forms."""

    def name(self) -> Text:
        """Unique identifier of this simple action."""
        raise NotImplementedError("An action must implement a name")

    def form_name(self) -> Text:
        """Returns the form's name."""
        # The action is named "validate_<form name>"; strip the first
        # occurrence of the prefix to recover the form name.
        return self.name().replace("validate_", "", 1)

    def domain_slots(self, domain: "DomainDict") -> List[Text]:
        """Return the slots this form should fill, as mapped in the domain.

        Args:
            domain: The current domain.

        Returns:
            The form's declared `required_slots` list, or an empty list when
            the form declares none.
        """
        form_definition = domain.get("forms", {}).get(self.form_name(), {})
        if "required_slots" not in form_definition:
            return []
        return form_definition["required_slots"]

    async def next_requested_slot(
        self,
        dispatcher: "CollectingDispatcher",
        tracker: "Tracker",
        domain: "DomainDict",
    ) -> Optional[EventType]:
        """Set the slot the form should request next.

        Args:
            dispatcher: the dispatcher which is used to
                send messages back to the user.
            tracker: the conversation tracker for the current user.
            domain: the bot's domain.

        Returns:
            `None` when `required_slots` was not overridden (the default
            `FormAction` in Rasa Open Source then picks the next slot).
            Otherwise a `SlotSet` event for the next unfilled slot; setting
            `requested_slot` to `None` deactivates the form.
        """
        slots_to_fill = await self.required_slots(
            self.domain_slots(domain), dispatcher, tracker, domain
        )
        if slots_to_fill == self.domain_slots(domain):
            # `required_slots` was not customized — defer to Rasa Open Source.
            return None
        for slot_name in slots_to_fill:
            if tracker.slots.get(slot_name) is None:
                return SlotSet(REQUESTED_SLOT, slot_name)
        # Every slot is filled: request nothing, which deactivates the form.
        return SlotSet(REQUESTED_SLOT, None)
| [
11748,
18931,
198,
11748,
19720,
198,
11748,
14601,
198,
6738,
19720,
1330,
360,
713,
11,
8255,
11,
4377,
11,
7343,
11,
32233,
198,
198,
6738,
450,
66,
1330,
9738,
198,
6738,
374,
15462,
62,
21282,
74,
1330,
3384,
4487,
198,
6738,
374... | 2.37763 | 3,755 |
import cv2
import numpy as np
from utils import detector_utils
import tensorflow as tf
import sys
import cv2
# NOTE(review): this script is Python 2 (bare `print` statements below) —
# confirm the intended interpreter before running under Python 3.
# OpenCV version triple; the minor version decides which tracker-creation
# API is available (pre-3.3 used the generic cv2.Tracker_create factory).
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# Hand-detection TF graph, used to (re)acquire a bounding box whenever the
# tracker loses its target.
detection_graph, sess = detector_utils.load_inference_graph()
sess = tf.Session(graph=detection_graph)
width = 680
height = 440
flago = False  # True once the detector has produced a bounding box
tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
tracker_type = tracker_types[2]
if int(minor_ver) < 3:
    tracker = cv2.Tracker_create(tracker_type)
else:
    if tracker_type == 'BOOSTING':
        tracker = cv2.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv2.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv2.TrackerKCF_create()
    if tracker_type == 'TLD':
        tracker = cv2.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        tracker = cv2.TrackerMedianFlow_create()
    if tracker_type == 'GOTURN':
        tracker = cv2.TrackerGOTURN_create()
# Read video
video = cv2.VideoCapture(0)
video.set(cv2.CAP_PROP_FRAME_WIDTH, width)
video.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# Exit if video not opened.
if not video.isOpened():
    print "Could not open video"
    sys.exit()
# Read first frame.
# ok, frame = video.read()
# frame = cv2.flip(frame, 1)
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# # if not ok:
# print 'Cannot read video file'
# sys.exit()
# Define an initial bounding box
#bbox = boundingbox(frame)
# flago,bbox = boundingbox(frame,width,height,detection_graph,sess,flago)
# cv2.rectangle(frame,bbox[0],bbox[1],(0,255,0),2)
# #cv2.circle(frame,(int(bbox[0][0]-bbox[1][0]),int(bbox[0][1]-bbox[1][1])),10,(255,255,255),-11)
# cv2.imshow("Tracking", frame)
# print bbox
# xtl = bbox[1][0]
# ytl = bbox[1][1]
# w = np.abs([1][0]-bbox[0][0])
# h = np.abs(bbox[1][1]-bbox[0][1])
# print xtl,ytl,w,h
# ok = tracker.init(frame,(xtl,ytl,w,h))
# Main loop: track the target frame-by-frame; on tracker failure fall back
# to the hand detector to re-initialize the tracker.
while True:
    # Read a new frame
    ok, frame = video.read()
    frame = cv2.flip(frame, 1)
    if ok:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if not ok:
        break
    # Start timer
    timer = cv2.getTickCount()
    # Update tracker
    ok, bbox = tracker.update(frame)
    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
    # Draw bounding box
    if ok:
        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
    else :
        # Tracking failure
        #flago = False
        cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
        # NOTE(review): `boundingbox` is not defined anywhere in this file —
        # this call would raise NameError when tracking fails; confirm where
        # it is meant to come from (detector_utils?).
        flago,bbox = boundingbox(frame,width,height,detection_graph,sess)
        if flago:
            # Re-seed the tracker from the detector's box (top-left + size).
            xtl = int(bbox[1][0])
            ytl = int(bbox[1][1])
            w = np.abs(int(bbox[1][0])-int(bbox[0][0]))
            h = np.abs(int(bbox[1][1])-int(bbox[0][1]))
            ok =tracker.init(frame,(xtl,ytl,w,h))
        else:
            continue
    # Display tracker type on frame
    cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
    # Display FPS on frame
    cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
    # Display result
    cv2.imshow("Tracking", frame)
    # Exit if ESC pressed
    k = cv2.waitKey(1) & 0xff
    if k == 27 : break
# imgcv = cv2.imread("./sample_img/sample_person.jpg")
# result = tfnet.return_predict(imgcv)
# for i in range(len(result)):
#     x,y = result[i]['bottomright']['x'],result[i]['bottomright']['y']
#     xw,yh = result[i]['topleft']['x'], result[i]['topleft']['y']
#     print (x,y,xw,yh)
#     cv2.rectangle(imgcv,(x,y),(xw,yh),(0,255,0),2)
# cv2.imshow('frame',imgcv)
# cv2.waitKey(0)
# print(result)
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3384,
4487,
1330,
31029,
62,
26791,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
25064,
198,
11748,
269,
85,
17,
628,
220,
198,
7,
22478,
62,
332,
11,
4159,
... | 2.019939 | 1,956 |
import numpy as np
import pandas as pd
# The file is stored at the following path:
# 'https://media-doselect.s3.amazonaws.com/generic/NMgEjwkAEGGQZBoNYGr9Ld7w0/rating.csv'
RATINGS_URL = ('https://media-doselect.s3.amazonaws.com/generic/'
               'NMgEjwkAEGGQZBoNYGr9Ld7w0/rating.csv')
# Load the ratings, key each row by (Office, Department) and order the index.
df = pd.read_csv(RATINGS_URL).set_index(['Office', 'Department']).sort_index()
print(df.head())
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
383,
2393,
318,
8574,
379,
262,
1708,
3108,
25,
198,
2,
705,
5450,
1378,
11431,
12,
34436,
801,
13,
82,
18,
13,
33103,
8356,
13,
785,
14,
41357,
14,... | 2.466258 | 163 |
#!/usr/bin/env python
# Alert state used for tests.
from __future__ import absolute_import
from __future__ import print_function
import json
import TestRsrcs
class Dashboard(TestRsrcs.Rsrc):
""" A class that encompasses some dashboard data used for tests """
kubeBoxPkiNameMatchString = "SomeNameToBeMachedInKubeBoxPki"
skynetMonitoringNameMatchString = "SomeMatchString"
@staticmethod
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
23276,
1181,
973,
329,
5254,
13,
628,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
33918,
198,
1174... | 3.178295 | 129 |
import pathlib
import time
import scipy.misc
from mxnet import nd
import mxnet as mx
import h5py
import numpy as np
from mxnet import gluon
class BernoulliLogLik(gluon.HybridBlock):
    """Calculate log probability of a Bernoulli."""
    def hybrid_forward(self, F, x, logits):
        """Bernoulli log prob is
        x * log(1 + exp(-z))^(-1) + (1-x) * log(1 + exp(z))^(-1)
        = - x * log(1 + exp(z)) + x * log(exp(z)) - log(1 + exp(z)) + x * log(1 + exp(z))
        = x * z - log(1 + exp(z))
        = x * z - max(0, z) - log(1 + exp(-|z|)
        In the last step, observe that softplus(z) ~= z when z large.
        When z small, we hit underflow.
        """
        # Numerically stable elementwise form: x*z - relu(z) - softplus(-|z|).
        # `F` is the mxnet namespace (nd or sym) supplied by HybridBlock.
        return x * logits - F.relu(logits) - F.Activation(-F.abs(logits), 'softrelu')
if __name__ == '__main__':
    # Fix seeds for reproducible runs.
    np.random.seed(24232)
    mx.random.seed(2423232)
    USE_GPU = False
    LATENT_SIZE = 100
    BATCH_SIZE = 64
    PRINT_EVERY = 1000
    MAX_ITERATIONS = 1000000
    OUT_DIR = pathlib.Path(pathlib.os.environ['LOG']) / 'debug'
    # hdf5 file from:
    # https://github.com/altosaar/proximity_vi/blob/master/get_binary_mnist.py
    data_path = pathlib.Path(pathlib.os.environ['DAT']) / 'binarized_mnist.hdf5'
    f = h5py.File(data_path, 'r')
    raw_data = f['train'][:][:]
    f.close()
    ctx = [mx.gpu(0)] if USE_GPU else [mx.cpu()]
    with mx.Context(ctx[0]):
        # NOTE(review): AmortizedGaussianVariational, DeepLatentGaussianModel,
        # ELBO, get_data and get_posterior_predictive are not defined in this
        # part of the file — presumably defined above; confirm.
        variational = AmortizedGaussianVariational(LATENT_SIZE, BATCH_SIZE)
        model = DeepLatentGaussianModel()
        elbo = ELBO(model, variational)
        variational.hybridize()
        model.hybridize()
        elbo.hybridize()
        variational.initialize(mx.init.Xavier())
        model.initialize(mx.init.Xavier())
        # Optimize model and variational parameters jointly.
        params = model.collect_params()
        params.update(variational.collect_params())
        trainer = gluon.Trainer(params, 'rmsprop', {'learning_rate': 0.001})
        # , 'centered': True})
        step = 0
        t0 = time.time()
        train_data = get_data()
        while step < MAX_ITERATIONS:
            # Restart the data iterator once per epoch.
            if step % (train_data.num_data // BATCH_SIZE) == 0:
                train_data = get_data()
            data = next(train_data)
            with mx.autograd.record():
                elbo_batch = elbo(data.data[0])
            # Maximize the ELBO by minimizing its negation.
            (-elbo_batch).backward()
            if step % PRINT_EVERY == 0:
                get_posterior_predictive(data, step)
                np_elbo = np.mean(elbo_batch.asnumpy())
                t1 = time.time()
                speed = (t1 - t0) / PRINT_EVERY
                t0 = t1
                print(f'Iter {step}\tELBO: {np_elbo:.1f}\tspeed: {speed:.3e} s/iter')
            trainer.step(BATCH_SIZE)
            step += 1
| [
11748,
3108,
8019,
198,
11748,
640,
198,
11748,
629,
541,
88,
13,
44374,
198,
6738,
285,
87,
3262,
1330,
299,
67,
198,
11748,
285,
87,
3262,
355,
285,
87,
198,
11748,
289,
20,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
285... | 2.216682 | 1,103 |
"""
This module contains machinery to annotate Notes with a "global address", the path of that note in a with respect to
some global root score.
Note: the annotated Notes are defined here with considerable amount of duplication with clef.py, most notably in the
fact that we define a separate subclass for each note class. Also: in the fact that `to_s_expression` is fully
duplicated (but extended with the relevant address information)
For now, this isn't pretty but it works. I'm open to a more elegant/general solution.
"""
from dsn.s_expr.clef import BecomeAtom, SetAtom, BecomeList, Insert, Delete, Extend, Chord, Score as ChordScore
from dsn.s_expr.simple_score import SimpleScore
from dsn.s_expr.construct import play_note
from dsn.s_expr.structure import Atom, List
from utils import pmts
from dsn.s_expr.score import Score
from dsn.s_expr.note_address import NoteAddress, TheChild, InScore, SExprELS18NoteAddress
# ## Classes for GlobNote (which isn't actually a class itself; we simply subclass each note in clef.py separately)
normal_to_glob = {
BecomeAtom: GlobBecomeAtom,
SetAtom: GlobSetAtom,
BecomeList: GlobBecomeList,
Insert: GlobInsert,
Delete: GlobDelete,
Extend: GlobExtend,
Chord: GlobChord,
}
def note_with_global_address(note, at_address):
    """Annotate *note* (and, recursively, all its descendants) with *at_address*
    and the relevant sub-addresses."""
    if isinstance(note, Chord):
        annotated_children = [
            note_with_global_address(child, at_address.plus(InScore(i)))
            for i, child in enumerate(note.score.notes)
        ]
        return GlobChord(at_address, ChordScore(annotated_children))
    if isinstance(note, (Insert, Extend)):
        annotated_child = note_with_global_address(
            note.child_note, at_address.plus(TheChild()))
        return normal_to_glob[type(note)](at_address, note.index, annotated_child)
    # The remaining note types carry no child notes; re-instantiate the Glob*
    # variant with whatever extra parameter (if any) the type takes.
    if isinstance(note, (SetAtom, BecomeAtom)):  # single param: 'atom'
        return normal_to_glob[type(note)](at_address, note.atom)
    if isinstance(note, Delete):  # single param: 'index'
        return normal_to_glob[type(note)](at_address, note.index)
    return normal_to_glob[type(note)](at_address)
def score_with_global_address(score):
    """
    Score => SimpleScore

    i.e. produce a score whose notes carry global-address annotations.
    """
    pmts(score, Score)
    annotated = [
        note_with_global_address(note, NoteAddress((InScore(i),)))
        for i, note in enumerate(score.notes())
    ]
    return SimpleScore(annotated)
def play_simple_score(score):
    """Play every note of a SimpleScore in order and return the resulting tree.

    Copy of `play_score` with two differences: it does not use the memoization
    framework (GlobNotes are not memoizable (yet?)), and the scores created in
    the tree are themselves SimpleScore objects.
    """
    pmts(score, SimpleScore)
    structure = None  # in the beginning there is nothing, modelled as `None`
    for current_note in score.notes():
        structure = play_note(current_note, structure, ScoreClass=SimpleScore)
    return structure
| [
37811,
198,
1212,
8265,
4909,
20230,
284,
24708,
378,
11822,
351,
257,
366,
20541,
2209,
1600,
262,
3108,
286,
326,
3465,
287,
257,
351,
2461,
284,
198,
11246,
3298,
6808,
4776,
13,
198,
198,
6425,
25,
262,
24708,
515,
11822,
389,
544... | 2.970243 | 1,109 |
# Imports
import re
import os
import glob
import imageio
def gifer(glob_pattern, out_fpath):
    """Assemble the images matched by *glob_pattern* into a GIF at *out_fpath*,
    ordered by the integer embedded in each file name."""
    try:
        def numeric_key(p):
            # Sort key: all digits of the basename concatenated, as one int.
            return int(re.sub('[^0-9]', '', os.path.basename(p)))

        ordered = sorted(glob.glob(glob_pattern), key=numeric_key)
        frames = [imageio.imread(p) for p in ordered]
        imageio.mimsave(out_fpath, frames)
    except ValueError as e:
        print('Glob pattern did not pick up valid inputs:')
        print(e)
| [
2,
1846,
3742,
198,
11748,
302,
198,
11748,
28686,
198,
11748,
15095,
198,
11748,
2939,
952,
198,
198,
4299,
308,
7087,
7,
4743,
672,
62,
33279,
11,
503,
62,
69,
6978,
2599,
198,
220,
220,
220,
37227,
3103,
24040,
257,
2168,
286,
42... | 2.379913 | 229 |
import bpy
from . import combine_mesh, group_settings, util
| [
11748,
275,
9078,
201,
198,
6738,
764,
1330,
12082,
62,
76,
5069,
11,
1448,
62,
33692,
11,
7736,
201,
198,
201,
198
] | 2.909091 | 22 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import time
import pandas as pd
from pandas import DataFrame
from base import BaseObject
from base import FileIO
class InversionLibraryLoader(BaseObject):
""" Load Inversion Library """
__df_inversion = None
__header_row = ["Id",
"KeyField",
"Term"]
def __init__(self,
library_name: str,
is_debug: bool = False):
"""
Created:
5-Nov-2019
craig.trim@ibm.com
* based on 'vectorspace-library-loader'
:param library_name:
:param is_debug:
"""
BaseObject.__init__(self, __name__)
self.is_debug = is_debug
self._library_name = library_name
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
628,
198,
11748,
28686,
198,
11748,
640,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
1330,
6060,
19778,
1... | 2.062663 | 383 |
from django.db import models
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
628,
198
] | 3.444444 | 9 |
#!/usr/bin/env python3
# Standard Imports
import os
import grp
import pwd
import sys
import glob
import random
import shutil
import string
# Module Imports
import click
import json
from passlib.hash import pbkdf2_sha256
import requests
# Initialize shutil copy function
cp = shutil.copyfile
# Define working directory
working_directory = os.path.dirname(os.path.abspath(__file__))
def construct_test(test_query, location, test_target):
    """Constructs JSON POST data for test_hyperglass function"""
    payload = {"type": test_query, "location": location, "target": test_target}
    return json.dumps(payload)
@click.group()
@hg.command("pylint-badge", help="Runs Pylint and generates a badge for GitHub")
@click.option(
"-i",
"--integer-only",
"int_only",
type=bool,
default=False,
help="Output Pylint score as integer",
)
@hg.command("pre-check", help="Check hyperglass config & readiness")
@hg.command("test", help="Full test of all backend features")
@click.option("-l", "--location", type=str, required=True, help="Location to query")
@click.option(
"-4",
"--target-ipv4",
"target_ipv4",
type=str,
default="1.1.1.0/24",
required=False,
show_default=True,
help="IPv4 Target Address",
)
@click.option(
"-6",
"--target-ipv6",
"target_ipv6",
type=str,
default="2606:4700:4700::/48",
required=False,
show_default=True,
help="IPv6 Target Address",
)
@click.option(
"-c",
"--community",
"test_community",
type=str,
required=False,
show_default=True,
default="65000:1",
help="BGP Community",
)
@click.option(
"-a",
"--aspath",
"test_aspath",
type=str,
required=False,
show_default=True,
default="^65001$",
help="BGP AS Path",
)
@click.option(
"-r",
"--requires-ipv6-cidr",
"requires_ipv6_cidr",
type=str,
required=False,
help="Location for testing IPv6 CIDR requirement",
)
@click.option(
"-b",
"--blacklist",
"test_blacklist",
type=str,
default="100.64.0.1",
required=False,
show_default=True,
help="Address to use for blacklist check",
)
@click.option(
"-h",
"--host",
"test_host",
type=str,
default="localhost",
required=False,
show_default=True,
help="Name or IP address of hyperglass server",
)
@click.option(
"-p",
"--port",
"test_port",
type=int,
default=5000,
required=False,
show_default=True,
help="Port hyperglass is running on",
)
def test_hyperglass(
    location,
    target_ipv4,
    target_ipv6,
    requires_ipv6_cidr,
    test_blacklist,
    test_community,
    test_aspath,
    test_host,
    test_port,
):
    """Fully tests hyperglass backend by making use of requests library to
    mimic the JS Ajax POST performed by the front end.

    Each case POSTs a constructed query to the /lg endpoint and reports
    pass/fail: "valid" cases expect HTTP 200, "invalid" cases expect a
    4xx rejection.  The original body repeated the same try/POST/report
    stanza sixteen times; it is factored into the nested helper below.
    (Dead locals ``test_target`` and ``invalid_community`` were removed.)
    """
    invalid_ip = "this_ain't_an_ip!"
    invalid_aspath = ".*"
    ipv4_host = "1.1.1.1"
    ipv4_cidr = "1.1.1.0/24"
    ipv6_host = "2606:4700:4700::1111"
    ipv6_cidr = "2606:4700:4700::/48"
    test_headers = {"Content-Type": "application/json"}
    test_endpoint = f"http://{test_host}:{test_port}/lg"

    def run_case(name, query_type, query_location, query_target, expect_success):
        # POST one constructed query; expect_success=True means HTTP 200,
        # False means a 4xx rejection.  Failures print status and body,
        # exceptions are reported but never abort the remaining cases
        # (same behavior as the original per-case try blocks).
        try:
            click.secho(f"Starting {name} test...", fg="black")
            test_query = construct_test(query_type, query_location, query_target)
            hg_response = requests.post(
                test_endpoint, headers=test_headers, data=test_query
            )
            if expect_success:
                passed = hg_response.status_code == 200
            else:
                passed = hg_response.status_code in range(400, 500)
            if passed:
                click.secho(f"✓ {name} test passed", fg="green", bold=True)
            else:
                click.secho(f"✗ {name} test failed", fg="red", bold=True)
                click.secho(
                    f"Status Code: {hg_response.status_code}", fg="red", bold=True
                )
                click.secho(hg_response.text, fg="red")
        except Exception as e:
            click.secho(f"Exception occurred:\n{e}")

    run_case("No Query Type", "", location, target_ipv4, False)
    run_case("No Location", "bgp_route", "", target_ipv6, False)
    run_case("No Target", "bgp_route", location, "", False)
    run_case("Valid BGP IPv4 Route", "bgp_route", location, target_ipv4, True)
    run_case("Valid BGP IPv6 Route", "bgp_route", location, target_ipv6, True)
    run_case("Invalid BGP IPv4 Route", "bgp_route", location, invalid_ip, False)
    # Only meaningful when the caller names a location that requires CIDRs.
    if requires_ipv6_cidr:
        run_case(
            "Requires IPv6 CIDR", "bgp_route", requires_ipv6_cidr, ipv6_host, False
        )
    run_case("Valid BGP Community", "bgp_community", location, test_community, True)
    run_case("Invalid BGP Community", "bgp_community", location, target_ipv4, False)
    run_case("Valid BGP AS_PATH", "bgp_aspath", location, test_aspath, True)
    run_case("Invalid BGP AS_PATH", "bgp_aspath", location, invalid_aspath, False)
    run_case("Valid IPv4 Ping", "ping", location, ipv4_host, True)
    run_case("Valid IPv6 Ping", "ping", location, ipv6_host, True)
    run_case("Invalid IPv4 Ping", "ping", location, ipv4_cidr, False)
    run_case("Invalid IPv6 Ping", "ping", location, ipv6_cidr, False)
    run_case("Blacklist", "bgp_route", location, test_blacklist, False)
@hg.command("clear-cache", help="Clear Flask cache")
def clearcache():
    """Clears the Flask-Caching cache"""
    try:
        from hyperglass import hyperglass as hg_app

        hg_app.clear_cache()
        click.secho("✓ Successfully cleared cache.", fg="green", bold=True)
    except:
        click.secho("✗ Failed to clear cache.", fg="red", bold=True)
        raise
@hg.command("generate-key", help="Generate API key & hash")
@click.option(
"-l", "--length", "string_length", type=int, default=16, show_default=True
)
def generatekey(string_length):
    """Generates 16 character API Key for hyperglass-frr API, and a corresponding PBKDF2 SHA256 Hash"""
    # Alphanumeric alphabet; one independent uniform draw per character.
    alphabet = string.ascii_letters + string.digits
    api_key = "".join(random.choice(alphabet) for _ in range(string_length))
    key_hash = pbkdf2_sha256.hash(api_key)
    click.secho(
        f"""
Your API Key is: {api_key}
Place your API Key in the `configuration.toml` of your API module. For example, in: `hyperglass_frr/hyperglass-frr/configuration.toml`
Your Key Hash is: {key_hash}
Use this hash as the password for the device using the API module. For example, in: `hyperglass/hyperglass/configuration/devices.toml`
"""
    )
@hg.command("dev-server", help="Start Flask development server")
@click.option("--host", type=str, default="0.0.0.0", help="Listening IP")
@click.option("--port", type=int, default=5000, help="TCP Port")
def flask_dev_server(host, port):
    """Starts Flask development server for testing without WSGI/Reverse Proxy"""
    try:
        from hyperglass import hyperglass, configuration, render

        debug_enabled = configuration.debug_state()
        render.css()
        click.secho(f"✓ Starting Flask development server", fg="green", bold=True)
        hyperglass.app.run(host=host, debug=debug_enabled, port=port)
    except:
        click.secho("✗ Failed to start test server.", fg="red", bold=True)
        raise
@hg.command("compile-sass", help="Compile Sass templates to CSS")
def compile_sass():
    """Renders Jinja2 and Sass templates to HTML & CSS files"""
    try:
        from hyperglass import render as renderer

        renderer.css()
        click.secho("✓ Successfully rendered CSS templates.", fg="green", bold=True)
    except:
        click.secho("✗ Failed to render CSS templates.", fg="red", bold=True)
        raise
@hg.command("migrate-configs", help="Copy TOML examples to usable config files")
def migrateconfig():
    """Copies example configuration files to usable config files"""
    try:
        click.secho("Migrating example config files...", fg="black")
        config_dir = os.path.join(working_directory, "hyperglass/configuration/")
        for example in glob.iglob(os.path.join(config_dir, "*.example")):
            # Strip the ".example" suffix to get the live config path.
            target, _ = os.path.splitext(example)
            if os.path.exists(target):
                click.secho(f"{target} already exists", fg="blue")
                continue
            try:
                cp(example, target)
                click.secho(f"✓ Migrated {target}", fg="green")
            except:
                click.secho(f"✗ Failed to migrate {target}", fg="red")
                raise
        click.secho(
            "✓ Successfully migrated example config files", fg="green", bold=True
        )
    except:
        click.secho("✗ Error migrating example config files", fg="red", bold=True)
        raise
@hg.command("migrate-gunicorn", help="Copy Gunicorn example to usable config file")
def migrategunicorn():
    """Copies example Gunicorn config file to a usable config.

    Locates the installed ``hyperglass`` package, strips the ``.example``
    suffix from ``gunicorn_config.py.example`` and copies it into place,
    unless the destination already exists.
    """
    try:
        import hyperglass
    except ImportError as error_exception:
        click.secho(f"Error while importing hyperglass:\n{error_exception}", fg="red")
        # Fix: previously the error was swallowed and execution continued,
        # crashing below with a masking NameError on `hyperglass.__file__`.
        raise
    try:
        click.secho("Migrating example Gunicorn configuration...", fg="black")
        hyperglass_root = os.path.dirname(hyperglass.__file__)
        ex_file = os.path.join(hyperglass_root, "gunicorn_config.py.example")
        newfile, _ = os.path.splitext(ex_file)  # drop the ".example" suffix
        if os.path.exists(newfile):
            click.secho(f"{newfile} already exists", fg="blue")
        else:
            try:
                cp(ex_file, newfile)
                click.secho(
                    f"✓ Successfully migrated Gunicorn configuration to: {newfile}",
                    fg="green",
                    bold=True,
                )
            except:
                click.secho(f"✗ Failed to migrate {newfile}", fg="red")
                raise
    except:
        click.secho(
            "✗ Error migrating example Gunicorn configuration", fg="red", bold=True
        )
        raise
@hg.command("migrate-systemd", help="Copy Systemd example to OS")
@click.option(
"-d", "--directory", default="/etc/systemd/system", help="Destination Directory"
)
def migratesystemd(directory):
    """Copies example systemd service file to /etc/systemd/system/"""
    try:
        click.secho("Migrating example systemd service...", fg="black")
        ex_file_base = "hyperglass.service.example"
        ex_file = os.path.join(working_directory, f"hyperglass/{ex_file_base}")
        # Drop the ".example" suffix to get the installed unit name.
        service_name, _ = os.path.splitext(ex_file_base)
        newfile = os.path.join(directory, service_name)
        if os.path.exists(newfile):
            click.secho(f"{newfile} already exists", fg="blue")
            return
        try:
            cp(ex_file, newfile)
            click.secho(
                f"✓ Successfully migrated systemd service to: {newfile}",
                fg="green",
                bold=True,
            )
        except:
            click.secho(f"✗ Failed to migrate {newfile}", fg="red")
            raise
    except:
        click.secho("✗ Error migrating example systemd service", fg="red", bold=True)
        raise
@hg.command(
"update-permissions",
help="Fix ownership & permissions of hyperglass project directory",
)
@click.option("--user", default="www-data")
@click.option("--group", default="www-data")
def fixpermissions(user, group):
    """Effectively runs `chmod` and `chown` on the hyperglass/hyperglass directory.

    Resolves the given user/group to uid/gid, then walks the installed
    package tree twice: once applying ownership, once applying 0o744 mode
    bits (matching the original two-pass behavior and its two status
    messages).
    """
    try:
        import hyperglass
    except ImportError as error_exception:
        click.secho(f"Error importing hyperglass:\n{error_exception}")
        # Fix: previously the error was swallowed and execution continued,
        # crashing below with a masking NameError on `hyperglass.__file__`.
        raise
    hyperglass_root = os.path.dirname(hyperglass.__file__)
    uid = pwd.getpwnam(user).pw_uid
    gid = grp.getgrnam(group).gr_gid

    def apply_to_tree(action):
        # Apply ``action(path)`` to every directory and file under the
        # package root, then to each visited directory itself (same
        # ordering as the original duplicated loops).
        for root, dirs, files in os.walk(hyperglass_root):
            for entry in dirs:
                action(os.path.join(root, entry))
            for entry in files:
                action(os.path.join(root, entry))
            action(root)

    try:
        apply_to_tree(lambda path: os.chown(path, uid, gid))
        click.secho(
            "✓ Successfully changed hyperglass/ ownership", fg="green", bold=True
        )
    except:
        click.secho("✗ Failed to change hyperglass/ ownership", fg="red", bold=True)
        raise
    try:
        apply_to_tree(lambda path: os.chmod(path, 0o744))
        click.secho(
            "✓ Successfully changed hyperglass/ permissions", fg="green", bold=True
        )
    except:
        click.secho("✗ Failed to change hyperglass/ permissions", fg="red", bold=True)
        raise
# Script entry point: dispatch to the `hg` click command group.
# NOTE(review): the `def hg()` group function is not visible in this chunk
# (only a bare @click.group() decorator appears above) -- confirm the group
# definition survives in the full file.
if __name__ == "__main__":
    hg()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
8997,
1846,
3742,
198,
11748,
28686,
198,
11748,
1036,
79,
198,
11748,
279,
16993,
198,
11748,
25064,
198,
11748,
15095,
198,
11748,
4738,
198,
11748,
4423,
346,
198,
11748,
... | 2.248415 | 10,410 |
import json
import re
from django.db import models
from kubernetes_manager.consts import byte_units
from kubernetes import client, config
class KubernetesTelemetryMixin(models.Model):
    """
    KubernetesTelemetryMixin

    :type: mixin
    :description: Extends child model to include telemetry features.
    :inherits: django.db.models.Model
    :fields: object_status, average_cpu_usage,
        average_mem_usage, cpu_usage_seconds, mem_usage_seconds
    """

    # NOTE(review): as a mixin this class would normally declare
    # ``class Meta: abstract = True``; adding it changes migrations, so it
    # is flagged here rather than changed -- confirm against the project.
    object_status = models.CharField(max_length=128, null=True, blank=True, help_text="status of the object in Kubernetes")
    average_cpu_usage = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=4, help_text="Average PIT CPU units consumed")
    average_mem_usage = models.IntegerField(null=True, blank=True, help_text="Average PIT bytes consumed")
    cpu_usage_seconds = models.DecimalField(null=True, blank=True, max_digits=8, decimal_places=4, help_text="Average cpu usage * seconds live")
    mem_usage_seconds = models.IntegerField(null=True, blank=True, help_text="Average mem usage * seconds live")

    def splitNumeric(self, size):
        """
        :description: Parses string into numeric component.
        """
        # re.split with a capturing group keeps the digits, e.g.
        # "128Mi" -> ["", "128", "Mi"]; filter(None, ...) drops the empties.
        return filter(None, re.split(r"(\d+)", size))

    def parseSize(self, size):
        """
        :description: Parses string as numeric, suffix and converts to bytes.
        """
        number, unit = list(self.splitNumeric(size))
        return int(float(number) * byte_units[unit])

    def read_pod_metrics(self):
        """
        :description: Uses metrics_server to get cadvisor data.
        """
        api_instance = self.get_client(API=client.CustomObjectsApi)
        pod_name = self.slug
        pod_namespace = self.namespace.slug
        items = api_instance.list_cluster_custom_object("metrics.k8s.io", "v1beta1", "pods").get("items", [])
        return [pod for pod in items if pod_name in pod.get("metadata", {}).get("name") and pod_namespace in pod.get("metadata", {}).get("namespace")]

    def read_pod_usage(self):
        """
        :description: Converts metrics into dictionary for api usage.
        :return: dict with point-in-time "cpu" (float) and "memory" (int, bytes)
        """
        cpu = 0.000
        memory = 0
        for metric in self.read_pod_metrics():
            for container in metric.get("containers", []):
                ccpu = container.get("usage", {}).get("cpu", None)
                cmem = container.get("usage", {}).get("memory", None)
                # Fix: skip absent readings -- previously a missing "cpu"
                # value made `"m" in None` raise TypeError.
                if ccpu is not None:
                    if "m" in ccpu:
                        # A trailing "m" means millicores; convert to cores.
                        cpu += int(ccpu.split("m")[0]) / 1000.000
                    else:
                        cpu += int(ccpu)
                if cmem is not None:
                    memory += self.parseSize(cmem)
        return {"cpu": cpu, "memory": memory}

    @property
    def status(self):
        """
        :description: Returns status data of object.
        :raises ValueError: for model types other than job/deployment
            (previously this fell through to an accidental NameError).
        """
        model_type = self._meta.model_name  # avoid shadowing builtin `type`
        name = self.slug
        namespace = self.namespace.slug
        api_instance = self.get_client(API=client.ExtensionsV1beta1Api)
        if model_type == "kubernetesjob":
            api_response = api_instance.read_namespaced_job_status(name, namespace)
        elif model_type == "kubernetesdeployment":
            api_response = api_instance.read_namespaced_deployment_status(name, namespace)
        else:
            raise ValueError(f"Unsupported model type for status: {model_type}")
        return api_response.status
| [
11748,
33918,
198,
11748,
302,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
479,
18478,
3262,
274,
62,
37153,
13,
1102,
6448,
1330,
18022,
62,
41667,
198,
6738,
479,
18478,
3262,
274,
1330,
5456,
11,
4566,
628,
198,
... | 2.411806 | 1,423 |
from django.conf import settings
from django.db import migrations
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628,
198
] | 3.777778 | 18 |
import sys
sys.path.insert(1,"../../")
import h2o
import math
import os
from tests import pyunit_utils
def calculate_chunk_size(file_path, num_cols, cores, cloud_size):
    """
    Return size of a chunk calculated for optimal data handling in h2o java backend.

    :param file_path: path to dataset
    :param num_cols: number or columns in dataset
    :param cores: number of CPUs on machine where the model was trained
    :param cloud_size: number of nodes on machine where the model was trained
    :return: a chunk size (int)
    """
    # Single pass over the file: accumulate total size and longest line.
    # NOTE(review): len(line) counts characters, not bytes -- these match
    # only for single-byte encodings; confirm datasets are ASCII.
    max_line_length = 0
    total_size = 0
    # Fix: plain "r" replaces the legacy "rU" mode. Universal newlines are
    # the Python 3 default, and the "U" flag was removed in Python 3.11
    # (opening with "rU" raises ValueError there).
    with open(file_path, "r") as input_file:
        for line in input_file:
            size = len(line)
            total_size += size
            if size > max_line_length:
                max_line_length = size
    default_log2_chunk_size = 20 + 2
    default_chunk_size = 1 << default_log2_chunk_size  # 4 MB
    local_parse_size = int(total_size / cloud_size)
    min_number_rows = 10  # need at least 10 rows (lines) per chunk (core)
    per_node_chunk_count_limit = 1 << 21  # don't create more than 2M Chunk POJOs per node
    min_parse_chunk_size = 1 << 12  # don't read less than this many bytes
    max_parse_chunk_size = (1 << 28) - 1  # don't read more than this many bytes per map() thread
    chunk_size = int(max((local_parse_size / (4 * cores)) + 1, min_parse_chunk_size))  # lower hard limit
    if chunk_size > 1024 * 1024:
        chunk_size = (chunk_size & 0xFFFFFE00) + 512  # align chunk size to 512B
    # Super small data check - file size is smaller than 64kB
    if total_size <= 1 << 16:
        chunk_size = max(default_chunk_size, int(min_number_rows * max_line_length))
    else:
        # Small data check
        if (chunk_size < default_chunk_size) and ((local_parse_size / chunk_size) * num_cols < per_node_chunk_count_limit):
            chunk_size = max(int(chunk_size), int(min_number_rows * max_line_length))
        else:
            # Adjust chunk_size such that we don't create too many chunks
            chunk_count = cores * 4 * num_cols
            if chunk_count > per_node_chunk_count_limit:
                # this times too many chunks globally on the cluster
                ratio = 1 << max(2, int(math.log2(int(chunk_count / per_node_chunk_count_limit))))
                chunk_size = chunk_size * ratio  # need to bite off larger chunks
            chunk_size = min(max_parse_chunk_size, chunk_size)  # hard upper limit
        # if we can read at least min_number_rows and we don't create too large Chunk POJOs, we're done
        # else, fix it with a catch-all heuristic
        if chunk_size <= min_number_rows * max_line_length:
            # might be more than default, if the max line length needs it, but no more than the size limit(s)
            # also, don't ever create too large chunks
            chunk_size = int(max(default_chunk_size,  # default chunk size is a good lower limit for big data
                                 min(max_parse_chunk_size, min_number_rows * max_line_length)))  # don't read more than 1GB, but enough to read the minimum number of rows
    return int(chunk_size)
# Test-runner boilerplate used across h2o pyunit tests.
# NOTE(review): `pubdev_6339` is not defined anywhere in this chunk; this
# guard raises NameError unless the test function exists elsewhere in the
# original file -- confirm before running standalone.
if __name__ == "__main__":
    pyunit_utils.standalone_test(pubdev_6339)
else:
    pubdev_6339()
| [
11748,
25064,
198,
17597,
13,
6978,
13,
28463,
7,
16,
553,
40720,
40720,
4943,
198,
11748,
289,
17,
78,
198,
11748,
10688,
198,
11748,
28686,
198,
6738,
5254,
1330,
12972,
20850,
62,
26791,
628,
220,
220,
220,
220,
220,
220,
220,
220,... | 2.435098 | 1,379 |
from mule.task import ITask
import mule.util.docker_util as docker
from mule.error import messages
from mule.util import update_dict
import re
| [
6738,
285,
2261,
13,
35943,
1330,
7283,
2093,
198,
11748,
285,
2261,
13,
22602,
13,
45986,
62,
22602,
355,
36253,
198,
6738,
285,
2261,
13,
18224,
1330,
6218,
198,
6738,
285,
2261,
13,
22602,
1330,
4296,
62,
11600,
198,
11748,
302,
19... | 3.404762 | 42 |
# SPDX-FileCopyrightText: 2017 Tony DiCola for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_drv2605`
====================================================
CircuitPython module for the DRV2605 haptic feedback motor driver. See
examples/simpletest.py for a demo of the usage.
* Author(s): Tony DiCola
"""
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DRV2605.git"
##
# EDITED BY github: @MISSCRISPENCAKES
# FOR LRA CLOSED-LOOP DEFAULT
# AUTO-CALIB SETUP
##
# Internal constants:
# I2C address and register offsets of the DRV2605 (names follow the
# register map; values are register addresses, not contents).
_DRV2605_ADDR = const(0x5A)
_DRV2605_REG_STATUS = const(0x00)
_DRV2605_REG_MODE = const(0x01)
_DRV2605_REG_RTPIN = const(0x02)
_DRV2605_REG_LIBRARY = const(0x03)
# Eight waveform-sequence slot registers (see the `sequence` property).
_DRV2605_REG_WAVESEQ1 = const(0x04)
_DRV2605_REG_WAVESEQ2 = const(0x05)
_DRV2605_REG_WAVESEQ3 = const(0x06)
_DRV2605_REG_WAVESEQ4 = const(0x07)
_DRV2605_REG_WAVESEQ5 = const(0x08)
_DRV2605_REG_WAVESEQ6 = const(0x09)
_DRV2605_REG_WAVESEQ7 = const(0x0A)
_DRV2605_REG_WAVESEQ8 = const(0x0B)
_DRV2605_REG_GO = const(0x0C)
_DRV2605_REG_OVERDRIVE = const(0x0D)
_DRV2605_REG_SUSTAINPOS = const(0x0E)
_DRV2605_REG_SUSTAINNEG = const(0x0F)
_DRV2605_REG_BREAK = const(0x10)
_DRV2605_REG_AUDIOCTRL = const(0x11)
_DRV2605_REG_AUDIOLVL = const(0x12)
_DRV2605_REG_AUDIOMAX = const(0x13)
_DRV2605_REG_RATEDV = const(0x16)
_DRV2605_REG_CLAMPV = const(0x17)
_DRV2605_REG_AUTOCALCOMP = const(0x18)
_DRV2605_REG_AUTOCALEMP = const(0x19)
_DRV2605_REG_FEEDBACK = const(0x1A)
_DRV2605_REG_CONTROL1 = const(0x1B)
_DRV2605_REG_CONTROL2 = const(0x1C)
_DRV2605_REG_CONTROL3 = const(0x1D)
_DRV2605_REG_CONTROL4 = const(0x1E)
_DRV2605_REG_VBAT = const(0x21)
_DRV2605_REG_LRARESON = const(0x22)
# User-facing mode value constants:
# Values written to the MODE register (see DRV2605.mode docstring).
MODE_INTTRIG = 0x00
MODE_EXTTRIGEDGE = 0x01
MODE_EXTTRIGLVL = 0x02
MODE_PWMANALOG = 0x03
MODE_AUDIOVIBE = 0x04
MODE_REALTIME = 0x05
MODE_DIAGNOS = 0x06
MODE_AUTOCAL = 0x07
# Waveform library selection values (see DRV2605.library docstring).
LIBRARY_EMPTY = 0x00
LIBRARY_TS2200A = 0x01
LIBRARY_TS2200B = 0x02
LIBRARY_TS2200C = 0x03
LIBRARY_TS2200D = 0x04
LIBRARY_TS2200E = 0x05
LIBRARY_LRA = 0x06
class DRV2605:
    """TI DRV2605 haptic feedback motor driver module.

    NOTE(review): no ``__init__`` is visible in this chunk, yet methods call
    ``self._read_u8`` / ``self._write_u8`` and the ``sequence`` property
    returns ``self._sequence``.  The constructor and the I2C register
    helpers appear to have been lost from this chunk -- restore them from
    the upstream Adafruit driver before using this block standalone.
    """

    # Class-level buffer for reading and writing data with the sensor.
    # This reduces memory allocations but means the code is not re-entrant or
    # thread safe!
    _BUFFER = bytearray(2)

    def play(self):
        """Play back the select effect(s) on the motor."""
        self._write_u8(_DRV2605_REG_GO, 1)

    def stop(self):
        """Stop vibrating the motor."""
        self._write_u8(_DRV2605_REG_GO, 0)

    @property
    def diag(self):
        """ Check for auto calibration success """
        return self._read_u8(_DRV2605_REG_STATUS)

    # NOTE(review): `@diag.setter` is stacked directly on `@property` with
    # no setter function in between, so both decorators would apply to
    # `mode` -- this cannot be intended.  The setter bodies appear to have
    # been dropped from this chunk; restore them from the upstream driver.
    @diag.setter
    @property
    def mode(self):
        """
        The mode of the chip. Should be a value of:

        - MODE_INTTRIG: Internal triggering, vibrates as soon as you call
          play(). Default mode.
        - MODE_EXTTRIGEDGE: External triggering, edge mode.
        - MODE_EXTTRIGLVL: External triggering, level mode.
        - MODE_PWMANALOG: PWM/analog input mode.
        - MODE_AUDIOVIBE: Audio-to-vibration mode.
        - MODE_REALTIME: Real-time playback mode.
        - MODE_DIAGNOS: Diagnostics mode.
        - MODE_AUTOCAL: Auto-calibration mode.

        See the datasheet for the meaning of modes beyond MODE_INTTRIG.
        """
        return self._read_u8(_DRV2605_REG_MODE)

    # NOTE(review): same lost-setter pattern as above.
    @mode.setter
    @property
    def library(self):
        """
        The library selected for waveform playback. Should be
        a value of:

        - LIBRARY_EMPTY: Empty
        - LIBRARY_TS2200A: TS2200 library A (the default)
        - LIBRARY_TS2200B: TS2200 library B
        - LIBRARY_TS2200C: TS2200 library C
        - LIBRARY_TS2200D: TS2200 library D
        - LIBRARY_TS2200E: TS2200 library E
        - LIBRARY_LRA: LRA library

        See the datasheet for the meaning and description of effects in each
        library.
        """
        return self._read_u8(_DRV2605_REG_LIBRARY) & 0x07

    # NOTE(review): same lost-setter pattern as above.
    @library.setter
    @property
    def sequence(self):
        """List-like sequence of waveform effects.

        Get or set an effect waveform for slot 0-6 by indexing the sequence
        property with the slot number. A slot must be set to either an Effect()
        or Pause() class. See the datasheet for a complete table of effect ID
        values and the associated waveform / effect.

        E.g. 'slot_0_effect = drv.sequence[0]', 'drv.sequence[0] = Effect(88)'
        """
        return self._sequence

    def set_waveform(self, effect_id, slot=0):
        """Select an effect waveform for the specified slot (default is slot 0,
        but up to 7 effects can be combined with slot values 0 to 6). See the
        datasheet for a complete table of effect ID values and the associated
        waveform / effect.
        """
        if not 0 <= effect_id <= 123:
            raise ValueError("Effect ID must be a value within 0-123!")
        if not 0 <= slot <= 6:
            raise ValueError("Slot must be a value within 0-6!")
        self._write_u8(_DRV2605_REG_WAVESEQ1 + slot, effect_id)

    # pylint: disable=invalid-name
    def use_ERM(self):
        """Use an eccentric rotating mass motor (the default)."""
        # Clear bit 7 of the feedback register.
        feedback = self._read_u8(_DRV2605_REG_FEEDBACK)
        self._write_u8(_DRV2605_REG_FEEDBACK, feedback & 0x7F)

    # pylint: disable=invalid-name
    def use_LRM(self):
        """Use a linear resonance actuator motor."""
        # Set bit 7 of the feedback register.
        feedback = self._read_u8(_DRV2605_REG_FEEDBACK)
        self._write_u8(_DRV2605_REG_FEEDBACK, feedback | 0x80)
class Effect:
    """DRV2605 waveform sequence effect.

    Wraps a single effect ID (0-123) for use in a waveform sequence slot,
    validating the value on assignment.
    """

    def __init__(self, effect_id):
        """Create an effect with the given ID (0-123).

        Restored constructor: the properties below read ``self._effect_id``
        and this module instantiates ``Effect(88)`` / ``Effect(slot_contents)``,
        but no ``__init__`` was present in this chunk.
        """
        self.id = effect_id  # route through the setter for validation

    @property
    def raw_value(self):
        """Raw effect ID."""
        return self._effect_id

    @property
    # pylint: disable=invalid-name
    def id(self):
        """Effect ID."""
        return self._effect_id

    @id.setter
    # pylint: disable=invalid-name
    def id(self, effect_id):
        """Set the effect ID; raises ValueError outside 0-123."""
        if not 0 <= effect_id <= 123:
            raise ValueError("Effect ID must be a value within 0-123!")
        self._effect_id = effect_id
class Pause:
    """DRV2605 waveform sequence timed delay."""

    def __init__(self, duration):
        """Create a pause of ``duration`` seconds (0.0-1.27).

        Restored constructor: the properties below read ``self._duration``
        and ``_DRV2605_Sequence.__getitem__`` instantiates ``Pause(seconds)``,
        but no ``__init__`` was present in this chunk.
        """
        self.duration = duration  # setter validates and converts

    @property
    def raw_value(self):
        """Raw pause duration (wait-flag bit OR'd with centiseconds)."""
        return self._duration

    @property
    def duration(self):
        """Pause duration in seconds."""
        # Remove wait time flag bit and convert duration to seconds
        return (self._duration & 0x7F) / 100.0

    @duration.setter
    def duration(self, duration):
        """Sets the pause duration in seconds; raises ValueError outside 0.0-1.27."""
        if not 0.0 <= duration <= 1.27:
            raise ValueError("Pause duration must be a value within 0.0-1.27!")
        # Add wait time flag bit and convert duration to centiseconds
        self._duration = 0x80 | round(duration * 100.0)
class _DRV2605_Sequence:
    """Class to enable List-like indexing of the waveform sequence slots."""

    def __init__(self, DRV2605_instance):
        """Hold a reference to the driver whose slot registers are proxied.

        Restored constructor: the methods below read ``self._drv2605`` but
        no ``__init__`` was present in this chunk.
        """
        self._drv2605 = DRV2605_instance

    def __setitem__(self, slot, effect):
        """Write an Effect or Pause to a slot."""
        if not 0 <= slot <= 6:
            raise IndexError("Slot must be a value within 0-6!")
        if not isinstance(effect, (Effect, Pause)):
            raise TypeError("Effect must be either an Effect() or Pause()!")
        # pylint: disable=protected-access
        self._drv2605._write_u8(_DRV2605_REG_WAVESEQ1 + slot, effect.raw_value)

    def __getitem__(self, slot):
        """Read an effect ID from a slot. Returns either a Pause or Effect class."""
        if not 0 <= slot <= 6:
            raise IndexError("Slot must be a value within 0-6!")
        # pylint: disable=protected-access
        slot_contents = self._drv2605._read_u8(_DRV2605_REG_WAVESEQ1 + slot)
        # High bit set means the slot encodes a timed delay, not an effect.
        if slot_contents & 0x80:
            return Pause((slot_contents & 0x7F) / 100.0)
        return Effect(slot_contents)

    def __iter__(self):
        """Returns an iterator over the waveform sequence slots."""
        for slot in range(0, 7):
            yield self[slot]

    def __repr__(self):
        """Return a string representation of all slot's effects."""
        return repr(list(self))
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
2177,
8832,
6031,
28635,
329,
1215,
1878,
4872,
20171,
201,
198,
2,
201,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
201,
198,
201,
198,
37811,
201,
198,
63,
324,
1878,
4872,
... | 2.174534 | 3,919 |
from sofi.ui import DescriptionDefinition
| [
6738,
523,
12463,
13,
9019,
1330,
12489,
36621,
198
] | 4.666667 | 9 |
from .button import *
from settings import Colors
| [
6738,
764,
16539,
1330,
1635,
198,
6738,
6460,
1330,
29792,
628,
198
] | 4.333333 | 12 |
import pandas as pd
if __name__ == '__main__':
website = 'https://www.peakbagger.com/'
links = pd.read_csv('raw_data/links.csv')
links['full_link'] = website + links['link']
full_links = links[['Mountain', 'full_link']]
full_links = full_links.rename(columns={'full_link': 'link'})
full_links.to_csv('cleaned_data/full_links.csv', index=False)
| [
11748,
19798,
292,
355,
279,
67,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
3052,
796,
705,
5450,
1378,
2503,
13,
36729,
65,
7928,
13,
785,
14,
6,
628,
220,
220,
220,
6117,
796,
279,
67... | 2.483221 | 149 |
import os
from pathlib import Path
from decouple import config, Csv
import dj_database_url
from functools import partial
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
AUTH_USER_MODEL = 'base.User'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'collectfast',
'django.contrib.staticfiles',
'pypro.base'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pypro.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pypro.wsgi.application'
# Django Debug Toolbar settings
INTERNAL_IPS = config('INTERNAL_IPS', cast=Csv(), default='127.0.0.1')
if DEBUG:
INSTALLED_APPS.append('debug_toolbar')
MIDDLEWARE.insert(0, 'debug_toolbar.middleware.DebugToolbarMiddleware')
default_db_url = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
parse_database = partial(dj_database_url.parse, conn_max_age=600)
DATABASES = {
'default': config('DATABASE_URL', default=default_db_url, cast=parse_database)
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
COLLECTFAST_ENABLED = False
AWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')
if AWS_ACCESS_KEY_ID:
AWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = config('AWS_STORAGE_BUCKET_NAME')
AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400', }
AWS_PRELOAD_METADATA = True
AWS_AUTO_CREATE_BUCKET = False
AWS_QUERYSTRING_AUTH = True
AWS_S3_CUSTOM_DOMAIN = None
COLLECTFAST_ENABLED = True
COLLECTFAST_STRATEGY = 'collectfast.strategies.boto3.Boto3Strategy'
AWS_DEFAULT_ACL = 'private'
# Static Assets
STATICFILES_STORAGE = 's3_folder_storage.s3.StaticStorage'
STATIC_S3_PATH = 'static'
STATIC_ROOT = f'/{STATIC_S3_PATH}'
STATIC_URL = f'//s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/{STATIC_S3_PATH}/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
# Upload Media Folder
DEFAULT_FILE_STORAGE = 's3_folder_storage.s3.DefaultStorage'
DEFAULT_S3_PATH = 'media'
MEDIA_ROOT = f'/{DEFAULT_S3_PATH}'
MEDIA_URL = f'//s3.amazonaws.com/{AWS_STORAGE_BUCKET_NAME}/{DEFAULT_S3_PATH}/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
INSTALLED_APPS.append('s3_folder_storage')
INSTALLED_APPS.append('storages')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
875,
43846,
1330,
4566,
11,
327,
21370,
198,
11748,
42625,
62,
48806,
62,
6371,
198,
6738,
1257,
310,
10141,
1330,
13027,
198,
198,
33,
11159,
62,
34720,
796,
10644,
7,
834,
... | 2.251134 | 1,764 |
from __future__ import print_function
import fileinput
import sys
keys_down = []
i = 0
for line in fileinput.input():
i += 1
split_line = line.rstrip().split(' ')
if len(split_line) != 2:
print('Unrecognized format on line ' + str(i), file=sys.stderr)
continue
action = split_line[0]
key = split_line[1]
if action == 'down':
if key in keys_down:
keys_down = [k for k in keys_down if k != key]
keys_down.append(key)
output = ''
for key in keys_down:
output += key + ' '
output = output.rstrip()
print(output)
elif action == 'up':
if key in keys_down:
keys_down = [k for k in keys_down if k != key]
else:
print('Key ' + key + ' went up without going down on line ' + str(i), file=sys.stderr)
else:
print('Unrecognized input on line ' + str(i), file=sys.stderr) | [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
2393,
15414,
198,
11748,
25064,
198,
198,
13083,
62,
2902,
796,
17635,
198,
72,
796,
657,
198,
1640,
1627,
287,
2393,
15414,
13,
15414,
33529,
198,
220,
220,
220,
1312,
15853,
... | 2.192488 | 426 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628,
198
] | 2.842105 | 38 |
#!/usr/bin/env python
import sys, getopt
import glob,os
# sample the first 10**7 reads
help_message = 'usage example: python read_phyler.py -r 1 -i /project/home/original_reads/ -o /project/home/phyler/'
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:],'hr:i:o:',["inputdir="])
except:
print help_message
sys.exit(2)
for opt, arg in opts:
if opt in ('-h','--help'):
print help_message
sys.exit()
elif opt in ('-r',"--filerank"):
fr = int(arg)-1
elif opt in ('-i','--inputdir'):
inputdir = arg
if inputdir[-1] != '/':
inputdir += '/'
elif opt in ('-o','--outputdir'):
outputdir = arg
if outputdir[-1] != '/':
outputdir += '/'
fr = str(fr) + '/'
os.system('mkdir '+outputdir+fr)
FP = glob.glob(os.path.join(inputdir+fr,'*.fastq'))
read_count = 0
for fp in FP:
fileprefix = fp[fp.rfind('/')+1:fp.index('.fastq')]
fasta_file = outputdir + fr + fileprefix + '.fasta'
read_count += get_fasta(fp,fasta_file)
os.system('cat %s*.fasta > %sall.fa' % (outputdir+fr,outputdir+fr))
os.system('rm '+outputdir+fr+'*.fasta')
os.system('touch '+outputdir + fr + 'all.count.' + str(read_count))
os.system('blastall -p blastn -W15 -a1 -e0.01 -m8 -b1 -i %s -d /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.dna > %s' % (outputdir+fr+'all.fa',outputdir+fr+'all.phyler.blastn'))
os.system('rm '+outputdir+fr+'all.fa')
os.system('/seq/msctmp/bcleary/src/MetaPhylerV1.25/metaphylerClassify /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.blastn.classifier /seq/msctmp/bcleary/src/MetaPhylerV1.25/markers/markers.taxonomy %s > %s' % (outputdir+fr+'all.phyler.blastn',outputdir+fr+'all.phyler.blastn.classification'))
os.system('rm '+outputdir+fr+'all.phyler.blastn') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
11,
651,
8738,
198,
11748,
15095,
11,
418,
198,
198,
2,
6291,
262,
717,
838,
1174,
22,
9743,
198,
198,
16794,
62,
20500,
796,
705,
26060,
1672,
25,
21015,
1100,
62... | 2.215463 | 789 |
"""Tests for the Neato init file."""
import pytest
from unittest.mock import patch
from homeassistant.components.neato.const import NEATO_DOMAIN, CONF_VENDOR
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
USERNAME = "myUsername"
PASSWORD = "myPassword"
VENDOR_NEATO = "neato"
VENDOR_VORWERK = "vorwerk"
VENDOR_INVALID = "invalid"
VALID_CONFIG = {
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
INVALID_CONFIG = {
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_INVALID,
}
@pytest.fixture(name="account")
def mock_controller_login():
"""Mock a successful login."""
with patch("pybotvac.Account", return_value=True):
yield
async def test_no_config_entry(hass):
"""There is nothing in configuration.yaml."""
res = await async_setup_component(hass, NEATO_DOMAIN, {})
assert res is True
async def test_config_entries_in_sync(hass, account):
"""The config entry and configuration.yaml are in sync."""
MockConfigEntry(domain=NEATO_DOMAIN, data=VALID_CONFIG).add_to_hass(hass)
assert hass.config_entries.async_entries(NEATO_DOMAIN)
assert await async_setup_component(hass, NEATO_DOMAIN, {NEATO_DOMAIN: VALID_CONFIG})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
async def test_config_entries_not_in_sync(hass, account):
"""The config entry and configuration.yaml are not in sync."""
MockConfigEntry(domain=NEATO_DOMAIN, data=INVALID_CONFIG).add_to_hass(hass)
assert hass.config_entries.async_entries(NEATO_DOMAIN)
assert await async_setup_component(hass, NEATO_DOMAIN, {NEATO_DOMAIN: VALID_CONFIG})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
| [
37811,
51,
3558,
329,
262,
3169,
5549,
2315,
2393,
526,
15931,
198,
11748,
12972,
9288,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,
13,
710,
5549,
13,
9979,
1330,
10635,
1404,
... | 2.481481 | 918 |
# This module is to estimate the sequence or position biases. It provides
# ways to access and save the bias parameters.
import pysam
import numpy as np
import matplotlib.pyplot as plt
class FastaFile:
"""docstring for FastaFile"""
def get_seq(self, qref, start, stop):
"""get the sequence in a given region, the start is from 1.
The start and stop index may still need double check."""
return self.f.fetch(qref, start-1, stop)
class BiasFile:
"""docstring for BiasFile"""
def __init__(self, bias_file=None):
"""get the bias parameters from the hdf5 file"""
self.set_base_chain()
self.pos5_bias = np.zeros((5, 20))
self.pos3_bias = np.zeros((5, 20))
self.pos5_unif = np.zeros((5, 20))
self.pos3_unif = np.zeros((5, 20))
self.pos5_prob = np.zeros((5, 20))
self.pos3_prob = np.zeros((5, 20))
self.percentile = np.zeros((5, 2))
self.flen_mean, self.flen_std = 0, 0
self.flen_sum1, self.flen_sum2 = 0, 0
self.read_num = 0
self.seq5_bias, self.seq3_bias = {}, {}
self.seq5_unif, self.seq3_unif = {}, {}
self.seq5_prob, self.seq3_prob = {}, {}
for i in range(len(self.chain_len)):
self.seq5_bias[str(i)] = np.zeros(4**self.chain_len[i])
self.seq3_bias[str(i)] = np.zeros(4**self.chain_len[i])
self.seq5_unif[str(i)] = np.zeros(4**self.chain_len[i])
self.seq3_unif[str(i)] = np.zeros(4**self.chain_len[i])
self.seq5_prob[str(i)] = np.zeros(4**self.chain_len[i])
self.seq3_prob[str(i)] = np.zeros(4**self.chain_len[i])
if bias_file is None: return
fid = open(bias_file, "r")
all_lines = fid.readlines()
fid.close()
self.flen_mean = float(all_lines[4].split("\t")[0])
self.flen_std = float(all_lines[4].split("\t")[1])
self.flen_sum1 = float(all_lines[4].split("\t")[2])
self.flen_sum2 = float(all_lines[4].split("\t")[3])
self.read_num = float(all_lines[4].split("\t")[4])
for i in range(5,105):
a, b = (i-5) // 20, (i-5) % 20
if b == 0:
self.percentile[a,:] = all_lines[i].split("|")[0].split("-")
self.pos5_bias[a,b] = all_lines[i].split("\t")[1]
self.pos3_bias[a,b] = all_lines[i].split("\t")[2]
self.pos5_unif[a,b] = all_lines[i].split("\t")[3]
self.pos3_unif[a,b] = all_lines[i].split("\t")[4]
self.pos5_prob[a,b] = max(0, self.pos5_bias[a,b] / self.pos5_unif[a,b])
self.pos3_prob[a,b] = max(0, self.pos3_bias[a,b] / self.pos3_unif[a,b])
# self.pos5_prob[a,b] = self.pos5_bias[a,b] / self.pos5_unif[a,b]
# self.pos3_prob[a,b] = self.pos3_bias[a,b] / self.pos3_unif[a,b]
ii, cnt = all_lines[105].split("|")[0], -1
for i in range(105,849):
if ii == all_lines[i].split("|")[0]:
cnt += 1
else:
ii = all_lines[i].split("|")[0]
cnt = 0
self.seq5_bias[ii][cnt] = all_lines[i].split("\t")[1]
self.seq3_bias[ii][cnt] = all_lines[i].split("\t")[2]
self.seq5_unif[ii][cnt] = all_lines[i].split("\t")[3]
self.seq3_unif[ii][cnt] = all_lines[i].split("\t")[4]
self.seq5_prob[ii][cnt] = max(0, self.seq5_bias[ii][cnt] / self.seq5_unif[ii][cnt])
self.seq3_prob[ii][cnt] = max(0, self.seq3_bias[ii][cnt] / self.seq3_unif[ii][cnt])
self.base_chain[ii][cnt] = all_lines[i].split("\t")[0].split("|")[1]
def set_base_chain(self):
"""set the sub-base chain for the variable-length Markov model (VLMM),
which was proposed by Reberts et al, Genome Biology, 2011:
Figure2 in supp 3. http://genomebiology.com/2011/12/3/r22/"""
b1 = ["A","T","G","C"]
b2, b3 = [], []
for i in b1:
for j in b1:
b2.append(j+i)
for k in b1:
b3.append(k+j+i)
base_comb = [b1, b2, b3]
self.chain_len = [1]*4 + [2]*3 + [3]*10 + [2]*2 + [1]*2
self.base_chain = {}
for i in range(21):
self.base_chain[str(i)] = base_comb[self.chain_len[i]-1]
def get_both_bias(self, seq, loc, ulen, end_num=5):
"""get the bias from the bias parameters"""
prob = (self.get_seq_bias(seq, end_num) *
self.get_pos_bias(loc, ulen, end_num))
return prob
def get_seq_bias(self, seq, end_num):
"""get the sequence bias score"""
if end_num == 5:
parameters = self.seq5_prob
elif end_num == 3:
parameters = self.seq3_prob
else:
print("wrong end_num: %s" %str(end_num))
return None
prob = 1.0
for j in range(len(seq)):
_len = self.chain_len[j]
_bas = seq[j-_len+1 : j+1]
if self.base_chain[str(j)].count(_bas) == 0: continue
_idx = self.base_chain[str(j)].index(_bas)
prob = prob * parameters[str(j)][_idx]
return prob
def get_pos_bias(self, loc, ulen, end_num):
"""get the position bias score, the loc is base pair distance
from the 5'end of the units"""
if end_num == 5:
parameters = self.pos5_prob
elif end_num == 3:
parameters = self.pos3_prob
else:
print("wrong end_num: %s" %str(end_num))
return None
bin1 = (ulen >= self.percentile[:,0]) * (ulen <= self.percentile[:,1])
bin2 = 20.0 * loc / ulen
prob = parameters[bin1, bin2]
return prob
def set_percentile(self, ulen, K=5):
"""set the percentiles by input the lengths of unitsets, i.e., ulen,
and number of percentiles, K."""
perc_gap = np.linspace(0, 100, K+1)
_percent = np.percentile(ulen, list(perc_gap))
self.percentile = np.zeros((K, 2))
for i in range(K):
self.percentile[i, 0] = int(_percent[i])+1
self.percentile[i, 1] = int(_percent[i+1])
if i == 0:
self.percentile[i,0] = 0
elif i==4:
self.percentile[i,1] = float("inf")
def set_both_bias(self, seq, loc, ulen, weight, end_num=5, mode="bias"):
"""get the bias from the bias parameters"""
self.set_seq_bias(seq, weight, end_num, mode)
self.set_pos_bias(loc, ulen, weight, end_num, mode)
def set_seq_bias(self, seq, weight, end_num=5, mode="bias"):
"""get the sequence bias score"""
for j in range(len(seq)):
_len = self.chain_len[j]
_bas = seq[j-_len+1 : j+1]
if self.base_chain[str(j)].count(_bas) == 0: continue
_idx = self.base_chain[str(j)].index(_bas)
if end_num == 5:
if mode == "bias":
self.seq5_bias[str(j)][_idx] += weight
elif mode == "unif":
self.seq5_unif[str(j)][_idx] += weight
else:
if mode == "bias":
self.seq3_bias[str(j)][_idx] += weight
elif mode == "unif":
self.seq3_unif[str(j)][_idx] += weight
def set_pos_bias(self, loc, ulen, weight, end_num=5, mode="bias"):
"""get the position bias score, the loc is base pair distance
from the 5'end of the units"""
bin1 = (ulen >= self.percentile[:,0]) * (ulen <= self.percentile[:,1])
bin2 = int(20.0 * loc / (ulen + 0.0001))
if end_num == 5:
if mode == "bias":
self.pos5_bias[bin1, bin2] += weight
elif mode == "unif":
self.pos5_unif[bin1, bin2] += weight
else:
if mode == "bias":
self.pos3_bias[bin1, bin2] += weight
elif mode == "unif":
self.pos3_unif[bin1, bin2] += weight
def save_file(self, out_file="out_file.bias"):
"""to save the bias file in BIAS FILE FORMAT"""
fid = open(out_file, "w")
fid.writelines("# BIAS PARAMETER FORMAT\n")
fid.writelines("# fragment leng: 5 (mean, std, sum_fl, sum_fl^2, reads), line 5\n")
fid.writelines("# position bias: 5*20*4 (name, b5, b3, u5, u3), line 6-105\n")
fid.writelines("# sequence bias: 744*4 (name, b5, b3, u5, u3), line 106-849\n")
fid.writelines("%.2f\t%.2f\t%.2e\t%.2e\t%.0f\n" %(self.flen_mean, self.flen_std,
self.flen_sum1, self.flen_sum2, self.read_num))
for i in range(self.pos5_bias.shape[0]):
for j in range(self.pos5_bias.shape[1]):
aLine = ("%.0f-%.0f|%d\t%.2e\t%.2e\t%.2e\t%.2e\n"
%(self.percentile[i,0], self.percentile[i,1], j, self.pos5_bias[i,j],
self.pos3_bias[i,j], self.pos5_unif[i,j], self.pos3_unif[i,j]))
fid.writelines(aLine)
for i in sorted(self.base_chain.keys(), key=float):
for j in range(len(self.base_chain[i])):
aLine = ("%s|%s\t%.2e\t%.2e\t%.2e\t%.2e\n"
%(i, self.base_chain[i][j], self.seq5_bias[i][j],
self.seq3_bias[i][j], self.seq5_unif[i][j], self.seq3_unif[i][j]))
fid.writelines(aLine)
fid.close()
def plot_bias(self, mode=None):
"""plot of bias parameters: flen, pos5, pos3, seq5, seq3"""
#fragment distribution
if mode == "flen":
xx = np.arange(0, 1000)
yy = norm_pdf(xx, self.flen_mean, self.flen_std)
plt.fill(xx, yy, 'k')#, linewidth=2.0)
plt.xlabel("fragment length")
plt.ylabel("$p(L)$")
plt.xlim(0, 400)
#position bias
if mode == "pos5" or mode == "pos3":
plt.plot(np.arange(20), np.ones(20), '--k')
for i in range(5):
_label="bin%d: %.0f-%.0f bp" %(i+1, self.percentile[i,0], self.percentile[i,1])
if mode == "pos5":
plt.plot(np.arange(20)+0.5, self.pos5_prob[i,:], linewidth=2.0, label=_label)
else:
plt.plot(np.arange(20)+0.5, self.pos3_prob[i,:], linewidth=2.0, label=_label)
plt.legend(loc="best")
plt.xlabel("fractional transcription position")
plt.ylabel("bias weight")
plt.ylim(0,2)
#sequence bias
if mode == "seq5" or mode == "seq3":
base = ["A", "T", "G", "C"]
_color = ["g", "r", "orange", "b"]
if mode == "seq5":
plt.plot(np.arange(21)-8, np.ones(21), '--k')
plt.plot(np.zeros(2), np.array([0, 2.0]), '--k', linewidth=2.0)
percent = np.zeros((4,21))
for i in range(4):
for j in range(21):
_seq_bias = self.seq5_prob[str(j)]
percent[i,j] = np.sum(_seq_bias[i*4**(self.chain_len[j]-1):
(i+1)*4**(self.chain_len[j]-1)]) / 4**(self.chain_len[j]-1)
plt.plot(np.arange(21)-8, percent[i,:], ":o", c=_color[i], label=base[i])
plt.xlabel("offset from 3' fragment end")
plt.xlim(-8,12)
else:
plt.plot(np.arange(21)-12, np.ones(21), '--k')
plt.plot(np.zeros(2), np.array([0, 2.0]), '--k', linewidth=2.0)
percent = np.zeros((4,21))
for i in range(4):
for k in range(21):
j = 20 - k
_seq_bias = self.seq3_prob[str(j)]
percent[i,j] = np.sum(_seq_bias[i*4**(self.chain_len[j]-1):
(i+1)*4**(self.chain_len[j]-1)]) / 4**(self.chain_len[j]-1)
plt.plot(np.arange(21)-12, percent[i,:], ":o", c=_color[i], label=base[i])
plt.xlabel("offset from 3' fragment end")
plt.xlim(-12,8)
plt.legend(loc="best")
plt.xlabel("offset from %s' fragment end" %mode[3])
plt.ylabel("bias weight")
plt.ylim(0.5,2)
#legend only
if mode == "legend":
base = ["A", "T", "G", "C"]
_color = ["g", "r", "orange", "b"]
plt.axis('off')
ax1 = plt.twinx()
for i in range(len(base)):
ax1.plot([], [], "o", c=_color[i], label=base[i])
ax1.legend(numpoints=1, loc=4)
ax1.axis('off')
ax2 = plt.twinx()
for i in range(5):
_label="bin%d: %.0f-%.0f bp" %(i+1, self.percentile[i,0], self.percentile[i,1])
ax2.plot([], [], linewidth=2.0, label=_label)
ax2.legend(loc=3)
ax2.axis('off')
| [
2,
770,
8265,
318,
284,
8636,
262,
8379,
393,
2292,
29275,
13,
632,
3769,
198,
2,
2842,
284,
1895,
290,
3613,
262,
10690,
10007,
13,
198,
198,
11748,
279,
893,
321,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
... | 1.760098 | 7,328 |
from ..objects.list import ObjectList
from ..objects.method import Method
from .base import ResourceBase
| [
6738,
11485,
48205,
13,
4868,
1330,
9515,
8053,
198,
6738,
11485,
48205,
13,
24396,
1330,
11789,
198,
6738,
764,
8692,
1330,
20857,
14881,
628
] | 4.416667 | 24 |
from click.testing import CliRunner
from reset import reset as command
| [
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
6738,
13259,
1330,
13259,
355,
3141,
628
] | 4.5 | 16 |
from microbit import *
SET, RUN, ALARM = range(3)
state = SET
while True:
if state == SET:
handle_set_state()
elif state == RUN:
handle_run_state()
elif state == ALARM:
handle_alarm_state() | [
6738,
4580,
2545,
1330,
1635,
198,
198,
28480,
11,
32494,
11,
8355,
33456,
796,
2837,
7,
18,
8,
198,
5219,
796,
25823,
198,
220,
198,
4514,
6407,
25,
198,
220,
220,
220,
611,
1181,
6624,
25823,
25,
198,
220,
220,
220,
220,
220,
22... | 2.257426 | 101 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014, Esteban Pardo Sánchez
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import cv2
import cv2.cv as cv
import numpy
import pygame
import Constants
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
198,
15269,
357,
66,
8,
1946,
11,
10062,
1765,
272,
350,
13109,
311,
21162,
2395,
89,
198,
3237,
2489,
10395,
13,
198,
198,
7738,
396,
3890,
290,
779,
287,
... | 3.323944 | 497 |
import unittest
import io
import os
import collections
import polytaxis
normal_tags = {'a': set(['a'])}
raw_unsized_notags = (
b'polytaxis00u\n'
b'<<<<\n'
b'wug'
)
raw_unsized_normal = (
b'polytaxis00u\n'
b'a=a\n'
b'<<<<\n'
b'wug'
)
raw_sized_minimized_notags = (
b'polytaxis00 0000000000\n'
b'wug'
)
raw_sized_notags = (
b'polytaxis00 0000000512\n' +
b''.join(b'\0' for x in range(511)) +
b'\n'
b'wug'
)
raw_sized_minimized_normal = (
b'polytaxis00 0000000004\n'
b'a=a\n'
b'wug'
)
raw_sized_normal = (
b'polytaxis00 0000000512\n'
b'a=a\n' +
b''.join(b'\0' for x in range(507)) +
b'\n'
b'wug'
)
| [
11748,
555,
715,
395,
198,
11748,
33245,
198,
11748,
28686,
198,
11748,
17268,
198,
198,
11748,
7514,
19290,
271,
198,
198,
11265,
62,
31499,
796,
1391,
6,
64,
10354,
900,
7,
17816,
64,
6,
12962,
92,
198,
198,
1831,
62,
13271,
1143,
... | 1.801047 | 382 |
import os
from blockbased_synapseaware.hole_filling.components import FindPerBlockConnectedComponents
from blockbased_synapseaware.hole_filling.connect import ConnectLabelsAcrossBlocks, CombineAssociatedLabels
from blockbased_synapseaware.hole_filling.mapping import RemoveHoles
from blockbased_synapseaware.utilities.dataIO import ReadMetaData
from blockbased_synapseaware.utilities.constants import *
| [
11748,
28686,
628,
198,
198,
6738,
2512,
3106,
62,
28869,
7512,
9685,
13,
13207,
62,
69,
4509,
13,
5589,
3906,
1330,
9938,
5990,
12235,
13313,
276,
7293,
3906,
198,
6738,
2512,
3106,
62,
28869,
7512,
9685,
13,
13207,
62,
69,
4509,
13,... | 3.709091 | 110 |
import os
from Tkinter import IntVar, Menu, Tk
import Tkinter as tk
from tkMessageBox import askyesnocancel
from multiprocessing import freeze_support
from globalconst import RAVEN_ICON, VERSION, TITLE, BLACK, get_preferences_from_file, write_preferences_to_file
from aboutbox import AboutBox
from setupboard import SetupBoard
from gamemanager import GameManager
from centeredwindow import CenteredWindow
from prefdlg import PreferencesDialog
if __name__ == '__main__':
freeze_support()
start()
| [
11748,
28686,
201,
198,
6738,
309,
74,
3849,
1330,
2558,
19852,
11,
21860,
11,
309,
74,
201,
198,
11748,
309,
74,
3849,
355,
256,
74,
201,
198,
6738,
256,
74,
12837,
14253,
1330,
1265,
8505,
77,
420,
21130,
201,
198,
6738,
18540,
30... | 3.088235 | 170 |
import pygame
import sys
import time
from intro_controller import IntroController
from settings import RESOLUTION
from spacebar_controller import SpacebarController, LONGPRESS, SHORTPRESS
from game_controller import ControllerResignException
from gameplay_controller import GameplayController
from outro_controller import OutroController
if __name__ == "__main__":
window, screen = init_game()
start_time = time.time()
mainloop(screen, start_time)
sys.exit(0)
| [
11748,
12972,
6057,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
18951,
62,
36500,
1330,
37219,
22130,
198,
6738,
6460,
1330,
15731,
3535,
35354,
198,
198,
6738,
2272,
5657,
62,
36500,
1330,
4687,
5657,
22130,
11,
44533,
32761,
11,
6006... | 3.643939 | 132 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from VASP import calc_files_vasp, ctrl_job_vasp, collect_vasp
from QE import calc_files_qe, ctrl_job_qe, collect_qe
from soiap import calc_files_soiap, ctrl_job_soiap, collect_soiap
from ..IO import read_input as rin
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
569,
1921,
47,
1330,
42302,
62,
16624,
62,
85,
5126,
11,
269,
14859,
62,
21858,
62,
85,
5126,
11,
2824,
... | 2.344828 | 116 |
semmed = {
"AnatomicalEntity": {
"affected_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"affects": [
"Disease",
"BiologicalProcess"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"disrupted_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"manifestation_of": [
"BiologicalProcess"
],
"manifested_by": [
"Disease"
],
"negatively_regulated_by": [
"ChemicalSubstance"
],
"negatively_regulates": [
"ChemicalSubstance",
"Gene"
],
"physically_interacts_with": [
"ChemicalSubstance",
"Gene"
],
"positively_regulated_by": [
"ChemicalSubstance",
"Gene"
],
"positively_regulates": [
"ChemicalSubstance",
"Gene"
],
"preceded_by": [
"Disease",
"BiologicalProcess"
],
"precedes": [
"Disease",
"BiologicalProcess"
],
"predisposes": [
"Disease"
],
"prevents": [
"Disease"
],
"related_to": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
]
},
"BiologicalProcess": {
"affected_by": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"affects": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"caused_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"disrupted_by": [
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"disrupts": [
"AnatomicalEntity",
"Cell",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"negatively_regulated_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"negatively_regulates": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"physically_interacts_with": [
"ChemicalSubstance",
"Disease",
"BiologicalProcess",
"Gene"
],
"positively_regulated_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"positively_regulates": [
"ChemicalSubstance",
"BiologicalProcess",
"Gene"
],
"prevents": [
"Disease"
],
"related_to": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"treats": [
"Disease"
]
},
"Cell": {
"affected_by": [
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"affects": [
"Disease",
"Cell"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"coexists_with": [
"Cell",
"ChemicalSubstance",
"Disease",
"PhenotypicFeature",
"Gene"
],
"derives_from": [
"ChemicalSubstance"
],
"disrupted_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"disrupts": [
"BiologicalProcess"
],
"has_part": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"located_in": [
"CellularComponent",
"AnatomicalEntity",
"Cell"
],
"location_of": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"manifestation_of": [
"Disease",
"BiologicalProcess"
],
"manifested_by": [
"Disease"
],
"negatively_regulated_by": [
"ChemicalSubstance"
],
"negatively_regulates": [
"ChemicalSubstance",
"Gene"
],
"part_of": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"physically_interacts_with": [
"ChemicalSubstance",
"Gene"
],
"positively_regulated_by": [
"ChemicalSubstance",
"Gene"
],
"positively_regulates": [
"ChemicalSubstance",
"Gene"
],
"predisposes": [
"Disease"
],
"produced_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"produces": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"related_to": [
"Disease",
"Gene"
],
"subclass_of": [
"Disease",
"BiologicalProcess"
],
"treats": [
"Disease"
]
},
"CellularComponent": {
"affected_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"affects": [
"AnatomicalEntity",
"Cell",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"disrupted_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"disrupts": [
"Disease",
"BiologicalProcess",
"Cell"
],
"located_in": [
"CellularComponent",
"AnatomicalEntity",
"Cell"
],
"negatively_regulated_by": [
"CellularComponent",
"ChemicalSubstance",
"Gene"
],
"negatively_regulates": [
"CellularComponent",
"ChemicalSubstance",
"Gene"
],
"physically_interacts_with": [
"CellularComponent",
"ChemicalSubstance",
"Disease",
"Gene"
],
"positively_regulated_by": [
"CellularComponent",
"ChemicalSubstance",
"Gene"
],
"positively_regulates": [
"CellularComponent",
"ChemicalSubstance",
"Gene"
],
"predisposes": [
"Disease"
],
"related_to": [
"Disease",
"AnatomicalEntity",
"BiologicalProcess"
],
"treats": [
"Disease"
]
},
"ChemicalSubstance": {
"affected_by": [
"ChemicalSubstance",
"Disease",
"BiologicalProcess",
"Gene"
],
"affects": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"coexists_with": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"disrupted_by": [
"ChemicalSubstance",
"Gene"
],
"disrupts": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"negatively_regulated_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"negatively_regulates": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"physically_interacts_with": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"positively_regulated_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"positively_regulates": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"prevents": [
"Disease"
],
"produced_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"produces": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"PhenotypicFeature",
"Gene"
],
"related_to": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"treats": [
"Disease"
]
},
"Disease": {
"affected_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"affects": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"caused_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"coexists_with": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"derives_from": [
"ChemicalSubstance",
"Gene"
],
"derives_info": [
"ChemicalSubstance"
],
"disrupted_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"disrupts": [
"AnatomicalEntity",
"Cell",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"negatively_regulated_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"negatively_regulates": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"physically_interacts_with": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"positively_regulated_by": [
"ChemicalSubstance",
"Disease",
"Gene"
],
"positively_regulates": [
"ChemicalSubstance",
"Disease",
"BiologicalProcess",
"Gene"
],
"prevented_by": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"prevents": [
"Disease"
],
"related_to": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"treated_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"treats": [
"Disease"
]
},
"Gene": {
"affected_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"affects": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"disrupted_by": [
"ChemicalSubstance",
"Disease",
"BiologicalProcess",
"Gene"
],
"disrupts": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"negatively_regulated_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"negatively_regulates": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"physically_interacts_with": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"positively_regulated_by": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"positively_regulates": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene",
"CellularComponent"
],
"prevents": [
"Disease"
],
"related_to": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"treats": [
"Disease"
]
},
"PhenotypicFeature": {
"affected_by": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"affects": [
"CellularComponent",
"Disease",
"BiologicalProcess",
"Cell"
],
"causes": [
"Disease",
"BiologicalProcess"
],
"disrupted_by": [
"ChemicalSubstance",
"Disease",
"Gene"
],
"disrupts": [
"CellularComponent",
"Disease",
"BiologicalProcess",
"Cell"
],
"has_part": [
"ChemicalSubstance",
"Disease",
"CellularComponent",
"Gene"
],
"has_subclass": [
"Disease",
"PhenotypicFeature"
],
"located_in": [
"CellularComponent",
"AnatomicalEntity",
"Cell"
],
"manifestation_of": [
"Disease",
"BiologicalProcess",
"Gene"
],
"manifested_by": [
"Disease"
],
"negatively_regulated_by": [
"ChemicalSubstance",
"Gene"
],
"negatively_regulates": [
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"part_of": [
"AnatomicalEntity",
"Cell",
"ChemicalSubstance",
"Disease",
"CellularComponent"
],
"physically_interacts_with": [
"ChemicalSubstance",
"Disease",
"Gene"
],
"positively_regulated_by": [
"ChemicalSubstance",
"Gene"
],
"positively_regulates": [
"ChemicalSubstance",
"Gene"
],
"preceded_by": [
"Disease",
"BiologicalProcess"
],
"precedes": [
"BiologicalProcess"
],
"predisposes": [
"Disease"
],
"prevents": [
"Disease"
],
"produced_by": [
"Cell",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene",
"CellularComponent"
],
"produces": [
"ChemicalSubstance",
"Gene"
],
"related_to": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"Gene"
],
"subclass_of": [
"AnatomicalEntity",
"ChemicalSubstance",
"BiologicalProcess",
"Disease",
"PhenotypicFeature",
"Gene"
],
"treats": [
"Disease"
]
}
} | [
43616,
1150,
796,
1391,
198,
220,
220,
220,
366,
2025,
265,
22545,
32398,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
43958,
62,
1525,
1298,
685,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
41829,
... | 1.613429 | 13,180 |
# Generated by Django 4.0.3 on 2022-04-12 14:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
604,
13,
15,
13,
18,
319,
33160,
12,
3023,
12,
1065,
1478,
25,
3980,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14... | 3.019231 | 52 |
from __future__ import unicode_literals
import yaml
try:
from yaml import CSafeLoader as SafeLoader
except ImportError:
from yaml import SafeLoader
from django.utils.module_loading import import_string
from .settings import setting_ocr_backend, setting_ocr_backend_arguments
ocr_backend = import_string(
dotted_path=setting_ocr_backend.value
)(
**yaml.load(
stream=setting_ocr_backend_arguments.value or '{}', Loader=SafeLoader
)
)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
331,
43695,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
331,
43695,
1330,
9429,
8635,
17401,
355,
19978,
17401,
198,
16341,
17267,
12331,
25,
198,
220,
220,
... | 2.852761 | 163 |
# -*- coding: utf-8 -*-
"""
Main click group for CLI
"""
import logging
import os
import sys
import click
from click_plugins import with_plugins
# from pkg_resources import iter_entry_points
import scarlett_os
from scarlett_os.common.configure.ruamel_config import ConfigManager
# from scarlett_os.compat import configparser
# def configure_logging(verbosity):
# log_level = max(10, 30 - 10 * verbosity)
# logging.basicConfig(stream=sys.stderr, level=log_level)
# def read_config(cfg):
# parser = configparser.ConfigParser()
# parser.read(cfg)
# rv = {}
# for section in parser.sections():
# for key, value in parser.items(section):
# rv["{0}.{1}".format(section, key)] = value
# return rv
# @with_plugins(ep for ep in list(iter_entry_points("scarlett_os.scarlett_os_commands")))
# @click.group()
# @click.version_option(version=scarlett_os.__version__, message="%(version)s")
# @click.option(
# "--name",
# "-n",
# help="Name ScarlettOS process explicitly.",
# metavar="NAME",
# default="scarlett_system",
# )
# @click.option(
# "--daemon",
# "-d",
# is_flag=True,
# help="Daemon mode, background process.",
# default=False,
# )
# @click.option(
# "--mode",
# "-m",
# type=click.Choice(["dbus_server", "listener", "tasker", "check_all_services"]),
# help="ScarlettOS type",
# default="check_all_services",
# )
# @click.option(
# "--master",
# "-m",
# is_flag=True,
# help="Run ScarlettOS process as a Master",
# default=False,
# )
# @click.option(
# "--slave",
# "-s",
# is_flag=True,
# help="Run ScarlettOS process as a Slave",
# default=False,
# )
# @click.option("--etcd-host", help="Etcd Host for distributed mode.", default=False)
# @click.option(
# "--quiet",
# "-q",
# is_flag=True,
# help="Limit output to errors and warnings.",
# default=False,
# )
# @click.option("--verbose", "-V", is_flag=True, help="Be verbose.", default=False)
# @click.option(
# "--config",
# "-c",
# type=click.Path(exists=True, resolve_path=True),
# help="Config file",
# )
# @click.pass_context
# def main_group(
# ctx, name, daemon, mode, master, slave, etcd_host, quiet, verbose, config
# ):
# """This is the command line interface to ScarlettOS.
# """
# # NOTE: ctx
# # Most public functions are actually methods of a 'context' object which
# # is passed as the first parameter (ctx). The context object stores the
# # precision, cached data, and a few other things. It also defines
# # conversions so that the same high-level code can be used for several
# # different base types (mpf, mpfs in Sage, intervals, Python floats) by
# # switching contexts.
# #
# # The default context is called 'mp'. You can call most functions as
# # mpmath.mp.foo(). The top-level function mpmath.foo() is just an alias
# # for this.
# ctx.obj = {}
# config = config or os.path.join(click.get_app_dir("scarlett_os"), "scarlett_os.ini")
# cfg = read_config(config)
# if cfg:
# ctx.obj["config_file"] = config
# ctx.obj["cfg"] = cfg
# ctx.default_map = cfg
# verbosity = (
# os.environ.get("SCARLETTOS_VERBOSE")
# or ctx.lookup_default("scarlett_os.verbosity")
# or 0
# )
# if verbose or quiet:
# verbosity = verbose - quiet
# verbosity = int(verbosity)
# configure_logging(verbosity)
# ctx.obj["verbosity"] = verbosity
@click.group()
@click.version_option(version=scarlett_os.__version__, message="%(version)s")
@click.option("--debug/--no-debug", default=False)
@click.pass_context
def main_group(ctx, debug):
"""This is the command line interface to ScarlettOS.
"""
# NOTE: ctx
# Most public functions are actually methods of a 'context' object which
# is passed as the first parameter (ctx). The context object stores the
# precision, cached data, and a few other things. It also defines
# conversions so that the same high-level code can be used for several
# different base types (mpf, mpfs in Sage, intervals, Python floats) by
# switching contexts.
#
# The default context is called 'mp'. You can call most functions as
# mpmath.mp.foo(). The top-level function mpmath.foo() is just an alias
# for this.
ctx.obj = {}
ctx.obj["DEBUG"] = debug
click.echo("Debug mode is %s" % ("on" if debug else "off"))
# click.echo('Debug is %s' % (ctx.obj['DEBUG'] and 'on' or 'off'))
@click.command()
@click.pass_context
def config_init(ctx):
"""
Setup default configuration file
"""
cfg = ConfigManager()
cfg.prep_default_config()
click.echo(
"Default configuration created. Please don't forget to update your locations if you don't like the default values!"
)
@click.command()
@click.option(
"--mode",
"-m",
type=click.Choice(["dbus_server", "listener", "tasker", "check_all_services"]),
help="ScarlettOS type",
default="check_all_services",
)
@click.pass_context
def run(ctx):
"""
Run a particular Scarlett Module. Options: [dbus_server|listener|tasker|check_all_services]
"""
pass
@click.command()
@click.pass_context
def dummy(ctx):
"""
Dummy command, doesn't do anything.
"""
click.echo("Dummy command, doesn't do anything.")
main_group.add_command(config_init)
main_group.add_command(dummy)
main_group.add_command(run)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
13383,
3904,
1448,
329,
43749,
198,
37811,
198,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
11748,
3904,
198,
6738,
3904,
62,
37390,
133... | 2.574169 | 2,137 |
from podiomirror.transactions.transaction import Transaction, MODIFY_RELATION
| [
6738,
279,
23130,
296,
343,
1472,
13,
7645,
4658,
13,
7645,
2673,
1330,
45389,
11,
19164,
5064,
56,
62,
16448,
6234,
628
] | 3.590909 | 22 |
# Yilin Wang
# pre_lecture assignment
# BMES T580
import random
import math
print('**********************************')
print(' GUESS_THAT_PRIMER GAME ')
print('**********************************')
print()
DNA = ['A','G','C','T'] # DNA base
# set the initial sequence as a empty string
# the base will be added into this empty string as a target DNA sequence.
the_primer = ''
# use the for loop to generate the random DNA sequence ( the string need to be 5 letters long)
for i in range (0,5):
the_primer +=random.choice(DNA)
#set up the initial values
guess = 'NNNNN'
# asking the users' name to begin the guess game
name = input('What is your name? ')
# calculate the length of the primer (should be 5 in this game).
target_length= len(the_primer)
# print the hint
print('The number of letters for the Target DNA sequence is {}.'.format(target_length))
print ('The DNA base is {}.'. format(DNA))
# set up a while loop to define while the DNA sequence that user guessing is different than the random DNA (the_primer),
#---it is allow the user to take another guess.
while guess != the_primer:
guess_text = input('Guess a primer DNA sequence: ') # asking the user to generate a DNA sequence (5 letters long)
guess = guess_text
# get the length of the guess DNA sequence
guess_length = len(guess)
# using if loop to split the situations (correctly or incorrectly)
# if the user's answer is incorrect, the system will allow another guess
# if the user has teh correct answer, the user win and exit the game
if guess != the_primer:
# set up the initial value of the counter number (the number of correct letters)
count = 0
# get the minimum length of the string
comparing_range = min(target_length,guess_length)
# using for loop to check if there are any correct letter, and count the number of correct letters.
for q in range (comparing_range):
if the_primer[q] == guess[q]:
count = count+1
# print('Your guess of ' + guess + ' was incorrect.' + the number of correct letters is + count)
print('Sorry {}. Your guess {} is incorrect. The number of correct letters is {}. Play again? '.format(name, guess,count))
else:
print('Excellent work {}! you win! The target DNA sequence is {}!'.format(name, guess))
print('Done!!!')
| [
2,
575,
346,
259,
15233,
198,
2,
662,
62,
801,
495,
16237,
198,
2,
29944,
1546,
309,
39322,
198,
198,
11748,
4738,
198,
11748,
10688,
198,
198,
4798,
10786,
17174,
1174,
11537,
198,
4798,
10786,
220,
19348,
7597,
62,
4221,
1404,
62,
... | 3.22541 | 732 |
import logging
import fmcapi
| [
11748,
18931,
198,
11748,
277,
76,
11128,
72,
628
] | 3.333333 | 9 |
import re
number = int(input())
pattern_valid = r"@#+[A-Z][A-Za-z0-9]{4,}[A-Z]@#+"
for _ in range(number):
lines = input()
if re.match(pattern_valid,lines):
digits = re.findall(r"\d", lines)
if digits:
print(f'Product group: {"".join(digits)}')
else:
print('Product group: 00')
else:
print('Invalid barcode')
| [
11748,
302,
198,
198,
17618,
796,
493,
7,
15414,
28955,
628,
198,
33279,
62,
12102,
796,
374,
1,
41573,
10,
58,
32,
12,
57,
7131,
32,
12,
57,
64,
12,
89,
15,
12,
24,
60,
90,
19,
11,
92,
58,
32,
12,
57,
60,
41573,
10,
1,
62... | 2.037037 | 189 |