id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
170844 | import math
import random
from functools import partial
from typing import Callable, Optional, Tuple
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from jax import lax
from jax.random import PRNGKey
from transformers.modeling_flax_utils import (
FlaxPreTrainedModel,
)
from transformers.models.bart.configuration_bart import BartConfig
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one token to the right.

    The last token of every sequence wraps around to position 0 via the roll
    and is then overwritten with ``decoder_start_token_id``.  Any ``-100``
    label-padding values are replaced by ``pad_token_id``.
    """
    shifted_input_ids = jnp.roll(input_ids, 1, axis=-1)
    # BUG FIX: jax.ops.index_update was removed from JAX; the supported
    # functional-update API is the `.at[...].set(...)` property.
    shifted_input_ids = shifted_input_ids.at[..., 0].set(decoder_start_token_id)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxBartTranscoderPreTrainedModel(FlaxPreTrainedModel):
    """Base class wiring a BART transcoder module into the Flax pretrained-model API.

    Provides weight initialization, fast auto-regressive cache setup, and the
    separate ``encode`` / ``decode`` / ``__call__`` entry points.  Subclasses
    set ``module_class`` to the concrete ``flax.linen`` module.
    """

    config_class = BartConfig
    base_model_prefix: str = "model"
    module_class: nn.Module = None

    def __init__(
        self,
        config: BartConfig,
        input_shape: Tuple[int] = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        **kwargs
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
        """Initialize and return the module parameters for the given input shape."""
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        # make sure initialization pass will work for FlaxBartForSequenceClassificationModule
        # BUG FIX: jax.ops.index_update was removed from JAX; use `.at[].set()`.
        input_ids = input_ids.at[..., -1].set(self.config.eos_token_id)
        attention_mask = jnp.ones_like(input_ids)
        decoder_input_ids = input_ids
        decoder_attention_mask = jnp.ones_like(input_ids)
        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.module.init(
            rngs,
            input_ids,
            attention_mask,
            decoder_input_ids,
            decoder_attention_mask,
            position_ids,
            decoder_position_ids,
        )["params"]

    def init_cache(self, batch_size, max_length, encoder_outputs):
        r"""
        Args:
            batch_size (:obj:`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (:obj:`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
            encoder_outputs (:obj:`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
                ``encoder_outputs`` consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`,
                `optional`: :obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length,
                hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the
                encoder. Used in the cross-attention of the decoder.
        """
        # init input variables to retrieve cache
        decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
        )

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )

        init_variables = self.module.init(
            jax.random.PRNGKey(0),
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            decoder_position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs[0],
            init_cache=True,
            method=_decoder_forward,  # we only need to call the decoder to init the cache
        )
        return unfreeze(init_variables["cache"])

    def encode(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Run only the encoder half of the model.

        Returns:
        Example::
            >>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
            >>> model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
            >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
            >>> text = "My friends are cool but they eat too many carbs."
            >>> inputs = tokenizer(text, max_length=1024, return_tensors='jax')
            >>> encoder_outputs = model.encode(**inputs)
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
            encode_module = module._get_encoder_module()
            return encode_module(input_ids, attention_mask, position_ids, **kwargs)

        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            method=_encoder_forward,
        )

    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        r"""
        Run only the decoder half of the model against precomputed encoder outputs.

        Returns:
        Example::
            >>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
            >>> model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
            >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
            >>> text = "My friends are cool but they eat too many carbs."
            >>> inputs = tokenizer(text, max_length=1024, return_tensors='jax')
            >>> encoder_outputs = model.encode(**inputs)
            >>> decoder_start_token_id = model.config.decoder_start_token_id
            >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
            >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
            >>> last_decoder_hidden_states = outputs.last_hidden_state
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))
        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))
        if decoder_position_ids is None:
            if past_key_values is not None:
                raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )
        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng
        inputs = {"params": params or self.params}
        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
        # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
        # it can be changed by FlaxBartAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
            decoder_module = module._get_decoder_module()
            return decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )

        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )
        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past = outputs
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past = outputs
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
            return outputs
        # BUG FIX: the original fell through and implicitly returned None when no
        # past_key_values were supplied; always return the module outputs.
        return outputs

    def __call__(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        decoder_input_ids: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
        encode_only: bool = True,
    ):
        """Full forward pass through the transcoder module.

        BUG FIX: the original accepted ``encode_only`` but always passed
        ``encode_only=True`` to the module, silently ignoring the argument.
        The parameter is now forwarded; its default was changed to ``True`` so
        callers that passed nothing keep the previous effective behavior.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        # prepare encoder inputs
        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)
        if position_ids is None:
            batch_size, sequence_length = input_ids.shape
            position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
        # prepare decoder inputs
        if decoder_input_ids is None:
            decoder_input_ids = shift_tokens_right(
                input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
            )
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones_like(decoder_input_ids)
        if decoder_position_ids is None:
            batch_size, sequence_length = decoder_input_ids.shape
            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )
        # Handle any PRNG if needed
        rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
        return self.module.apply(
            {"params": params or self.params},
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            encode_only=encode_only,
        )
) | StarcoderdataPython |
162575 | <reponame>simpeg-research/iris-mt-scratch
"""
revamping of TTF.m --> TTF.py
This uses xarray for the transfer function
This tracks the input_channels, output_channels,
At initialization we will know:
-input channels (iterable of channel objects)
-output_channels (iterable of channel objects)
-the relevant frequency bands
xrd = xr.DataArray(windowed_array, dims=["input_channels", "output_channels", "frequency"],
coords={"input_channels": ["hx", "hy"],
"output_channels": ["hx", "hy"]
"time": time_vector})
"""
import numpy as np
import xarray as xr
class TransferFunction(object):
    """
    Container for a transfer function array.

    Supports full covariance and an arbitrary number of input / output
    channels (for MT the number of input channels Nin is always 2).

    Example element access convention:
        Zxx = Z(1, 1, Period); Zxy = Z(1, 2, Period)
        Zyx = Z(2, 1, Period); Zyy = Z(2, 2, Period)

    Attributes
    ----------
    TF : numpy array
        transfer functions, shape (Nout, Nin, Nperiods), complex
    T : numpy array
        periods, one per band  (TODO: rename to `period`)
    tf_header : transfer_function_header.TransferFunctionHeader
        local site header, remote site header if appropriate, and
        information about the estimation approach
    Cov_SS : numpy array
        inverse signal power matrix, shape (Nin, Nin, Nperiods)
    Cov_NN : numpy array
        noise covariance matrix, shape (Nout, Nout, Nperiods)
    num_segments : numpy array
        number of samples used to estimate the TF for each band / channel
    R2 : numpy array
        multiple coherence for each output channel / band
    initialized : bool
        True once the estimate arrays have been allocated
    """

    def __init__(self, tf_header, num_bands):
        """
        Parameters
        ----------
        tf_header : transfer_function_header.TransferFunctionHeader or None
            Describes the input/output channels; arrays are only allocated
            when both this and `num_bands` are provided.
        num_bands : int or None
            Number of frequency bands; sizes all estimate arrays.
            NOTE: it would be nicer if this were derived from the processing
            scheme / the data being processed.
        """
        self.tf_header = tf_header
        self.num_bands = num_bands
        self.T = None  # periods (TODO: rename to self.period)
        self.TF = None  # ROBUSTNESS: ensure attribute exists before _initialize_arrays
        self.num_segments = None
        self.Cov_SS = None
        self.Cov_NN = None
        self.R2 = None
        self.initialized = False
        if self.tf_header is not None and self.num_bands is not None:
            self._initialize_arrays()

    def _initialize_arrays(self):
        """Allocate zero-filled estimate arrays sized by channel counts and bands.

        TODO: these may be better cast as xarrays; review once the pipeline
        is up and running.  np.full(shape, np.nan) may be preferable to zeros.
        """
        if self.tf_header is None:
            return
        self.T = np.zeros(self.num_bands)
        self.TF = np.zeros((self.num_channels_out, self.num_channels_in,
                            self.num_bands), dtype=np.complex128)
        self.num_segments = np.zeros((self.num_channels_out, self.num_bands))
        self.Cov_SS = np.zeros((self.num_channels_in,
                                self.num_channels_in, self.num_bands))
        self.Cov_NN = np.zeros((self.num_channels_out,
                                self.num_channels_out, self.num_bands))
        self.R2 = np.zeros((self.num_channels_out, self.num_bands))
        self.initialized = True

    @property
    def periods(self):
        """Alias for self.T, the per-band periods."""
        return self.T

    @property
    def minimum_period(self):
        return np.min(self.periods)

    @property
    def maximum_period(self):
        return np.max(self.periods)

    @property
    def num_channels_in(self):
        return self.tf_header.num_input_channels

    @property
    def num_channels_out(self):
        return self.tf_header.num_output_channels

    def set_tf(self, i_band, regression_estimator, T):
        """Store regression results for one band.

        Fills TF, covariances, R2 and segment counts for band `i_band`
        from the supplied regression estimator.  Assumes estimates exist
        for all Nout output channels.

        TODO: derive i_band from a band object rather than passing both
        i_band and T (they are not independent); a BandAveragingScheme()
        class would own that mapping.
        TODO: port the MATLAB consistency check
        `if any(size(TRegObj.b) ~= [obj.Nin obj.Nout]) error(...)`.
        """
        if self.TF is None:
            raise ValueError("Initialize TransferFunction object before calling set_tf")
        self.T[i_band] = T
        self.TF[:, :, i_band] = regression_estimator.b.T  # check dims are consistent
        if regression_estimator.noise_covariance is not None:
            self.Cov_NN[:, :, i_band] = regression_estimator.noise_covariance
        if regression_estimator.inverse_signal_covariance is not None:
            self.Cov_SS[:, :, i_band] = regression_estimator.inverse_signal_covariance
        if regression_estimator.R2 is not None:
            # BUG FIX: the original indexed with the undefined name `ib`.
            self.R2[:, i_band] = regression_estimator.R2
        self.num_segments[:self.num_channels_out, i_band] = regression_estimator.n_data

    def standard_error(self):
        """Return standard errors of the TF components, same shape/order as TF."""
        stderr = np.zeros(self.TF.shape)
        for j in range(self.num_channels_out):
            for k in range(self.num_channels_in):
                stderr[j, k, :] = np.sqrt(self.Cov_NN[j, j, :] * self.Cov_SS[k, k, :])
        return stderr

    # <TO BE DEPRECATED/MERGE WITH BandAveragingScheme>
    def get_num_bands(self):
        return len(self.T)

    def get_frequencies(self):
        return 1. / self.T
    # </TO BE DEPRECATED/MERGE WITH BandAveragingScheme>
def test_ttf():
    """Smoke-test TransferFunction construction (requires iris_mt_scratch)."""
    from iris_mt_scratch.sandbox.transfer_function.transfer_function_header \
        import TransferFunctionHeader
    tfh = TransferFunctionHeader()
    # BUG FIX: the class is named TransferFunction; the old `TTF` name does
    # not exist in this module and raised NameError.
    ttf = TransferFunction(tfh, 32)
    # NOTE(review): set_tf expects a regression-estimator object as its second
    # argument; passing plain ints exercises only the call path, not the math.
    ttf.set_tf(1, 2, 3)
def main():
    # Entry point: run the module smoke test when executed directly.
    test_ttf()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1625875 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
import os
# NOTE(review): `os` appears unused in this script -- confirm before removing.
# Long description is assembled from the README plus the change history.
# NOTE(review): these open() calls rely on the platform default encoding;
# consider encoding='utf-8' if the files may contain non-ASCII text.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read()
# Runtime dependencies: one requirement per line in requirements.txt.
with open('requirements.txt') as reqs_file:
    requirements = [i.strip() for i in reqs_file.readlines()]
# Single-source version string kept in a VERSION file.
with open('VERSION') as v_file:
    version = v_file.read().strip()
setup_requirements = [
    'pytest-runner',
]
# Test/dev dependencies mirror requirements_dev.txt.
with open('requirements_dev.txt') as devreqs_file:
    test_requirements = [i.strip() for i in devreqs_file.readlines()]
setup(
    name='ansible-vault-rekey',
    version=version,
    description="Roll keys and re-encrypt secrets in any repo using Ansible Vault",
    long_description=readme + '\n\n' + history,
    author="<NAME>",
    author_email='<EMAIL>',
    url='https://github.com/inhumantsar/ansible_vault_rekey',
    packages=find_packages(include=['ansible_vault_rekey']),
    entry_points={
        'console_scripts': [
            'ansible-vault-rekey=ansible_vault_rekey.cli:main'
        ]
    },
    include_package_data=True,
    install_requires=requirements,
    license="BSD license",
    zip_safe=False,
    keywords='ansible-vault-rekey',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| StarcoderdataPython |
180127 | #!/usr/bin/env python
#test client for joint_states_listener
import roslib
roslib.load_manifest('race_joint_states_listener')
import rospy
from race_joint_states_listener.srv import ReturnJointStates
import time
import sys
def call_return_joint_states(joint_names):
    """Query the return_joint_states service for the given joint names.

    Returns a (position, velocity, effort) tuple from the service response.
    Exits the process if the service call itself fails; joints missing from
    resp.found are reported but their values are still returned as-is.
    """
    rospy.wait_for_service("return_joint_states")
    try:
        s = rospy.ServiceProxy("return_joint_states", ReturnJointStates)
        resp = s(joint_names)
    except rospy.ServiceException as e:  # FIX: `except X, e` was Python-2-only syntax
        print("error when calling return_joint_states: %s" % e)
        sys.exit(1)
    for (ind, joint_name) in enumerate(joint_names):
        if not resp.found[ind]:
            print("joint %s not found!" % joint_name)
    return (resp.position, resp.velocity, resp.effort)
#pretty-print list to string
def pplist(values):
    """Pretty-print an iterable of numbers as space-separated '%2.3f' strings.

    The parameter was renamed from `list`, which shadowed the builtin.
    """
    return ' '.join(['%2.3f' % x for x in values])
if __name__ == "__main__":
#print out the positions, velocities, and efforts of the left arm joints
joint_names_left = ["l_shoulder_pan_joint",
"l_shoulder_lift_joint",
"l_upper_arm_roll_joint",
"l_elbow_flex_joint",
"l_forearm_roll_joint",
"l_wrist_flex_joint",
"l_wrist_roll_joint"]
#print out the positions, velocities, and efforts of the right arm joints
joint_names_right = ["r_shoulder_pan_joint",
"r_shoulder_lift_joint",
"r_upper_arm_roll_joint",
"r_elbow_flex_joint",
"r_forearm_roll_joint",
"r_wrist_flex_joint",
"r_wrist_roll_joint"]
while(1):
(position, velocity, effort) = call_return_joint_states(joint_names_left)
print "left position:", pplist(position)
print "left velocity:", pplist(velocity)
print "left effort:", pplist(effort)
(position, velocity, effort) = call_return_joint_states(joint_names_right)
print "right position:", pplist(position)
print "right velocity:", pplist(velocity)
print "right effort:", pplist(effort)
time.sleep(1)
| StarcoderdataPython |
98781 | import os
from PolTools.utils.make_random_filename import generate_random_filename
from PolTools.utils.verify_bed_file import verify_bed_files
def run_coverage(regions_filename, sequencing_filename, output_filename='', flags=None):
    """
    Runs strand specific bedtools coverage to get the number of counts in the sequencing file in the regions file.

    :param regions_filename: filename of the regions of the genome to quantify
    :type regions_filename: str
    :param sequencing_filename: filename of the sequencing data collected
    :type sequencing_filename: str
    :param output_filename: optional name of the output file (will be random if not provided)
    :type output_filename: str
    :param flags: optional flags (like -d for depth at each base)
    :type flags: list
    :return: filename of the resultant bedtools coverage output
    :rtype: str
    """
    import subprocess

    if output_filename == '':
        output_filename = generate_random_filename()
    verify_bed_files(regions_filename, sequencing_filename)
    # Build an argument vector instead of interpolating filenames into a shell
    # string via os.system, which broke on paths containing spaces or shell
    # metacharacters (and was injection-prone).
    command = ["bedtools", "coverage", "-s", "-nonamecheck"]
    if flags is not None:
        command.extend(flags)
    command.extend(["-a", regions_filename, "-b", sequencing_filename])
    # subprocess.call mirrors os.system: the exit status is not checked, so a
    # failed bedtools run still yields (a possibly empty) output file.
    with open(output_filename, "w") as output_file:
        subprocess.call(command, stdout=output_file)
    return output_filename
| StarcoderdataPython |
3220746 | import gdb
import config
import midas_utils
from execution_context import ExecutionContext
def response(success,
             message,
             result,
             type=None,
             variableReference=0,
             namedVariables=None,
             indexedVariables=None,
             memoryReference=None):
    """Assemble a DAP-style evaluate-response payload.

    The variable data goes under "body"; `success` and `message` sit at the
    top level of the returned dict.
    """
    body = {
        "result": result,
        "type": type,
        "variablesReference": variableReference,
        "namedVariables": namedVariables,
        "indexedVariables": indexedVariables,
        "memoryReference": memoryReference,
    }
    return {"body": body, "success": success, "message": message}
def find_variable(frame, name):
    """Find a variable by scanning closest block, then static, then global scope.

    Returns the first match, or None when no scope contains `name`.
    """
    lookups = (midas_utils.get_closest, midas_utils.get_static, midas_utils.get_global)
    for lookup in lookups:
        found = lookup(frame, name)
        if found is not None:
            return found
    return None
class WatchVariable(gdb.Command):
    """Not to be confused with watch point."""
    def __init__(self, executionContexts):
        super(WatchVariable, self).__init__("gdbjs-watch-variable", gdb.COMMAND_USER)
        self.name = "watch-variable"
        # Map of thread id -> ExecutionContext, shared with the rest of Midas.
        self.executionContexts: ExecutionContext = executionContexts
    @config.timeInvocation
    def invoke(self, args, from_tty):
        # Resolve `expr` in the frame identified by `frameId`, register it as a
        # watched variable, and send a DAP-style response back to the client.
        # `begin`/`end` (-1 when unused) slice an array view out of the value;
        # `scope` == "first" widens the search to every frame on the stack.
        try:
            [expr, frameId, begin, end, scope] = midas_utils.parse_command_args(args, str, int, int, int, str)
            refId = config.variableReferences.get_context(frameId)
            if refId is None:
                raise gdb.GdbError("No variable reference mapping for frame id {} exists".format(frameId))
            ec = self.executionContexts.get(refId.threadId)
            if ec is None:
                raise gdb.GdbError("Execution context does not exist")
            # Fast path: the expression is already tracked as a free-floating watch.
            var = ec.free_floating_watchvariable(expr)
            if var is not None:
                res = var.to_vs()
                result = response(success=True,
                                  message=None,
                                  result=res["value"],
                                  type="{}".format(var.get_type()),
                                  variableReference=var.get_variable_reference())
                midas_utils.send_response(self.name, result, midas_utils.prepare_command_response)
                return
            frame = ec.set_known_context(frameId)
            # Dotted expressions are resolved one member access at a time.
            components = expr.split(".")
            foundFrameId = None
            it = find_variable(frame, components[0])
            if scope == "first" and it is None:
                # Not visible in the requested frame: scan the whole stack and
                # take the match from whichever frame provides one.
                for sf in ec.stack:
                    it = find_variable(sf.frame, components[0])
                    if it is not None:
                        for comp in components[1:]:
                            it = it[comp]
                            if it is None:
                                break
                        foundFrameId = sf.frame_id()
                if foundFrameId is not None:
                    # when this stack frame goes out of scope, it removes `expr` free floating variable from ec
                    if begin != -1 and end != -1:
                        # Carve out the requested sub-array [begin:end) as a cast view.
                        it = it[begin]
                        bound = max((end - begin) - 1, 0)
                        it = it.cast(it.type.array(bound))
                        expr = "{}[{}:{}]".format(expr, begin, end)
                    sf = ec.get_stackframe(foundFrameId)
                    var = sf.add_watched_variable(expr, it)
                    ec.set_free_floating(expr, var)
                    sf.set_free_floating(expr)
                    res = var.to_vs()
                    result = response(success=True,
                                      message=None,
                                      result=res["value"],
                                      type="{}".format(var.get_type()),
                                      variableReference=var.get_variable_reference())
                    midas_utils.send_response(self.name, result, midas_utils.prepare_command_response)
                    return
            # Normal path: walk the member chain in the requested frame.
            for comp in components[1:]:
                it = it[comp]
                if it is None:
                    break
            if it is None:
                midas_utils.send_response(
                    self.name,
                    response(result="no symbol with that name in context",
                             success=False,
                             message="could not evaluate"), midas_utils.prepare_command_response)
            else:
                if begin != -1 and end != -1:
                    # Same sub-array carving as above for the found-in-frame case.
                    it = it[begin]
                    bound = max((end - begin) - 1, 0)
                    it = it.cast(it.type.array(bound))
                    expr = "{}[{}:{}]".format(expr, begin, end)
                sf = ec.get_stackframe(frameId)
                v = sf.add_watched_variable(expr, it)
                res = v.to_vs()
                result = response(success=True,
                                  message=None,
                                  result=res["value"],
                                  type="{}".format(v.get_type()),
                                  variableReference=v.get_variable_reference())
                midas_utils.send_response(self.name, result, midas_utils.prepare_command_response)
        except Exception as e:
            # Any failure is logged and reported to the client as a failed evaluate.
            config.log_exception(config.error_logger(), "{} failed: {}".format(self.name, e), e)
            midas_utils.send_response(self.name,
                                      response(success=False, message="Could not be evaluated", result=None),
                                      midas_utils.prepare_command_response)
| StarcoderdataPython |
31306 | <reponame>Vladimir-Antonovich/cloudify-vsphere-plugin
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import Mock, patch
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from vsphere_plugin_common.constants import DELETE_NODE_ACTION
from vsphere_network_plugin import ippool
class IPPoolTest(unittest.TestCase):
    # Unit tests for the vsphere ippool node operations, using Cloudify's
    # mock context and a patched VsphereClient.
    def setUp(self):
        super(IPPoolTest, self).setUp()
        # Fresh mock node context for every test, registered as the current ctx.
        self.mock_ctx = MockCloudifyContext(
            'node_name',
            properties={},
            runtime_properties={}
        )
        self.mock_ctx._operation = Mock()
        current_ctx.set(self.mock_ctx)
    @patch('vsphere_plugin_common.VsphereClient.get')
    def test_create(self, mock_client_get):
        # create() should call create_ippool with the node's pool definition
        # plus related network instances, and store the pool id in runtime props.
        mock_client_get().create_ippool.side_effect = [12345]
        rel = Mock()
        rel.type_hierarchy = [
            "cloudify.relationships.vsphere.ippool_connected_to_network"]
        rel.target.node.type_hierarchy = ["cloudify.vsphere.nodes.Network"]
        self.mock_ctx.instance._relationships = [rel]
        self.mock_ctx.node._properties = {
            'connection_config': {
                'host': 'host',
                'port': '80'
            },
            "datacenter_name": "datacenter",
            "ippool": {
                "name": "ippool-check",
                "subnet": "192.0.2.0",
                "netmask": "255.255.255.0",
                "gateway": "192.0.2.254",
                "range": "192.0.2.1#12"
            }
        }
        ippool.create()
        self.assertEqual(
            self.mock_ctx.instance.runtime_properties,
            {'ippool': 12345}
        )
        mock_client_get().create_ippool.assert_called_once_with(
            'datacenter', {
                'subnet': '192.0.2.0',
                'netmask': '255.255.255.0',
                'range': '192.0.2.1#12',
                'name': 'ippool-check',
                'gateway': '192.0.2.254'
            },
            [rel.target.instance])
    @patch('vsphere_plugin_common.VsphereClient.get')
    def test_delete(self, mock_client_get):
        # delete() is a no-op without an 'ippool' runtime property; with one it
        # must call delete_ippool and clear the runtime properties.
        self.mock_ctx._operation.name = DELETE_NODE_ACTION
        mock_client_get().delete_ippool.side_effect = [None]
        self.mock_ctx.node.properties['connection_config'] = {
            'host': 'host',
            'port': '80'
        }
        self.mock_ctx.node.properties["datacenter_name"] = "datacenter"
        # nothing to remove
        ippool.delete()
        self.assertFalse(self.mock_ctx.instance.runtime_properties)
        # something exists
        self.mock_ctx.instance.runtime_properties['ippool'] = 12345
        ippool.delete()
        mock_client_get().delete_ippool.assert_called_once_with(
            'datacenter', 12345)
        self.assertFalse(self.mock_ctx.instance.runtime_properties)
if __name__ == '__main__':
    # Allow running this test module directly with the standard unittest runner.
    unittest.main()
| StarcoderdataPython |
1673288 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
wakatime.projects.mercurial
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Information about the mercurial project for a given file.
:copyright: (c) 2013 <NAME>.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import sys
from .base import BaseProject
from ..compat import u, open
log = logging.getLogger('WakaTime')
class Mercurial(BaseProject):
    """Project detector for Mercurial repositories (looks for a .hg directory)."""

    def process(self):
        """Return True when self.path lies inside a Mercurial repository."""
        self.configDir = self._find_hg_config_dir(self.path)
        return self.configDir is not None

    def name(self):
        """Return the repository directory name, or None when not detected."""
        if self.configDir:
            return u(os.path.basename(os.path.dirname(self.configDir)))
        return None  # pragma: nocover

    def branch(self):
        """Return the current branch from .hg/branch, falling back to 'default'.

        The file is read as UTF-8 first; on decode failure the filesystem
        encoding is tried. Any read failure results in the default branch.
        """
        if self.configDir:
            branch_file = os.path.join(self.configDir, 'branch')
            try:
                with open(branch_file, 'r', encoding='utf-8') as fh:
                    return u(fh.readline().strip().rsplit('/', 1)[-1])
            except UnicodeDecodeError:  # pragma: nocover
                try:
                    with open(branch_file, 'r', encoding=sys.getfilesystemencoding()) as fh:
                        return u(fh.readline().strip().rsplit('/', 1)[-1])
                except Exception:
                    # FIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt.
                    log.exception("Exception:")
            except IOError:  # pragma: nocover
                log.exception("Exception:")
        return u('default')

    def _find_hg_config_dir(self, path):
        """Walk upward from `path` until a .hg directory is found or the root is hit."""
        path = os.path.realpath(path)
        if os.path.isfile(path):
            path = os.path.split(path)[0]
        if os.path.isdir(os.path.join(path, '.hg')):
            return os.path.join(path, '.hg')
        split_path = os.path.split(path)
        if split_path[1] == '':
            return None
        return self._find_hg_config_dir(split_path[0])
| StarcoderdataPython |
1774973 | <filename>topics/functions/variable-scope.py
# Teaching example for Python variable scoping.
name = 'Tonda'
def get_name():
    # Because `name` is assigned later in this function, Python treats it as a
    # LOCAL variable for the entire function body -- so this print raises
    # UnboundLocalError instead of reading the global. That error is the point
    # of the demo; do not "fix" it.
    print(f'{name} (inside)')
    name = 'Jolana'
print(f'{name} (before call)')
get_name()
print(f'{name} (after call)')
| StarcoderdataPython |
3211260 | #!/usr/bin/env python
import os
import select
from subprocess import Popen
from flask import Flask, request, render_template, redirect, url_for
APP = Flask(__name__)
# NOTE(review): passing a list with shell=True means only the first element is
# used as the shell command string (POSIX). It works here, but
# Popen('rm mellon_fifo', shell=True) or os.remove would be clearer -- confirm.
Popen(['rm mellon_fifo'], shell=True).wait() #remove old FIFO pipe
os.mkfifo("mellon_fifo")
# Module-level handles populated by init_fs_daemon(): the FIFO used to pass the
# 2FA code, and the mellon filesystem daemon process.
mellon_fifo = None
mellon_fs = None
@APP.route('/', methods=['POST', 'GET'])
def login():
    """Serve the credential form on GET; on POST start the FS daemon and go to 2FA."""
    if request.method != 'GET':
        init_fs_daemon()
        return redirect(url_for('auth'))
    return render_template('usr_cred.html')
def init_fs_daemon():
    # Launch the mellon filesystem daemon with credentials taken from the
    # submitted form, then open the FIFO used to feed it the 2FA code.
    global mellon_fifo
    global mellon_fs
    args=['./bin/mellon', 'MellonFS']
    # Each credential field is optional; only non-empty values become flags.
    uname = request.form['username']
    if uname:
        args.append('--user='+uname)
    email = request.form['email']
    if email:
        args.append('--email='+email)
    key = request.form['master_key']
    if key:
        # NOTE(review): passing the master key on the command line exposes it
        # in the process list (ps); consider an environment variable or stdin.
        args.append('--master_key='+key)
    mellon_fs = Popen(args)
    # Opened for writing; auth() writes codes here and closes it when the
    # daemon exits. Blocks until the daemon opens the FIFO for reading.
    mellon_fifo = open("mellon_fifo", "w")
@APP.route('/auth', methods=['GET', 'POST'])
def auth():
    # Second-factor page: forwards the submitted code to the daemon via the
    # FIFO, or returns to login once the daemon has exited.
    # NOTE(review): Popen.poll() is None while running and the exit code after
    # exit -- so this branch triggers only on a NON-ZERO exit status; confirm
    # that a clean (0) exit should keep serving this page.
    if mellon_fs.poll():
        mellon_fifo.close()
        return redirect(url_for('login'))
    else:
        if request.method == 'POST':
            code = request.form['fa_code']
            # Only the first 5 characters are forwarded -- presumably the
            # expected 2FA code length; verify against the daemon protocol.
            mellon_fifo.write(code[:5])
            mellon_fifo.flush()
        return render_template('authin.html')
def main():
    # Start the Flask development server (default host/port).
    APP.run()
if __name__=="__main__":
    main()
| StarcoderdataPython |
1655132 | <gh_stars>10-100
#!/usr/local/bin/python
"""
Author: <NAME>
Contact: <EMAIL>
Testing:
import dash_client
mpd_file = <MPD_FILE>
dash_client.playback_duration(mpd_file, 'http://192.168.127.12:8005/')
From commandline:
python dash_client.py -m "http://192.168.127.12:8006/media/mpd/x4ukwHdACDw.mpd" -p "all"
python dash_client.py -m "http://127.0.0.1:8000/media/mpd/x4ukwHdACDw.mpd" -p "basic"
"""
from __future__ import division
from datetime import datetime
from argparse import ArgumentParser
from collections import defaultdict
import errno
import httplib
from multiprocessing import Process, Queue
import os
import random
import signal
from string import ascii_letters, digits
import sys
import time
import timeit
import urllib2
import urlparse
import string
import urllib2
import fcntl
import psutil
from subprocess import *
from adaptation import basic_dash, basic_dash2, weighted_dash, netflix_dash
from adaptation.adaptation import WeightedMean
import config_dash
from configure_log_file import configure_log_file, write_json
import dash_buffer
import read_mpd
from oauthlib.uri_validate import segment
from twisted.python.util import println
from cherrypy import quickstart
import subprocess
from symbol import except_clause
''' try:
WindowsError
except NameError:
from shutil import WindowsError
'''
# Constants
DEFAULT_PLAYBACK = 'BASIC'   # adaptation scheme used when -p is not given
DOWNLOAD_CHUNK = 1024        # bytes per urllib read() in download_segment()
# Globals for arg parser with the default values
# Not sure if this is the correct way ....
# (main() overwrites these via globals().update(vars(args)))
MPD = None                   # -m: URL of the MPD file
HOST = None                  # -host: host IP, used to rewrite URLs for QUIC
LIST = False                 # -l: only list the available representations
QUIC = False                 # -quic: download via the external QUIC client
CURL = False                 # -curl: download via the external libcurl client
PLAYBACK = DEFAULT_PLAYBACK  # -p: adaptation scheme name
DOWNLOAD = False             # -d: keep downloaded segments after playback
SEGMENT_LIMIT = None         # -n: stop after this many segments
CONNECTION_TYPE_STR = ""     # label used in log / JSON file names
JUMP = False                 # -jump: enable seek ("jump") scenarios
JUMP_SCENARIO = ""           # -js: comma-separated "at->to" second pairs
CMD = ""                     # command line used to spawn the external client
JUMP_BUFFER_COUNTER = 0      # shared counter consumed by the adaptation modules
class DashPlayback:
    """Container for the parsed MPD playback description.

    audio[bandwidth] -> media object holding duration and the URL list
    video[bandwidth] -> media object holding duration and the URL list
    """

    def __init__(self):
        # All fields are populated later by read_mpd.read_mpd().
        self.min_buffer_time = None
        self.playback_duration = None
        self.audio = {}
        self.video = {}
def get_mpd(url):
    """ Module to download the MPD from the URL and save it to file.

    :param url: URL of the MPD manifest
    :return: local filename of the saved MPD, or None on any download error
    """
    try:
        # Effectively no timeout; the caller retries in a loop anyway.
        connection = urllib2.urlopen(url, timeout=9999)
    except urllib2.HTTPError, error:
        config_dash.LOG.error("Unable to download MPD file HTTP Error: %s" % error.code)
        return None
    except urllib2.URLError:
        error_message = "URLError. Unable to reach Server.Check if Server active"
        config_dash.LOG.error(error_message)
        print error_message
        return None
    except IOError, httplib.HTTPException:
        # NOTE(review): this Py2 clause binds the IOError instance to the
        # name ``httplib.HTTPException`` rather than catching both types;
        # catching both would require a tuple. Confirm intent.
        message = "Unable to , file_identifierdownload MPD file HTTP Error."
        config_dash.LOG.error(message)
        return None
    mpd_data = connection.read()
    connection.close()
    # Save the manifest locally, named after the last URL path component.
    mpd_file = url.split('/')[-1]
    mpd_file_handle = open(mpd_file, 'w')
    mpd_file_handle.write(mpd_data)
    mpd_file_handle.close()
    config_dash.LOG.info("Downloaded the MPD file {}".format(mpd_file))
    return mpd_file
def get_bandwidth(data, duration):
    """Return the throughput of a segment download in bits per second.

    :param data: downloaded size in bytes
    :param duration: download time in seconds
    """
    bits = data * 8
    return bits / duration
def get_domain_name(url):
    """Return the scheme and authority part of *url*, e.g. ``http://host/``.

    From : http://stackoverflow.com/questions/9626535/get-domain-name-from-url
    """
    parts = urlparse.urlparse(url)
    return '{uri.scheme}://{uri.netloc}/'.format(uri=parts)
def id_generator(id_size=6):
    """Return a random identifier of the form ``TEMP_`` plus *id_size* chars.

    The random suffix is drawn from ASCII letters and digits.
    """
    alphabet = ascii_letters + digits
    suffix = [random.choice(alphabet) for _ in range(id_size)]
    return "TEMP_" + ''.join(suffix)
def download_segment(segment_url, dash_folder, sb):
    """Download one media segment via urllib, or via an external CURL/QUIC client.

    :param segment_url: full URL of the segment
    :param dash_folder: local folder the segment file is written into
    :param sb: Popen handle of the external client; only used when CURL or
               QUIC is enabled (may be None for the urllib path)
    :return: (segment_size, segment_filename); size is -1 when the external
             client reports a failure
    """
    # URLLIB
    if (not CURL and not QUIC): # URLLIB
        """ HTTP Module to download the segment """
        try:
            # print segment_url
            connection = urllib2.urlopen(segment_url, timeout=None)
        except urllib2.HTTPError, error:
            config_dash.LOG.error("Unable to download DASH Segment {} HTTP Error:{} ".format(segment_url, str(error.code)))
            return None
        # Strip the leading slashes off the URL path to build a relative
        # local filename under dash_folder.
        parsed_uri = urlparse.urlparse(segment_url)
        segment_path = '{uri.path}'.format(uri=parsed_uri)
        while segment_path.startswith('/'):
            segment_path = segment_path[1:]
        segment_filename = os.path.join(dash_folder, os.path.basename(segment_path))
        make_sure_path_exists(os.path.dirname(segment_filename))
        segment_file_handle = open(segment_filename, 'wb')
        segment_size = 0
        # Stream the body in DOWNLOAD_CHUNK-sized reads; a short read marks EOF.
        while True:
            segment_data = connection.read(DOWNLOAD_CHUNK)
            segment_size += len(segment_data)
            segment_file_handle.write(segment_data)
            if len(segment_data) < DOWNLOAD_CHUNK:
                break
        connection.close()
        segment_file_handle.close()
        return segment_size, segment_filename
    if (CURL or QUIC): # CURL or QUIC client
        """ CURL or QUIC client Module to download the segment """
        parsed_uri = urlparse.urlparse(segment_url)
        segment_path = '{uri.path}'.format(uri=parsed_uri)
        while segment_path.startswith('/'):
            segment_path = segment_path[1:]
        segment_filename = os.path.join(dash_folder, os.path.basename(segment_path))
        requested_url = segment_url
        if QUIC:
            # Rewrite the host so the QUIC client sees its configured origin.
            requested_url = string.replace(segment_url, 'https://' + HOST, config_dash.QUIC_FILES_HEADER_XORIGINAL_URL_DOMAIN)
        # The external client reads one URL per line on stdin and reports the
        # downloaded size on stdout between file_size_start:/:file_size_end.
        print "Write requested_url to subprocess stdin: ", requested_url
        print sb.stdin.write(requested_url + '\n')
        while True:
            out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
            if "FATAL" in out or "Failed to connect" in out or "ERROR" in out:
                # Client reported a failure: signal -1 and kill the QUIC client.
                segment_size = "-1"
                print "calculated segment size:", int(segment_size)
                int_segment_size = int(segment_size)
                check_kill_process("quic_client")
                break
            if "file_size_start:" in out:
                start_index = out.find("file_size_start:") + len("file_size_start:")
                end_index = out.find(":file_size_end")
                segment_size = out[start_index:end_index]
                print "calculated segment size:", int(segment_size)
                int_segment_size = int(segment_size)
                if int_segment_size == -1:
                    # libcurl client signalled failure via a -1 size.
                    check_kill_process("LibCurlCppConsole")
                break
        return int_segment_size, segment_filename
def get_media_all(domain, media_info, file_identifier, done_queue):
    """ Download the media from the list of URL's in media.

    :param domain: base URL of the server
    :param media_info: (bandwidth, media_dict) pair for one representation
    :param file_identifier: local folder that receives the segment files
    :param done_queue: receives (bandwidth, segment_url, elapsed) per segment
                       and a final (bandwidth, 'STOP', total_time) marker
    """
    bandwidth, media_dict = media_info
    media = media_dict[bandwidth]
    media_start_time = timeit.default_timer()
    for segment in [media.initialization] + media.url_list:
        start_time = timeit.default_timer()
        segment_url = urlparse.urljoin(domain, segment)
        # BUG FIX: download_segment() takes three arguments; the third (the
        # external-client subprocess handle) is only used on the CURL/QUIC
        # paths, so pass None here — this worker always uses the urllib path.
        _, segment_file = download_segment(segment_url, file_identifier, None)
        elapsed = timeit.default_timer() - start_time
        if segment_file:
            done_queue.put((bandwidth, segment_url, elapsed))
    media_download_time = timeit.default_timer() - media_start_time
    done_queue.put((bandwidth, 'STOP', media_download_time))
    return None
def make_sure_path_exists(path):
    """Create *path* (with parents) if it does not already exist.

    Any OSError other than "already exists" is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        already_there = (err.errno == errno.EEXIST)
        if not already_there:
            raise
def print_representations(dp_object):
    """ Module to print the representations.

    Lists every video bitrate parsed from the MPD (used by the -l option).
    """
    print "The DASH media has the following video representations/bitrates"
    for bandwidth in dp_object.video:
        print bandwidth
def start_playback_smart(dp_object, domain, playback_type=None, download=False, video_segment_duration=None, connection_type="", JUMP_SCENARIO=""):
    """ Module that downloads the MPD-FIle and download
        all the representations of the Module to download
        the MPEG-DASH media.
        Example: start_playback_smart(dp_object, domain, "SMART", DOWNLOAD, video_segment_duration)

    :param dp_object: The DASH-playback object
    :param domain: The domain name of the server (The segment URLS are domain + relative_address)
    :param playback_type: The type of playback
                          1. 'BASIC' - The basic adapataion scheme
                          2. 'SARA' - Segment Aware Rate Adaptation
                          3. 'NETFLIX' - Buffer based adaptation used by Netflix
    :param download: Set to True if the segments are to be stored locally (Boolean). Default False
    :param video_segment_duration: Playback duratoin of each segment
    :param connection_type: label (URLLIB/CURL/QUIC + run number) used for logging
    :param JUMP_SCENARIO: comma-separated "at->to" seek pairs, used when JUMP is set
    :return: (playback time, total bytes downloaded), or None on an IOError
    """
    # Initialize the DASH buffer
    dash_player = dash_buffer.DashPlayer(dp_object.playback_duration, video_segment_duration, connection_type)
    dash_player.start()
    # A folder to save the segments in
    file_identifier = 'URLLIB_' #id_generator()
    config_dash.LOG.info("The segments are stored in %s" % file_identifier)
    dp_list = defaultdict(defaultdict)
    # Creating a Dictionary of all that has the URLs for each segment and different bitrates
    for bitrate in dp_object.video:
        # Getting the URL list for each bitrate
        dp_object.video[bitrate] = read_mpd.get_url_list(dp_object.video[bitrate], video_segment_duration,
                                                         dp_object.playback_duration, bitrate)
        if "$Bandwidth$" in dp_object.video[bitrate].initialization:
            dp_object.video[bitrate].initialization = dp_object.video[bitrate].initialization.replace(
                "$Bandwidth$", str(bitrate))
        media_urls = [dp_object.video[bitrate].initialization] + dp_object.video[bitrate].url_list
        for segment_count, segment_url in enumerate(media_urls, dp_object.video[bitrate].start):
            # segment_duration = dp_object.video[bitrate].segment_duration
            dp_list[segment_count][bitrate] = segment_url
            # print segment_count,bitrate,segment_url
    bitrates = dp_object.video.keys()
    bitrates.sort()
    average_dwn_time = 0
    segment_files = []
    # For basic adaptation
    previous_segment_times = []
    recent_download_sizes = []
    weighted_mean_object = None
    # Start conservatively at the lowest bitrate.
    current_bitrate = bitrates[0]
    previous_bitrate = None
    total_downloaded = 0
    # Delay in terms of the number of segments
    delay = 0
    segment_duration = 0
    segment_size = segment_download_time = None
    # Netflix Variables
    average_segment_sizes = netflix_rate_map = None
    netflix_state = "INITIAL"
    # Subprocess handle for the external CURL/QUIC client (urllib path: None).
    sb = None
    global JUMP_BUFFER_COUNTER
    JUMP_BUFFER_COUNTER = 0
    # Start playback of all the segments
    """
    for segment1 in dp_list.keys():
        for bitrate1 in dp_list[segment1]:
            print segment1, bitrate1, dp_list[segment1][bitrate1]
    """
    if (CURL or QUIC): # CURL or QUIC client
        """ CURL or QUIC client Module to download the segment """
        # Spawn the external downloader and wait until it reports "started".
        if CURL:
            CMD = config_dash.CURL_CLIENT_CMD
            print CMD
        if QUIC:
            CMD = config_dash.QUIC_CLIENT_CMD
            print CMD
        sb = Popen(CMD, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        while True:
            out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
            if "started" in out:
                print out
                break
    max_jump_count = 0
    current_jump_index = 0
    if JUMP:
        # Each entry is "at->to": seek from second "at" to second "to".
        JUMP_SCENARIO_ARR = JUMP_SCENARIO.split(',')
        max_jump_count = len(JUMP_SCENARIO_ARR)
    total_segment_count = len(dp_list)
    segment_number = 1
    while segment_number <= total_segment_count:
        config_dash.LOG.info("*************** segment_number:" + str(segment_number) + "*********************")
        config_dash.LOG.info(" {}: Processing the segment {}".format(playback_type.upper(), segment_number))
        write_json()
        if not previous_bitrate:
            previous_bitrate = current_bitrate
        if SEGMENT_LIMIT:
            if not dash_player.segment_limit:
                dash_player.segment_limit = int(SEGMENT_LIMIT)
            if segment_number > int(SEGMENT_LIMIT):
                config_dash.LOG.info("Segment limit reached")
                break
        # NOTE(review): ``bitrate`` below is the leftover variable of the
        # earlier for-loop — presumably every bitrate shares the same
        # ``start`` index; confirm against read_mpd.get_url_list().
        if segment_number == dp_object.video[bitrate].start:
            current_bitrate = bitrates[0]
        else:
            if playback_type.upper() == "BASIC":
                current_bitrate, average_dwn_time = basic_dash2.basic_dash2(segment_number, bitrates, average_dwn_time,
                                                                            recent_download_sizes,
                                                                            previous_segment_times, current_bitrate)
                if dash_player.buffer.qsize() > config_dash.BASIC_THRESHOLD:
                    # Buffer above threshold: postpone the next download.
                    delay = dash_player.buffer.qsize() - config_dash.BASIC_THRESHOLD
                config_dash.LOG.info("Basic-DASH: Selected {} for the segment {}".format(current_bitrate,
                                                                                         segment_number + 1))
            elif playback_type.upper() == "SMART":
                if not weighted_mean_object:
                    weighted_mean_object = WeightedMean(config_dash.SARA_SAMPLE_COUNT)
                    config_dash.LOG.debug("Initializing the weighted Mean object")
                # Checking the segment number is in acceptable range
                if segment_number < len(dp_list) - 1 + dp_object.video[bitrate].start:
                    try:
                        config_dash.LOG.info("JUMP_BUFFER_COUNTER: %s", str(JUMP_BUFFER_COUNTER))
                        current_bitrate, delay, JUMP_BUFFER_COUNTER = weighted_dash.weighted_dash(bitrates, dash_player,
                                                                                                 weighted_mean_object.weighted_mean_rate,
                                                                                                 current_bitrate,
                                                                                                 get_segment_sizes(dp_object,
                                                                                                                   segment_number + 1), JUMP_BUFFER_COUNTER)
                    except IndexError, e:
                        config_dash.LOG.error(e)
            elif playback_type.upper() == "NETFLIX":
                config_dash.LOG.info("Playback is NETFLIX")
                # Calculate the average segment sizes for each bitrate
                if not average_segment_sizes:
                    average_segment_sizes = get_average_segment_sizes(dp_object)
                if segment_number < len(dp_list) - 1 + dp_object.video[bitrate].start:
                    try:
                        if segment_size and segment_download_time:
                            segment_download_rate = segment_size / segment_download_time
                        else:
                            segment_download_rate = 0
                        config_dash.LOG.info("JUMP_BUFFER_COUNTER: %s", str(JUMP_BUFFER_COUNTER))
                        current_bitrate, netflix_rate_map, netflix_state, JUMP_BUFFER_COUNTER = netflix_dash.netflix_dash(
                            bitrates, dash_player, segment_download_rate, current_bitrate, average_segment_sizes,
                            netflix_rate_map, netflix_state, JUMP_BUFFER_COUNTER)
                        config_dash.LOG.info("NETFLIX: Next bitrate = {}".format(current_bitrate))
                    except IndexError, e:
                        config_dash.LOG.error(e)
                else:
                    config_dash.LOG.critical("Completed segment playback for Netflix")
                    break
                # If the buffer is full wait till it gets empty
                if dash_player.buffer.qsize() >= config_dash.NETFLIX_BUFFER_SIZE:
                    delay = (dash_player.buffer.qsize() - config_dash.NETFLIX_BUFFER_SIZE + 1) * segment_duration
                    config_dash.LOG.info("NETFLIX: delay = {} seconds".format(delay))
            else:
                config_dash.LOG.error("Unknown playback type:{}. Continuing with basic playback".format(playback_type))
                current_bitrate, average_dwn_time = basic_dash.basic_dash(segment_number, bitrates, average_dwn_time,
                                                                          segment_download_time, current_bitrate)
        segment_path = dp_list[segment_number][current_bitrate]
        segment_url = urlparse.urljoin(domain, segment_path)
        config_dash.LOG.info("{}: Segment URL = {}".format(playback_type.upper(), segment_url))
        if delay:
            # Adaptation asked us to wait before downloading the next segment.
            delay_start = time.time()
            config_dash.LOG.info("SLEEPING for {}seconds ".format(delay * segment_duration))
            while time.time() - delay_start < (delay * segment_duration):
                time.sleep(1)
            delay = 0
            config_dash.LOG.debug("SLEPT for {}seconds ".format(time.time() - delay_start))
        start_time = timeit.default_timer()
        try:
            # Retry the download, respawning the external client on failure.
            while True:
                # print "CALLING download_segment"
                segment_size, segment_filename = download_segment(segment_url, file_identifier, sb)
                if segment_size > -1: # SUCCESS DOWNLOAD
                    config_dash.LOG.info("{}: Downloaded segment {}".format(playback_type.upper(), segment_url))
                    break
                else: # FAIL DOWNLOAD
                    config_dash.LOG.error("Unable to download segment %s" % segment_url)
                    config_dash.LOG.info("TRYING to GET NEW SUBRPOCESS, SLEEPING for 0.5 SECOND")
                    sb = get_sub_process(CMD)
                    config_dash.LOG.info("GOT NEW SUBRPOCESS")
        except IOError, e:
            config_dash.LOG.error("Unable to save segment %s" % e)
            return None
        segment_download_time = timeit.default_timer() - start_time
        previous_segment_times.append(segment_download_time)
        recent_download_sizes.append(segment_size)
        # Updating the JSON information
        segment_name = os.path.split(segment_url)[1]
        if "segment_info" not in config_dash.JSON_HANDLE:
            config_dash.JSON_HANDLE["segment_info"] = list()
        config_dash.JSON_HANDLE["segment_info"].append((segment_name, current_bitrate, segment_size,
                                                        segment_download_time))
        total_downloaded += segment_size
        config_dash.LOG.info("{} : The total downloaded = {}, segment_size = {}, segment_number = {}".format(
            playback_type.upper(),
            total_downloaded, segment_size, segment_number))
        if playback_type.upper() == "SMART" and weighted_mean_object:
            weighted_mean_object.update_weighted_mean(segment_size, segment_download_time)
        # Hand the downloaded segment to the player buffer.
        segment_info = {'playback_length': video_segment_duration,
                        'size': segment_size,
                        'bitrate': current_bitrate,
                        'data': segment_filename,
                        'URI': segment_url,
                        'segment_number': segment_number}
        segment_duration = segment_info['playback_length']
        dash_player.write(segment_info)
        segment_files.append(segment_filename)
        config_dash.LOG.info("Downloaded %s. Size = %s in %s seconds" % (
            segment_url, segment_size, str(segment_download_time)))
        # Track bitrate switches for the playback report.
        if previous_bitrate:
            if previous_bitrate < current_bitrate:
                config_dash.JSON_HANDLE['playback_info']['up_shifts'] += 1
            elif previous_bitrate > current_bitrate:
                config_dash.JSON_HANDLE['playback_info']['down_shifts'] += 1
            previous_bitrate = current_bitrate
        if JUMP and current_jump_index < int(max_jump_count):
            # Apply the next pending seek once playback passes its trigger time.
            current_jump_scenario = JUMP_SCENARIO_ARR[current_jump_index]
            current_jump_scenario = current_jump_scenario.split('->')
            jump_at_second = int(current_jump_scenario[0])
            jump_to_second = int(current_jump_scenario[1])
            if dash_player.playback_timer.time() >= float(jump_at_second):
                current_jump_index = current_jump_index + 1
                # -1 because segment_number is incremented right below.
                segment_number = int(jump_to_second / segment_duration) - 1
                JUMP_BUFFER_COUNTER = config_dash.JUMP_BUFFER_COUNTER_CONSTANT
                dash_player.jump(jump_at_second, jump_to_second, current_bitrate)
                if (jump_to_second > jump_at_second):
                    dash_player.playback_timer.backwardStartTime(jump_to_second - jump_at_second)
                else:
                    dash_player.playback_timer.forwardStartTime(jump_at_second - jump_to_second)
                config_dash.LOG.info("Jumped to segment: %s", segment_number + 1)
        segment_number = segment_number + 1
    # waiting for the player to finish playing
    while dash_player.playback_state not in dash_buffer.EXIT_STATES:
        time.sleep(1)
    write_json()
    if not download:
        clean_files(file_identifier)
    if (CURL or QUIC):
        # Ask the external client to exit, then make sure it is gone.
        print "Exiting From Client Library"
        print sb.stdin.write("exit" + '\n')
        print "Exit Command Send To Client Library"
        if QUIC:
            try:
                print "Killing Process quic_client"
                check_kill_process("quic_client")
                print "Killed Process quic_client"
            except:
                None
        if CURL:
            try:
                print "Killing Process LibCurlCppConsole"
                check_kill_process("LibCurlCppConsole")
                print "Killed Process LibCurlCppConsole"
            except:
                None
    return dash_player.playback_timer.time(), total_downloaded
def get_sub_process(command):
    """(Re)spawn the external CURL/QUIC downloader and wait for its banner.

    :param command: shell command line to launch the client
    :return: Popen handle whose stdout has reported "started"
    """
    if CURL:
        config_dash.LOG.info("get_sub_process for CURL")
        sb = Popen(command, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        time.sleep(0.5) #wait for initialization of subprocess
        config_dash.LOG.info("got_sub_process for CURL")
        while True:
            out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
            print "non_block_read result:", out
            if "started" in out:
                break
        return sb
    elif QUIC:
        sleeped = False
        while True:
            if not sleeped:
                # libcurl's timeout is set to 5 seconds
                # Because of this quic_client must also sleep 5 seconds only once when a connection loss is detected.
                # Assumption: The maximum segment download time is less than 5 seconds for libcurl
                # If it's longer, quic_timeout_seconds and libcurl timeout must be set to higher but same values
                quic_timeout_seconds = 5;
                config_dash.LOG.info("get_sub_process for QUIC SLEEPING for " + str(quic_timeout_seconds) + " secs")
                time.sleep(quic_timeout_seconds)
                sleeped = True
            config_dash.LOG.info("get_sub_process for QUIC")
            sb = Popen(command, shell=True, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
            time.sleep(0.5) #wait for initialization of subprocess
            config_dash.LOG.info("got_sub_process for QUIC")
            out = non_block_read(sb.stdout) # will return '' instead of hanging for ever
            print "non_block_read result:", out
            if "started" in out:
                break
            # Client did not come up: kill it and retry the spawn.
            check_kill_process("quic_client")
        return sb
def get_segment_sizes(dp_object, segment_number):
    """ Module to get the segment sizes for the segment_number
    :param dp_object: DashPlayback object with per-bitrate segment_sizes lists
    :param segment_number: index of the segment of interest
    :return: dict mapping bitrate -> size of that segment
    """
    segment_sizes = dict([(bitrate, dp_object.video[bitrate].segment_sizes[segment_number]) for bitrate in dp_object.video])
    config_dash.LOG.debug("The segment sizes of {} are {}".format(segment_number, segment_sizes))
    return segment_sizes
def get_average_segment_sizes(dp_object):
    """Compute the mean segment size for every available bitrate.

    :param dp_object: DashPlayback object with per-bitrate segment_sizes lists
    :return: dict mapping bitrate -> average segment size (0 when empty)
    """
    average_segment_sizes = dict()
    for bitrate in dp_object.video:
        sizes = [float(s) for s in dp_object.video[bitrate].segment_sizes]
        if sizes:
            average_segment_sizes[bitrate] = sum(sizes) / len(sizes)
        else:
            # No segments listed for this bitrate: report 0 instead of failing.
            average_segment_sizes[bitrate] = 0
    config_dash.LOG.info("The avearge segment size for is {}".format(average_segment_sizes.items()))
    return average_segment_sizes
def clean_files(folder_path):
    """
    Delete every file inside *folder_path* and then the folder itself.
    :param folder_path: Local Folder to be deleted
    """
    if os.path.exists(folder_path):
        try:
            for video_file in os.listdir(folder_path):
                file_path = os.path.join(folder_path, video_file)
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            os.rmdir(folder_path)
        except (OSError), e:
            config_dash.LOG.info("Unable to delete the folder {}. {}".format(folder_path, e))
        # NOTE(review): this success message is logged even when the
        # deletion above failed; confirm whether that is intended.
        config_dash.LOG.info("Deleted the folder '{}' and its contents".format(folder_path))
def start_playback_all(dp_object, domain):
    """ Module that downloads the MPD-FIle and download all the representations of
        the Module to download the MPEG-DASH media.

    Spawns one process per bitrate and waits for all of them to report STOP.
    NOTE(review): this function returns None, but main() unpacks two values
    from its result in the "all" branch — confirm the intended return value.
    """
    # audio_done_queue = Queue()
    video_done_queue = Queue()
    processes = []
    file_identifier = id_generator(6)
    config_dash.LOG.info("File Segments are in %s" % file_identifier)
    for bitrate in dp_object.video:
        dp_object.video[bitrate] = read_mpd.get_url_list(bitrate, dp_object.video[bitrate],
                                                         dp_object.playback_duration,
                                                         dp_object.video[bitrate].segment_duration)
        # Same as download audio
        process = Process(target=get_media_all, args=(domain, (bitrate, dp_object.video),
                                                      file_identifier, video_done_queue))
        process.start()
        processes.append(process)
    for process in processes:
        process.join()
    count = 0
    for queue_values in iter(video_done_queue.get, None):
        bitrate, status, elapsed = queue_values
        if status == 'STOP':
            config_dash.LOG.critical("Completed download of %s in %f " % (bitrate, elapsed))
            count += 1
            if count == len(dp_object.video):
                # If the download of all the videos is done the stop the
                config_dash.LOG.critical("Finished download of all video segments")
                break
def create_arguments(parser):
    """ Adding arguments to the parser.

    Option dest names are upper-case on purpose: main() pushes the parsed
    values straight into the module globals of the same names via
    globals().update(vars(args)).
    """
    parser.add_argument('-m', '--MPD',
                        help="Url to the MPD File")
    parser.add_argument('-l', '--LIST', action='store_true',
                        help="List all the representations")
    parser.add_argument('-p', '--PLAYBACK',
                        default=DEFAULT_PLAYBACK,
                        help="Playback type (basic, sara, netflix, or all)")
    parser.add_argument('-n', '--SEGMENT_LIMIT',
                        default=SEGMENT_LIMIT,
                        help="The Segment number limit")
    parser.add_argument('-d', '--DOWNLOAD', action='store_true',
                        default=False,
                        help="Keep the video files after playback")
    parser.add_argument('-quic', '--QUIC', action='store_true',
                        default=False,
                        help="Use Quic Downloder")
    parser.add_argument('-curl', '--CURL', action='store_true',
                        default=False,
                        help="Use Curl Downloder")
    parser.add_argument('-host', '--HOST',
                        help="Host Ip for QUIC")
    parser.add_argument('-jump', '--JUMP', action='store_true',
                        default=False,
                        help="Jump sceneario enabled")
    parser.add_argument('-js', '--JUMP_SCENARIO',
                        help="Jump Scenario")
def non_block_read(output):
    """Read whatever is currently buffered on *output* without blocking.

    Switches the pipe to non-blocking mode, waits briefly for the subprocess
    to produce output, and returns '' when nothing is available instead of
    hanging forever.
    """
    fd = output.fileno()
    fl = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
    # Give the subprocess a moment to write before attempting the read.
    time.sleep(0.5)
    try:
        output_str = output.read()
        print "non_block_read_ouptut >>>" + output_str + "<<< non_block_read_ouptut"
        return output_str
    except:
        # Nothing buffered (read on an empty non-blocking pipe raises).
        return ""
def main():
    """ Main Program wrapper.

    Runs ten playback rounds: each round re-parses the command line, fetches
    the MPD, and plays it back with the selected adaptation scheme, then
    prints aggregate download/playback totals.
    """
    # configure the log file
    # Create arguments
    sumOfTotalDownloaded = 0.0
    sumOfPlaybackTime = 0.0
    program_start_time = datetime.now()
    for runNo in range(1, 11):
        parser = ArgumentParser(description='Process Client parameters')
        create_arguments(parser)
        args = parser.parse_args()
        # Push parsed options into the module globals (MPD, PLAYBACK, ...).
        globals().update(vars(args))
        if QUIC:
            CONNECTION_TYPE_STR = "QUIC_" + str(runNo) + "_" + PLAYBACK
        elif CURL:
            CONNECTION_TYPE_STR = "CURL_" + str(runNo) + "_" + PLAYBACK
        else:
            CONNECTION_TYPE_STR = "URLLIB_" + str(runNo) + "_" + PLAYBACK
        configure_log_file(playback_type=PLAYBACK.lower(), connection_type=CONNECTION_TYPE_STR)
        config_dash.JSON_HANDLE['playback_type'] = PLAYBACK.lower()
        if not MPD:
            print "ERROR: Please provide the URL to the MPD file. Try Again.."
            return None
        config_dash.LOG.info('Downloading MPD file %s' % MPD)
        # Retrieve the MPD files for the video (retry until it succeeds)
        mpd_file = None
        while mpd_file == None:
            mpd_file = get_mpd(MPD)
            if mpd_file != None:
                break;
        domain = get_domain_name(MPD)
        dp_object = DashPlayback()
        # Reading the MPD file created
        dp_object, video_segment_duration = read_mpd.read_mpd(mpd_file, dp_object)
        config_dash.LOG.info("The DASH media has %d video representations" % len(dp_object.video))
        if LIST:
            # Print the representations and EXIT
            print_representations(dp_object)
            return None
        if "all" in PLAYBACK.lower():
            if mpd_file:
                config_dash.LOG.critical("Start ALL Parallel PLayback")
                # NOTE(review): start_playback_all() returns None, so this
                # two-value unpack will fail — confirm intended behavior.
                playbackTime, totalDownloaded = start_playback_all(dp_object, domain)
        elif "basic" in PLAYBACK.lower():
            config_dash.LOG.critical("Started Basic-DASH Playback")
            playbackTime, totalDownloaded = start_playback_smart(dp_object, domain, "BASIC", DOWNLOAD, video_segment_duration, CONNECTION_TYPE_STR, JUMP_SCENARIO)
        elif "sara" in PLAYBACK.lower():
            config_dash.LOG.critical("Started SARA-DASH Playback")
            playbackTime, totalDownloaded = start_playback_smart(dp_object, domain, "SMART", DOWNLOAD, video_segment_duration, CONNECTION_TYPE_STR, JUMP_SCENARIO)
        elif "netflix" in PLAYBACK.lower():
            config_dash.LOG.critical("Started Netflix-DASH Playback")
            playbackTime, totalDownloaded = start_playback_smart(dp_object, domain, "NETFLIX", DOWNLOAD, video_segment_duration, CONNECTION_TYPE_STR, JUMP_SCENARIO)
        else:
            config_dash.LOG.error("Unknown Playback parameter {}".format(PLAYBACK))
            return None
        sumOfTotalDownloaded = sumOfTotalDownloaded + totalDownloaded
        sumOfPlaybackTime = sumOfPlaybackTime + playbackTime
        print "Run No:", runNo, "TOTAL DOWNLOADED: ", totalDownloaded
        print "Run No:", runNo, "PLAYPACK TIME: ", playbackTime
        print "Run No:", runNo, "SUM TOTAL DOWNLOADED: ", sumOfTotalDownloaded
        print "Run No:", runNo, "SUM PLAYPACK TIME: ", sumOfPlaybackTime
        totalDownloaded = 0
        playbackTime = 0
    program_end_time = datetime.now()
    delta = program_end_time - program_start_time
    print CONNECTION_TYPE_STR, "PROGRAM STARTED AT: ", program_start_time
    print CONNECTION_TYPE_STR, "PROGRAM FINISHED AT: ", program_end_time
    print CONNECTION_TYPE_STR, "PROGRAM DURATION: ", delta.total_seconds()
    print CONNECTION_TYPE_STR, "FINAL SUM OF TOTAL DOWNLOADED: ", sumOfTotalDownloaded
    print CONNECTION_TYPE_STR, "FINAL SUM OF PLAYPACK TIME: ", sumOfPlaybackTime
def kill(proc_pid):
    """Kill the process *proc_pid* together with all of its children.

    Best effort: any psutil/OS failure (e.g. the process is already gone)
    is silently ignored.
    """
    try:
        process = psutil.Process(proc_pid)
        # Kill the subtree first so children do not outlive the parent.
        for proc in process.children(recursive=True):
            proc.kill()
        process.kill()
    except:
        return
def check_kill_process(pstring):
    """SIGKILL every process whose ``ps ax`` listing matches *pstring*.

    Best effort: a PID that has already exited (or an unparsable line) is
    skipped so the remaining matches are still killed.
    """
    for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
        fields = line.split()
        pid = fields[0]
        try:
            os.kill(int(pid), signal.SIGKILL)
        except (OSError, ValueError):
            # BUG FIX: the original returned on the first failure, leaving
            # any remaining matching processes alive; keep iterating instead.
            continue
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
1609566 | import unittest
from openvpn_api.models import VPNModelBase
import netaddr # type: ignore
class ModelStub(VPNModelBase):
    """Minimal concrete subclass so VPNModelBase's parse helpers can be tested."""

    # NOTE(review): takes ``cls`` but is not decorated with @classmethod —
    # presumably the base class never calls it in these tests; confirm
    # against VPNModelBase before relying on it.
    def parse_raw(cls, raw: str):
        return None
class TestModelBase(unittest.TestCase):
    """Unit tests for the VPNModelBase parsing helpers (exercised via ModelStub)."""

    def test_parse_string(self):
        # None and empty strings collapse to None; other values are
        # stringified and stripped of surrounding whitespace.
        self.assertIsNone(ModelStub._parse_string(None))
        self.assertIsNone(ModelStub._parse_string(""))
        self.assertEqual(ModelStub._parse_string("a"), "a")
        self.assertEqual(ModelStub._parse_string(" a "), "a")
        self.assertEqual(ModelStub._parse_string(1), "1")
        self.assertEqual(ModelStub._parse_string(False), "False")

    def test_parse_int(self):
        # None passes through; ints are preserved; non-numeric values raise.
        self.assertIsNone(ModelStub._parse_int(None))
        self.assertEqual(ModelStub._parse_int(0), 0)
        self.assertEqual(ModelStub._parse_int(1), 1)
        with self.assertRaises(ValueError):
            ModelStub._parse_int("a")
        with self.assertRaises(ValueError):
            ModelStub._parse_int(False)

    def test_parse_ipv4(self):
        # Valid dotted quads parse (whitespace tolerated); garbage raises.
        self.assertIsNone(ModelStub._parse_ipv4(None))
        self.assertEqual(netaddr.IPAddress("1.2.3.4"), ModelStub._parse_ipv4("1.2.3.4"))
        self.assertEqual(netaddr.IPAddress("1.2.3.4"), ModelStub._parse_ipv4(" 1.2.3.4 "))
        self.assertEqual(netaddr.IPAddress("1.2.3.4"), ModelStub._parse_ipv4("1.2.3.4\n"))
        with self.assertRaises(netaddr.core.AddrFormatError):
            ModelStub._parse_ipv4("asd")
| StarcoderdataPython |
3311036 | <filename>riccipy/metrics/kasner_1.py
"""
Name: <NAME>
Coordinates: Cartesian
Symmetry: Axial
"""
from sympy import Rational, diag, symbols
coords = symbols("t x y z", real=True)
variables = ()
functions = ()
t, x, y, z = coords
metric = diag(-1, t ** Rational(4, 3), t ** Rational(4, 3), t ** Rational(-2, 3))
| StarcoderdataPython |
55534 | import theano
import numpy
# CRF implementation based on Lample et al.
# "Neural Architectures for Named Entity Recognition"
floatX=theano.config.floatX
def log_sum(x, axis=None):
    """Numerically stable log-sum-exp of *x* along *axis* (max-shift trick)."""
    shift = x.max(axis=axis, keepdims=True)
    summed = theano.tensor.exp(x - shift).sum(axis=axis)
    return x.max(axis=axis) + theano.tensor.log(summed)
def forward(observation_weights, transition_weights, return_best_sequence=False):
    """CRF forward pass over a batch of sequences.

    :param observation_weights: per-position label scores (positions first)
    :param transition_weights: label-to-label transition score matrix
    :param return_best_sequence: when True run Viterbi and return
        (best label sequence, per-step best scores); otherwise return the
        log partition function via log-sum-exp
    """
    def recurrence(observation_weights, previous_scores, transition_weights):
        # Broadcast previous scores, current observations and transitions
        # into a (batch, from-label, to-label) score tensor.
        previous_scores = previous_scores.dimshuffle(0, 1, 'x')
        observation_weights = observation_weights.dimshuffle(0, 'x', 1)
        scores = previous_scores + observation_weights + transition_weights.dimshuffle('x', 0, 1)
        if return_best_sequence:
            # Viterbi: keep the best score and the argmax backpointer.
            best_scores = scores.max(axis=1)
            best_states = scores.argmax(axis=1)
            return best_scores, best_states
        else:
            # Forward algorithm: marginalize over the previous label.
            return log_sum(scores, axis=1)
    initial = observation_weights[0]
    crf_states, _ = theano.scan(
        fn=recurrence,
        outputs_info=(initial, None) if return_best_sequence else initial,
        sequences=[observation_weights[1:],],
        non_sequences=transition_weights
    )
    if return_best_sequence:
        # Trace the backpointers from the last position to recover the path.
        sequence, _ = theano.scan(
            fn=lambda beta_i, previous: beta_i[theano.tensor.arange(previous.shape[0]), previous],
            outputs_info=theano.tensor.cast(theano.tensor.argmax(crf_states[0][-1], axis=1), 'int32'),
            sequences=theano.tensor.cast(crf_states[1][::-1], 'int32')
        )
        sequence = theano.tensor.concatenate([sequence[::-1], [theano.tensor.argmax(crf_states[0][-1], axis=1)]])
        return sequence, crf_states[0]
    else:
        return log_sum(crf_states[-1], axis=1)
def construct(name, input_tensor, n_labels, gold_labels, fn_create_parameter_matrix):
transition_weights = fn_create_parameter_matrix(name + "_crf_transition_weights", (n_labels + 2, n_labels + 2))
small = -1000.0
padding_start = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
padding_start = theano.tensor.set_subtensor(padding_start[:,:,-2], 0.0)
padding_end = theano.tensor.zeros((input_tensor.shape[0], 1, n_labels + 2)) + small
padding_end = theano.tensor.set_subtensor(padding_end[:,:,-1], 0.0)
observation_weights = theano.tensor.concatenate([input_tensor, theano.tensor.zeros((input_tensor.shape[0], input_tensor.shape[1], 2)) + small], axis=2)
observation_weights = theano.tensor.concatenate([padding_start, observation_weights, padding_end], axis=1)
observation_weights = observation_weights.dimshuffle(1,0,2) # reordering the tensor (words, sentences, labels)
# Score from tags
real_paths_scores = input_tensor[theano.tensor.arange(input_tensor.shape[0])[:, numpy.newaxis], theano.tensor.arange(input_tensor.shape[1]), gold_labels].sum(axis=1)
# Score from transition_weights
padding_id_start = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels
padding_id_end = theano.tensor.zeros((gold_labels.shape[0], 1), dtype=numpy.int32) + n_labels + 1
padded_gold_labels = theano.tensor.concatenate([padding_id_start, gold_labels, padding_id_end], axis=1)
real_paths_scores += transition_weights[
padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1)],
padded_gold_labels[theano.tensor.arange(gold_labels.shape[0])[:, numpy.newaxis], theano.tensor.arange(gold_labels.shape[1] + 1) + 1]
].sum(axis=1)
all_paths_scores = forward(observation_weights, transition_weights)
best_sequence, scores = forward(observation_weights, transition_weights, return_best_sequence=True)
scores = scores.dimshuffle(1,0,2)[:,:-1,:-2]
best_sequence = best_sequence.dimshuffle(1,0)[:,1:-1]
return all_paths_scores, real_paths_scores, best_sequence, scores
| StarcoderdataPython |
3233753 | #!/usr/bin/env python
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
def setup():
    """Configure and invoke the setuptools setup for the xData package."""
    # Imported locally so merely importing this module never needs setuptools.
    from setuptools import setup
    setup(
        name='xData',
        version='1.0.0',
        maintainer='<EMAIL>',
        packages=[
            'xData',
            'xData.Documentation',
            'xData.uncertainty',
            'xData.uncertainty.physicalQuantity',
            'xData.interactivePlot'
        ],
        # The package sources live in the current directory.
        package_dir={'xData': '.'},
        install_requires=[
            'numericalFunctions',
            'pqu',
            'numpy>=1.15'
        ],
        description='',
        license=''
    )

if __name__ == '__main__':
    setup()
| StarcoderdataPython |
3381243 | import torch
import torch.nn as nn
import torch.nn.functional as F
def _reset_parameters(layers):
    """Re-initialize the weight of every layer in *layers* in place.

    Each weight tensor is overwritten with values drawn uniformly from
    [-3e-3, 3e-3]; biases are left untouched.
    """
    bound = 3e-3
    for module in layers:
        module.weight.data.uniform_(-bound, bound)
class Actor(nn.Module):
    """Actor (Policy) Model: maps states to deterministic actions in [-1, 1]."""

    def __init__(self, state_size, action_size, seed, fc_layers=None):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc_layers (sequence of int): Number of nodes in hidden layers;
                defaults to (256, 64).  `None` is used as the default instead
                of a list to avoid the mutable-default-argument pitfall.
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)
        if fc_layers is None:
            fc_layers = [256, 64]
        # Define input and output sizes for every layer in the stack.
        # list() also accepts tuples and other sequences for fc_layers.
        dims = [state_size] + list(fc_layers) + [action_size]
        # Create the fully connected layers as pairs of consecutive sizes
        self.fc_layers = nn.ModuleList(
            [nn.Linear(dim_in, dim_out) for dim_in, dim_out in zip(dims[:-1], dims[1:])])
        # Initialize all layer weights uniformly in a small range
        _reset_parameters(self.fc_layers)
        print('Actor network built:', self.fc_layers)

    def forward(self, x):
        """Build an actor (policy) network that maps states -> actions."""
        # Pass the input through all layers applying ReLU activation, but the last
        for layer in self.fc_layers[:-1]:
            x = F.relu(layer(x))
        # The output layer uses tanh so every action component lies in [-1, 1]
        x = torch.tanh(self.fc_layers[-1](x))
        # Return the action for the input state
        return x
class Critic(nn.Module):
    """Critic (Value) Model: maps (state, action) pairs to a scalar value."""

    def __init__(self, state_size, action_size, seed, fc_layers=None):
        """Initialize parameters and build model.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fc_layers (sequence of int): Number of nodes in hidden layers;
                defaults to (256, 64).  `None` is used as the default instead
                of a list to avoid the mutable-default-argument pitfall.
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)
        if fc_layers is None:
            fc_layers = [256, 64]
        # Append the scalar output size to the layer dimensions
        dims = list(fc_layers) + [1]
        # Build the layer list explicitly because the second layer's input
        # is the first layer's output concatenated with the action vector.
        layers_list = []
        layers_list.append(nn.Linear(state_size, dims[0]))
        layers_list.append(nn.Linear(dims[0] + action_size, dims[1]))
        # Remaining hidden layers, if any
        for dim_in, dim_out in zip(dims[1:-1], dims[2:]):
            layers_list.append(nn.Linear(dim_in, dim_out))
        self.fc_layers = nn.ModuleList(layers_list)
        # Initialize all layer weights uniformly in a small range
        _reset_parameters(self.fc_layers)
        # Batch normalization applied to the first hidden layer's output
        self.bn = nn.BatchNorm1d(dims[0])
        print('Critic network built:', self.fc_layers)

    def forward(self, state, action):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # First layer sees only the state; normalize, then activate
        x = self.fc_layers[0](state)
        x = self.bn(x)
        x = F.relu(x)
        # Concatenate the normalized state features with the action
        x = torch.cat((x, action), dim=1)
        # Pass through the remaining hidden layers with ReLU activation
        for layer in self.fc_layers[1:-1]:
            x = F.relu(layer(x))
        # NOTE(review): sigmoid bounds the output to (0, 1); Q-values are
        # usually unbounded — confirm rewards are scaled to make this valid.
        x = torch.sigmoid(self.fc_layers[-1](x))
        # Return the Q-Value for the input state-action
        return x
191153 | <filename>Python_PyPal_PM/PyPoll/mainpypoll.py
# Module for reading CSV files
import csv

# Input file: raw ballot rows (voter id, county, candidate)
csv_path = ("Resources/election_data.csv")
# Output file: formatted election summary
txt_path = ("Output/election_data.txt")

# Accumulators filled row by row: one entry per ballot.
# total_votes collects voter ids (its length is the total vote count);
# list_candidates collects the candidate voted for on each ballot.
total_votes = []
list_candidates = []

# Read the CSV once into memory, skipping the header row
with open (csv_path, newline='') as csvfile:
    # Store the contents of election_data.csv in csvreader and convert to a
    # list so it can be iterated multiple times below; [1:] drops the header
    csvreader = csv.reader(csvfile, delimiter=',')
    csvlist = list(csvreader)[1 :]
    for row in csvlist:
        total_votes.append(row[0])
        list_candidates.append(row[2])

# Deduplicate candidate names via a set, then back to a list for indexing
unique_setcandidates = set(list_candidates)
unique_listcandidates = list(unique_setcandidates)

# Count votes per unique candidate (votecount[i] pairs with
# unique_listcandidates[i] — the two lists must stay index-aligned)
votecount=[]
for name in unique_listcandidates:
    count = 0
    for row in csvlist:
        if name==row[2]:
            count+=1
    votecount.append(count)

# Percentage of total votes per candidate, then formatted to 3 decimals
percentagevote =[]
for item in votecount:
    percentagevote.append((item/len(total_votes) * 100))
Roundedpercentagevote = ["%.3f"%item for item in percentagevote]

# Identify the winner before the parallel lists are consumed below
max_index = votecount.index(max(votecount))
winner = unique_listcandidates[max_index]

# Print the analysis to the terminal and mirror it to the output file.
# NOTE(review): the terminal separator has 25 dashes while the first two
# file separators have 22 — presumably unintentional; confirm before fixing.
with open(txt_path, "w") as output_file:
    print("Election Results")
    output_file.write("Election Results")
    output_file.write("\n")
    print("-------------------------")
    output_file.write("----------------------")
    output_file.write("\n")
    print(f'Total Votes: {len(total_votes)}')
    output_file.write(f'Total Votes: {len(total_votes)}')
    output_file.write("\n")
    print("-------------------------")
    output_file.write("----------------------")
    output_file.write("\n")
    # Emit candidates from highest to lowest vote count: each pass finds the
    # current maximum, prints it, then pops that index from all three
    # parallel lists (the pops MUST stay in lockstep to keep alignment)
    finalcandidate = ''
    finalpercentage = 0.000
    finalvotecount = 0
    for i in range(len(votecount)):
        indexofmax = votecount.index(max(votecount))
        finalcandidate = unique_listcandidates[indexofmax]
        finalpercentage = Roundedpercentagevote[indexofmax]
        finalvotecount = votecount[indexofmax]
        print("{}: {}% ({})".format(finalcandidate, finalpercentage, finalvotecount))
        output_file.write("{}: {}% ({})".format(finalcandidate, finalpercentage, finalvotecount))
        output_file.write("\n")
        unique_listcandidates.pop(indexofmax)
        votecount.pop(indexofmax)
        Roundedpercentagevote.pop(indexofmax)
    print("-------------------------")
    output_file.write("-------------------------")
    output_file.write("\n")
    print(f'Winner: {winner}')
    output_file.write(f'Winner: {winner}')
    output_file.write("\n")
    print("-------------------------")
    output_file.write("-------------------------")
| StarcoderdataPython |
1728494 | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from colorama import Fore
from clin.models.auth import ReadOnlyAuth
from clin.models.shared import Cleanup, Category, Entity, Kind, Audience, Partitioning
@dataclass
class OutputEventType:
    """Configuration of the event type produced by a SQL query."""

    category: Category
    owning_application: str
    audience: Audience
    repartitioning: Optional[Partitioning]
    cleanup: Cleanup

    @staticmethod
    def from_spec(spec: dict[str, any]):
        """Build an OutputEventType from its dictionary specification."""
        repartitioning = (
            Partitioning.from_spec(spec["repartitioning"])
            if "repartitioning" in spec
            else None
        )
        return OutputEventType(
            category=Category(spec["category"]),
            owning_application=spec["owningApplication"],
            audience=Audience(spec["audience"]),
            repartitioning=repartitioning,
            cleanup=Cleanup.from_spec(spec["cleanup"]),
        )

    def to_spec(self) -> dict[str, any]:
        """Serialize back to the dictionary specification format."""
        serialized = {
            "category": str(self.category),
            "owningApplication": self.owning_application,
            "audience": str(self.audience),
            "cleanup": self.cleanup.to_spec(),
        }
        # Repartitioning is optional and omitted entirely when absent
        if self.repartitioning:
            serialized["repartitioning"] = self.repartitioning.to_spec()
        return serialized
@dataclass
class SqlQuery(Entity):
    """A named SQL query entity with its output event type and authorization."""

    name: str
    sql: str
    envelope: bool
    output_event_type: OutputEventType
    auth: ReadOnlyAuth

    def __str__(self) -> str:
        return f"sql query {Fore.BLUE}{self.name}{Fore.RESET}"

    @property
    def kind(self) -> Kind:
        """Entity kind discriminator used by generic entity handling."""
        return Kind.SQL_QUERY

    @staticmethod
    def from_spec(spec: dict[str, any]) -> SqlQuery:
        """Build a SqlQuery from its dictionary specification."""
        return SqlQuery(
            name=spec["name"],
            sql=spec["sql"],
            envelope=spec["envelope"],
            output_event_type=OutputEventType.from_spec(spec["outputEventType"]),
            auth=ReadOnlyAuth.from_spec(spec["auth"]),
        )

    def to_spec(self) -> dict[str, any]:
        """Serialize back to the dictionary specification format."""
        # Missing auth serializes to an empty mapping rather than None
        auth_spec = self.auth.to_spec() if self.auth else {}
        return {
            "name": self.name,
            "sql": self.sql,
            "envelope": self.envelope,
            "outputEventType": self.output_event_type.to_spec(),
            "auth": auth_spec,
        }
| StarcoderdataPython |
187526 | <filename>benchmarks/linear_algebra/kernels/mvt/mvt.py
# Copyright 2021 Universidade da Coruña
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>.es>
#
# Contact:
# <NAME> <<EMAIL>>
"""<replace_with_module_description>"""
from benchmarks.polybench import PolyBench
from benchmarks.polybench_classes import ArrayImplementation
from benchmarks.polybench_classes import PolyBenchOptions, PolyBenchSpec
from numpy.core.multiarray import ndarray
import numpy as np
class Mvt(PolyBench):
    """Base class for the MVT kernel; dispatches to an array-strategy subclass."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        # Map the requested array implementation to its strategy class.
        # Unknown implementations yield None, exactly as before.
        dispatch = {
            ArrayImplementation.LIST: _StrategyList,
            ArrayImplementation.LIST_PLUTO: _StrategyListPluto,
            ArrayImplementation.LIST_FLATTENED: _StrategyListFlattened,
            ArrayImplementation.NUMPY: _StrategyNumPy,
            ArrayImplementation.LIST_FLATTENED_PLUTO: _StrategyListFlattenedPluto,
        }
        strategy = dispatch.get(options.POLYBENCH_ARRAY_IMPLEMENTATION)
        if strategy is None:
            return None
        return strategy.__new__(strategy, options, parameters)

    def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
        super().__init__(options, parameters)

        # The parameters hold the necessary information obtained from the
        # "polybench.spec" file
        dataset_params = parameters.DataSets.get(self.DATASET_SIZE)
        if not isinstance(dataset_params, dict):
            raise NotImplementedError(f'Dataset size "{self.DATASET_SIZE.name}" not implemented '
                                      f'for {parameters.Category}/{parameters.Name}.')

        # Problem size for this dataset
        self.N = dataset_params.get('N')

    def print_array_custom(self, array: list, name: str):
        """Print one result vector, 20 values per line (both outputs share this format)."""
        for index in range(0, self.N):
            if index % 20 == 0:
                self.print_message('\n')
            self.print_value(array[index])

    def run_benchmark(self):
        """Allocate, initialize and time the kernel; return printable results."""
        # Allocate the N x N matrix and the four length-N vectors
        A = self.create_array(2, [self.N, self.N], self.DATA_TYPE(0))
        x1 = self.create_array(1, [self.N], self.DATA_TYPE(0))
        x2 = self.create_array(1, [self.N], self.DATA_TYPE(0))
        y_1 = self.create_array(1, [self.N], self.DATA_TYPE(0))
        y_2 = self.create_array(1, [self.N], self.DATA_TYPE(0))

        self.initialize_array(x1, x2, y_1, y_2, A)
        self.time_kernel(x1, x2, y_1, y_2, A)

        # Results are returned as a list of ('name', data) tuples, one per
        # output structure, so the harness can print them uniformly.
        return [('x1', x1), ('x2', x2)]
class _StrategyList(Mvt):
    """MVT using nested Python lists."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        return object.__new__(_StrategyList)

    def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
        super().__init__(options, parameters)

    def initialize_array(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """Fill the vectors and matrix with the PolyBench reference values."""
        n = self.N
        for i in range(n):
            x1[i] = self.DATA_TYPE(i % n) / n
            x2[i] = self.DATA_TYPE((i + 1) % n) / n
            y_1[i] = self.DATA_TYPE((i + 3) % n) / n
            y_2[i] = self.DATA_TYPE((i + 4) % n) / n
            for j in range(n):
                A[i][j] = self.DATA_TYPE(i * j % n) / n

    def kernel(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """x1 += A . y_1 ; x2 += A^T . y_2"""
        # scop begin
        n = self.N
        for row in range(n):
            for col in range(n):
                x1[row] += A[row][col] * y_1[col]
        for row in range(n):
            for col in range(n):
                x2[row] += A[col][row] * y_2[col]
        # scop end
class _StrategyListPluto(_StrategyList):
    """MVT list variant with the Pluto-fused loop nest."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        return object.__new__(_StrategyListPluto)

    def kernel(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """Fused form: both vector updates share one loop nest."""
        # scop begin
        n = self.N
        if n - 1 >= 0:
            for c1 in range(n):
                for c2 in range(n):
                    x1[c1] += A[c1][c2] * y_1[c2]
                    x2[c1] += A[c2][c1] * y_2[c2]
        # scop end
class _StrategyListFlattened(Mvt):
    """MVT using a single flat list for the matrix (row-major, index N*i+j)."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        return object.__new__(_StrategyListFlattened)

    def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
        super().__init__(options, parameters)
        # Select the kernel variant once, at construction time
        self.kernel = self.kernel_le if options.LOAD_ELIMINATION else self.kernel_regular

    def initialize_array(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """Fill the vectors and flattened matrix with the PolyBench reference values."""
        n = self.N
        for i in range(n):
            x1[i] = self.DATA_TYPE(i % n) / n
            x2[i] = self.DATA_TYPE((i + 1) % n) / n
            y_1[i] = self.DATA_TYPE((i + 3) % n) / n
            y_2[i] = self.DATA_TYPE((i + 4) % n) / n
            for j in range(n):
                A[n * i + j] = self.DATA_TYPE(i * j % n) / n

    def kernel_regular(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """Straightforward kernel: x1 += A . y_1 ; x2 += A^T . y_2."""
        # scop begin
        n = self.N
        for i in range(n):
            for j in range(n):
                x1[i] += A[n * i + j] * y_1[j]
        for i in range(n):
            for j in range(n):
                x2[i] += A[n * j + i] * y_2[j]
        # scop end

    def kernel_le(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        """Kernel with load elimination: accumulate in a local, store once per row."""
        # scop begin
        n = self.N
        for i in range(n):
            acc = x1[i]  # load elimination
            for j in range(n):
                acc += A[n * i + j] * y_1[j]  # load elimination
            x1[i] = acc  # load elimination
        for i in range(n):
            acc = x2[i]  # load elimination
            for j in range(n):
                acc += A[n * j + i] * y_2[j]  # load elimination
            x2[i] = acc  # load elimination
        # scop end
class _StrategyNumPy(Mvt):
    """MVT using NumPy arrays and vectorized products."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        return object.__new__(_StrategyNumPy)

    def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
        super().__init__(options, parameters)

    def initialize_array(self, x1: ndarray, x2: ndarray, y_1: ndarray, y_2: ndarray, A: ndarray):
        """Fill the vectors and matrix with the PolyBench reference values."""
        n = self.N
        for i in range(n):
            x1[i] = self.DATA_TYPE(i % n) / n
            x2[i] = self.DATA_TYPE((i + 1) % n) / n
            y_1[i] = self.DATA_TYPE((i + 3) % n) / n
            y_2[i] = self.DATA_TYPE((i + 4) % n) / n
            for j in range(n):
                A[i, j] = self.DATA_TYPE(i * j % n) / n

    def kernel(self, x1: ndarray, x2: ndarray, y_1: ndarray, y_2: ndarray, A: ndarray):
        """Vectorized: x1 += A . y_1 ; x2 += A^T . y_2 (in-place slice updates)."""
        # scop begin
        n = self.N
        x1[0:n] += np.dot(A[0:n, 0:n], y_1[0:n])
        x2[0:n] += np.dot(A[0:n, 0:n].T, y_2[0:n])
        # scop end
class _StrategyListFlattenedPluto(_StrategyListFlattened):
    """Flat-list MVT with kernels generated by different PoCC/Pluto passes."""

    def __new__(cls, options: PolyBenchOptions, parameters: PolyBenchSpec):
        return object.__new__(_StrategyListFlattenedPluto)

    def __init__(self, options: PolyBenchOptions, parameters: PolyBenchSpec):
        super().__init__(options, parameters)
        # Pick the kernel by PoCC pass name: pluto / vectorizer / maxfuse
        self.kernel = getattr(self, f"kernel_{options.POCC}")

    def kernel_pluto(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        # --pluto
        # scop begin
        n = self.N
        if n - 1 >= 0:
            for c1 in range(n):
                for c2 in range(n):
                    x1[c1] += A[n * c1 + c2] * y_1[c2]
                    x2[c1] += A[n * c2 + c1] * y_2[c2]
        # scop end

    def kernel_vectorizer(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        # --pluto --pluto-prevector --vectorizer --pragmatizer
        # scop begin
        # Note the interchanged loop order (c2 outermost) from the prevector pass
        n = self.N
        if n - 1 >= 0:
            for c2 in range(n):
                for c1 in range(n):
                    x1[c1] += A[n * c1 + c2] * y_1[c2]
                    x2[c1] += A[n * c2 + c1] * y_2[c2]
        # scop end

    def kernel_maxfuse(self, x1: list, x2: list, y_1: list, y_2: list, A: list):
        # --pluto --pluto-fuse maxfuse
        # scop begin
        n = self.N
        if n - 1 >= 0:
            for c0 in range(n):
                for c1 in range(n):
                    x1[c0] += A[c0 * n + c1] * y_1[c1]
                    x2[c0] += A[c1 * n + c0] * y_2[c1]
        # scop end
| StarcoderdataPython |
'''
inventoryanalytics: a Python library for Inventory Analytics
Author: <NAME>
MIT License
Copyright (c) 2018 <NAME>
'''
import unittest
import inventoryanalytics.lotsizing.deterministic.constant.eoq as eoq
import numpy as np
class TestEOQ(unittest.TestCase):
    """Tests for the basic economic order quantity (EOQ) model."""

    def setUp(self):
        # K: fixed ordering cost, h: holding cost rate, d: demand, v: unit cost
        instance = {"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4}
        self.eoq = eoq.eoq(**instance)

    def tearDown(self):
        pass

    def test_eoq(self):
        K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
        self.assertAlmostEqual(self.eoq.compute_eoq(),
                               np.sqrt(2*d*K/h), places=2)  # closed-form
        self.assertAlmostEqual(self.eoq.compute_eoq(), 252.98, places=2)

    def test_cost(self):
        Q = self.eoq.compute_eoq()
        self.assertAlmostEqual(1020.72, self.eoq.cost(Q), places=2)
        K, h, d, v = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.v
        self.assertAlmostEqual(self.eoq.cost(Q),
                               np.sqrt(2*K*h*d)+v*d, places=2)  # closed-form

    def test_relevant_cost(self):
        Q = self.eoq.compute_eoq()
        K, h, d = self.eoq.K, self.eoq.h, self.eoq.d
        self.assertAlmostEqual(self.eoq.relevant_cost(Q),
                               np.sqrt(2*K*h*d), places=2)  # closed-form

    def test_itr(self):
        # Inventory turnover ratio
        self.assertAlmostEqual(self.eoq.itr(), 18.97, places=2)

    def test_sensitivity_to_Q(self):
        Q = 30
        Qopt = self.eoq.compute_eoq()
        d, v = self.eoq.d, self.eoq.v
        # assertAlmostEquals is a deprecated alias removed in Python 3.12;
        # use assertAlmostEqual instead.
        self.assertAlmostEqual(self.eoq.sensitivity_to_Q(Q), (self.eoq.cost(Q)-d*v)/(self.eoq.cost(Qopt)-d*v), places=2)

    def test_reorder_point(self):
        L = 1/12  # lead time of one month, in years
        self.assertAlmostEqual(self.eoq.reorder_point(L), 200, places=2)

    def test_coverage(self):
        self.assertAlmostEqual(self.eoq.coverage(), 1.26/12, places=2)
class TestEOQ_all_units_discounts(unittest.TestCase):
    """Tests for the EOQ model with all-units quantity discounts."""

    def setUp(self):
        self.eoq = eoq.eoq(**{"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4})

    def tearDown(self):
        pass

    def test_eoq(self):
        # b: discount breakpoints, v: unit price per price band
        problem = eoq.eoq_all_units_discounts(
            K=8, h=0.3, d=1300, b=[400, 800], v=[0.75, 0.72, 0.68])
        Q = problem.compute_eoq()
        self.assertAlmostEqual(Q, 800, places=2)
        self.assertAlmostEqual(problem.cost(Q), 978.6, places=2)
class TestEOQ_incremental_discounts(unittest.TestCase):
    """Tests for the EOQ model with incremental quantity discounts."""

    def setUp(self):
        self.eoq = eoq.eoq(**{"K": 3.2, "h": 0.24, "d": 2400, "v": 0.4})

    def tearDown(self):
        pass

    def test_eoq(self):
        # b: discount breakpoints, v: unit price per price band
        problem = eoq.eoq_incremental_discounts(
            K=8, h=0.3, d=1300, b=[400, 800], v=[0.75, 0.72, 0.68])
        Q = problem.compute_eoq()
        self.assertAlmostEqual(Q, 304.05, places=2)
        self.assertAlmostEqual(problem.cost(Q), 1043.41, places=2)
class TestEOQ_planned_backorders(unittest.TestCase):
    """Tests for the EOQ model with planned backorders."""

    def setUp(self):
        # p: backorder penalty cost
        self.eoq = eoq.eoq_planned_backorders(**{"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5})

    def tearDown(self):
        pass

    def test_eoq(self):
        K, h, d, p = self.eoq.K, self.eoq.h, self.eoq.d, self.eoq.p
        # Closed-form EOQ with backorders: sqrt(2Kd(h+p)/(hp))
        expected = np.sqrt(2*K*d*(h+p)/(h*p))
        self.assertAlmostEqual(self.eoq.compute_eoq(), expected, places=2)
class TestEPQ(unittest.TestCase):
    """Tests for the economic production quantity (EPQ) model."""

    def setUp(self):
        self.epq = eoq.epq(**{"K": 8, "h": 0.3*0.75, "d": 1300, "v": 75, "p": 5})

    def tearDown(self):
        pass

    def test_epq(self):
        K, h, d, p = self.epq.K, self.epq.h, self.epq.d, self.epq.p
        rho = p/d
        # Closed-form EPQ: sqrt(2Kd / (h(1-rho)))
        expected = np.sqrt(2*K*d/(h*(1-rho)))
        self.assertAlmostEqual(self.epq.compute_epq(), expected, places=2)
3269953 | <reponame>taliamax/uosu-petition<gh_stars>1-10
from server.controllers.firestore import FirestoreDriver
import json
def main():
    """Dump every document in the Firestore database to db_backup.json."""
    driver = FirestoreDriver()
    # Stream all document snapshots and collect them keyed by document id
    backup = {
        snapshot.id: snapshot.to_dict()
        for snapshot in driver.database_client.stream()
    }
    with open('db_backup.json', 'w') as f:
        json.dump(backup, f)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9101 | <reponame>Cyberdeep/archerysec
# _
# /\ | |
# / \ _ __ ___| |__ ___ _ __ _ _
# / /\ \ | '__/ __| '_ \ / _ \ '__| | | |
# / ____ \| | | (__| | | | __/ | | |_| |
# /_/ \_\_| \___|_| |_|\___|_| \__, |
# __/ |
# |___/
# Copyright (C) 2017-2018 ArcherySec
# This file is part of ArcherySec Project.
from django.conf.urls import url
from tools import views
# Namespace for reversing these routes, e.g. reverse('tools:sslscan').
app_name = 'tools'

# Routes for the SSL-scan tool: one view to launch a scan, one to show results.
# NOTE(review): django.conf.urls.url() is deprecated and removed in Django 4.0;
# migrating to django.urls.re_path()/path() — confirm the project's Django
# version before changing.
urlpatterns = [
    url(r'^sslscan/$',
        views.sslscan,
        name='sslscan'),

    url(r'^sslscan_result/$',
        views.sslscan_result,
        name='sslscan_result'),
]
| StarcoderdataPython |
1737422 | <reponame>danmcelroy/VoSeq
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.db import models
class Dataset(models.Model):
    """A dataset-generation job: tracks the background task and stores its output."""

    # Requesting user; nullable with SET_NULL so datasets survive user deletion
    user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
    created = models.DateTimeField(auto_now_add=True)
    # Set when the background generation task finishes; null while running
    completed = models.DateTimeField(null=True)
    # The generated dataset text itself
    content = models.TextField(null=True)
    # Identifier of the background task building this dataset
    task_id = models.TextField(null=True)
    # Structured errors/warnings produced during generation
    errors = JSONField(blank=True, null=True)
    warnings = JSONField(blank=True, null=True)
    # genbank datasets require nucleotidae and aminoacid datasets
    sister_dataset_id = models.IntegerField(null=True, blank=True)
    # eg. Phylip datasets require an extra part for the gene definitions
    charset_block = models.TextField(null=True)
| StarcoderdataPython |
3316469 | <reponame>nro2/godirect-examples<filename>python/gdx_getting_started_9.py
import sys
import time
from gdx import gdx #the gdx function calls are from a gdx.py file inside the gdx folder.
# Instantiate the device interface (this rebinds the imported module name
# 'gdx' to the device object — intentional in the vendor examples).
gdx = gdx.gdx()
gdx.open_usb()

# Query the connected device's sensors; chan is the first sensor's channel.
# NOTE(review): chan is never used below — confirm it is still needed.
info = gdx.sensor_info()
chan = info[0][0]
gdx.select_sensors() #Auto-selects lowest channel available in connected device
gdx.new_start() #Start function that begins collection thread gdx.collectLoop()

count = int(0)
num = int(100) #Number of measurements to take
sensNum = len(gdx.getSensors()) #Get total number of sensors to test dictionary against
begin = time.time()
#time.sleep(0)

# Poll the collection thread until `num` individual values have been printed.
# ('is not None' would be the idiomatic comparison below.)
while count < num:
    #print("inside while")
    time.sleep(.05)
    measurements = gdx.retValues() #returns a list of measurements from the sensors selected
    if measurements != None:
        for val in measurements:
            if count < num: #Prevents printing out too many measurements, ie packet of 3 printing 100 measurements ends on 102
                print(count+1, "", val)
                count += 1
    else:
        #print("pausing again")
        time.sleep(.02)

end = time.time()
final = end - begin
print("")
print(final, "is total runtime")

# Commented-out manual thread shutdown kept from the original example:
""" gdx.collectRunning = False
gdx.collectThread.join """
print("after collect")
gdx.stop()
print("after stop")
gdx.close()
print("after close") | StarcoderdataPython |
139028 | pkgname = "evolution-data-server"
pkgver = "3.44.0"
pkgrel = 0
build_style = "cmake"
# TODO: libgdata
configure_args = [
"-DENABLE_GOOGLE=OFF", "-DWITH_LIBDB=OFF",
"-DSYSCONF_INSTALL_DIR=/etc", "-DENABLE_INTROSPECTION=ON",
"-DENABLE_VALA_BINDINGS=ON",
]
hostmakedepends = [
"cmake", "ninja", "pkgconf", "flex", "glib-devel", "gperf",
"gobject-introspection", "gettext-tiny", "vala", "perl",
]
makedepends = [
"libglib-devel", "libcanberra-devel", "libical-devel", "heimdal-devel",
"webkitgtk-devel", "libsecret-devel", "gnome-online-accounts-devel",
"gcr-devel", "sqlite-devel", "libgweather-devel", "libsoup-devel",
"json-glib-devel", "nss-devel", "nspr-devel", "vala-devel",
"openldap-devel",
]
checkdepends = ["dbus"]
pkgdesc = "Centralized access to appointments and contacts"
maintainer = "q66 <<EMAIL>>"
license = "LGPL-2.0-or-later"
url = "https://gitlab.gnome.org/GNOME/evolution-data-server"
source = f"$(GNOME_SITE)/{pkgname}/{pkgver[:-2]}/{pkgname}-{pkgver}.tar.xz"
sha256 = "0d8881b5c51e1b91761b1945db264a46aabf54a73eea1ca8f448b207815d582e"
# internally passes some stuff that only goes to linker
tool_flags = {"CFLAGS": ["-Wno-unused-command-line-argument"]}
options = ["!cross"]
def post_install(self):
self.rm(self.destdir / "usr/lib/systemd", recursive = True)
@subpackage("evolution-data-server-devel")
def _devel(self):
return self.default_devel()
| StarcoderdataPython |
1666479 | <reponame>VlachosGroup/PythonGroupAdditivity
import os
from warnings import warn
from collections import Mapping
from .. import yaml_io
import numpy as np
from .. Error import GroupMissingDataError
from . Group import Group, Descriptor
from . Scheme import GroupAdditivityScheme
from . DataDir import get_data_dir
class GroupLibrary(Mapping):
"""Represent library of contributing properties organized by group.
The set of properties that may be represented in a :class:`GroupLibrary` is
extensible. Contributing properties are represented by *property sets*
organized by group. See the manual for a list of available property sets.
.. note::
Because the *property set* system is extensible, the module within
which a particular *property set* is defined and registered must be
imported before loading a group library that contains data for that
type of *property set*.
To estimate properties, call :meth:`GroupLibrary.estimate()` with the
*property set* `name` and set of groups contained in the chemical
structure of interest. The properties estimate will be returned as an
object whose type depends on the particular *property set*.
To determine which groups are present in a particular chemical structure,
use :meth:`GroupLibrary.match_groups()`.
Data in multiple group libraries can be combined so long as the groups
they contain are defined within compatible schemes. See
:meth:`GroupLibrary.update()`.
"""
_property_set_estimator_types = {}
_property_set_group_yaml_types = {}
@classmethod
def register_property_set_type(cls, name, group_yaml_type, estimator_type):
"""(class method) Register new property set type.
Parameters
----------
name : str
Name of new property set type.
group_yaml_type : str
Name of property set type in the YAML type namespace.
estimator_type : class
The provided class is instantiated when an estimate is to be made
for a particular set of groups. The constructor should accept the
following parameters:
library : :class:`GroupLibrary`
library which is to be used to estimate these properties.
groups : mapping
Map from :class:`Group` to int or float specifying counts
of each group in the chemical structure.
"""
if name in cls._property_set_group_yaml_types:
raise KeyError('Property set %r already registered.' % name)
cls._property_set_group_yaml_types[name] = group_yaml_type
cls._property_set_estimator_types[name] = estimator_type
def __init__(self, scheme, contents={}, uq_contents={}, path=None):
"""Initialize library of contributing properties organized by group.
Parameters
----------
scheme : :class:`GroupScheme`
Specify group-additivity scheme to use.
contents : mapping or list
Define initial contents of the library either as mapping or list of
(`key`, `value`) pairs. See the last paragraph of the class
documentation for information on the format.
Other Parameters
----------------
path : str
File-system path library was loaded from or should be saved to by
default.
"""
self.scheme = scheme
self.path = path
if isinstance(contents, Mapping):
contents = list(contents.items())
self.contents = dict((group, property_sets)
for (group, property_sets) in contents)
self.uq_contents = uq_contents
def GetDescriptors(self, mol):
"""Determine groups appearing in chemical structure `chem`.
Parameters
----------
mol : :class:`rdkit.mol`
Specify chemical structure to match groups for.
manual_descriptors : mapping, optional
Specify value(s)/degree(s) of influence of additional descriptors
to include.
Returns
-------
groups : mapping
Map from :class:`Group` to int or float identifying groups and
their number of occurence in the structure.
"""
self.name = mol
return self.scheme.GetDescriptors(mol)
def Estimate(self, groups, property_set_name):
"""Estimate set of properties for chemical.
Parameters
----------
groups : mapping (dictionary)
Map from :class:`Group` to int or float specifying counts of each
group in the chemical structure.
property_set_name : str
Name of property set to estimate.
Returns
-------
estimated_properties : (varies)
The estimated properties, an object whose type depends on the
particular property set.
"""
if property_set_name not in self._property_set_estimator_types:
raise KeyError('Invalid property_set name: %r' % property_set_name)
# Verify groups present.
missing_groups = [group for group in groups
if property_set_name not in self[group]]
if missing_groups:
raise GroupMissingDataError(missing_groups, property_set_name)
estimator_type = self._property_set_estimator_types[property_set_name]
return estimator_type(self, groups)
def __contains__(self, group):
"""Test if this library contains contributing properties for `group`.
Parameters
----------
group : :class:`Group`
Group whose membership is being tested.
Returns
-------
result : bool
True if this library has properties for `group`.
"""
return group in self.contents
def __iter__(self):
"""Return iterator over all groups with property data in this library.
"""
return iter(self.contents)
def __len__(self):
"""Return number of groups with properties in this library."""
return len(self.contents)
def __getitem__(self, group):
"""Return contributing properties sets for `group`.
If no properties exist for `group`, then return ``{}`` instead of
raising an exception.
Parameters
----------
group : :class:`Group`
Identify group whose property sets are to be retrieved.
Returns
-------
property_sets : dict
Sets of contributing properties for `group`.
"""
return self.contents.get(group, {})
@classmethod
def Load(cls, path):
"""(class method) Load group-additivity library from file-system
`path` or builtin.
Parameters
----------
path : str
Specify either the path to a file containing the data or a symbolic
name of a builtin library to load (*e.g.* ``gas_benson`` to load
gas phase Benson groups.)
Returns
-------
lib : :class:`GroupLibrary`
Group library containing the loaded data.
"""
if os.sep not in path and '.' not in path and not os.path.exists(path):
# [JTF] where's our data directory?
base_path = os.path.join(get_data_dir(), path)
# We want to load the library.yaml in that directory:
path = os.path.join(base_path, 'library.yaml')
else:
# The base path is the directory containing whatever file/directory
# is referenced by path:
base_path = os.path.dirname(path)
# Load the scheme.yaml from the selected data directory:
scheme = GroupAdditivityScheme.Load(os.path.join(base_path,
'scheme.yaml'))
# Use that scheme to load the rest of the library:
return cls._do_load(path, base_path, scheme)
@classmethod
def _Load(cls, path, scheme):
if os.sep not in path and '.' not in path and not os.path.exists(path):
# [JTF] where's our data directory?
base_path = os.path.join(get_data_dir(), path)
# We want to load the library.yaml in that directory:
path = os.path.join(base_path, 'library.yaml')
else:
# The base path is the directory containing whatever file/directory
# is referenced by path:
base_path = os.path.dirname(path)
# Use the scheme passed to us to load the rest of the library:
return cls._do_load(path, base_path, scheme)
@classmethod
def _do_load(cls, path, base_path, scheme):
# Read data from file.
context = {'base_path': base_path}
with open(path) as f:
lib_data = yaml_io.load(
yaml_io.parse(f.read()), context, loader=cls._yaml_loader)
context['units'] = lib_data.units
group_properties = lib_data.groups
other_descriptor_properties = lib_data.other_descriptors
UQ = lib_data.UQ
if cls._property_set_group_yaml_types:
# Prepare property_sets loader.
property_sets_loader = yaml_io.make_object_loader(yaml_io.parse(
'\n'.join(('%r:\n type: %r\n optional: true'
% (str(name),
str(cls._property_set_group_yaml_types[name])))
for name in cls._property_set_group_yaml_types)))
# Read all properties.
lib_contents = {}
for name in group_properties:
group = Group.parse(scheme, name)
if group in lib_contents:
raise KeyError('Multiple definitions of group %s' % group)
property_sets = yaml_io.load(
group_properties[name], context,
loader=property_sets_loader)
lib_contents[group] = property_sets
for name in other_descriptor_properties:
descriptor = Descriptor(scheme, name)
if descriptor in lib_contents:
raise KeyError('Multiple definitions of descriptor %s' %
descriptor)
property_sets = yaml_io.load(
other_descriptor_properties[name], context,
loader=property_sets_loader)
lib_contents[descriptor] = property_sets
# Read UQ data
uq_contents = {}
if UQ:
uq_contents['RMSE'] = yaml_io.load(
UQ['RMSE'], context,
loader=property_sets_loader)
uq_contents['descriptors'] = UQ['InvCovMat']['groups']
uq_contents['mat'] = np.array(UQ['InvCovMat']['mat'])
uq_contents['dof'] = UQ['DOF']
else:
# No property sets defined.
warn('GroupLibrary.load(): No property sets defined.')
lib_contents = {}
uq_contents = {}
new_lib = cls(scheme, lib_contents, uq_contents, path=path)
# Update with included content.
for include_path in lib_data.include:
new_lib.Update(cls._Load(os.path.join(base_path,
include_path), scheme))
return new_lib
def Update(self, lib, overwrite=False):
    """Add complete contents of `lib` into this library.

    Parameters
    ----------
    lib : :class:`GroupLibrary`
        Library to import from.
    overwrite : bool
        If True, then existing data may be overwritten by data from `lib`.

    Raises
    ------
    ValueError
        If both this library and `lib` already carry UQ data (it may only
        be loaded once).
    """
    for (group, other_property_sets) in list(lib.items()):
        if group not in self.contents:
            self.contents[group] = {}
        property_sets = self.contents[group]
        for name in other_property_sets:
            if name not in property_sets:
                # New property set: take a copy so later edits to `lib`
                # do not alias into this library.
                property_sets[name] = other_property_sets[name].copy()
            else:
                property_sets[name].update(
                    other_property_sets[name], overwrite)
    # UQ stuff can only be loaded once.
    # Fix: raise with a single message string; the original passed two
    # positional args, producing ValueError(('More than...', '...')).
    if self.uq_contents and lib.uq_contents:
        raise ValueError('More than one uncertainty quantification '
                         'information provided')
    if not self.uq_contents:
        self.uq_contents = lib.uq_contents
_yaml_loader = yaml_io.make_object_loader(yaml_io.parse("""
units:
type: mapping
default: {}
include:
type: list
item_type: string
default: []
groups:
type: mapping
default: {}
other_descriptors:
type: mapping
default: {}
UQ:
type: mapping
default: {}
"""))
| StarcoderdataPython |
182854 | #!/usr/bin/env python
"""
train_SVM.py
VARPA, University of Coruna
<NAME>, <NAME>.
26 Oct 2017
"""
from sklearn import metrics
import numpy as np
class performance_measures:
    """Bundle of per-class and aggregate metrics for an n-class classifier.

    Per-class arrays (Recall, Precision, Specificity, Acc, F_measure) are
    allocated but uninitialised; aggregate scores start at zero until a
    caller fills them in.
    """

    def __init__(self, n):
        self.n_classes = n
        self.confusion_matrix = np.empty([])
        # One slot per class for each per-class metric.
        for metric in ("Recall", "Precision", "Specificity", "Acc", "F_measure"):
            setattr(self, metric, np.empty(n))
        # Scalar summaries, computed later.
        for score in ("gmean_se", "gmean_p", "Overall_Acc", "kappa", "Ij", "Ijk"):
            setattr(self, score, 0.0)
# Compute Cohen's kappa from a confusion matrix.
# Interpretation of the kappa value:
#   < 0.20     Poor
#   0.21-0.40  Fair
#   0.41-0.60  Moderate
#   0.61-0.80  Good
#   0.81-1.00  Very good
def compute_cohen_kappa(confusion_matrix):
    """Return (kappa, observed agreement, expected agreement)."""
    size = len(confusion_matrix)
    total = sum(sum(confusion_matrix))
    row_marginals = np.empty(size)
    col_marginals = np.empty(size)
    observed = 0
    for idx in range(size):
        row_marginals[idx] = sum(confusion_matrix[idx, :]) / total
        col_marginals[idx] = sum(confusion_matrix[:, idx]) / total
        observed += confusion_matrix[idx][idx]
    # Chance agreement is the dot product of the two marginal distributions.
    expected = np.dot(row_marginals, col_marginals)
    observed = observed / total
    kappa = (observed - expected) / (1 - expected)
    return kappa, observed, expected
# Compute the performance measures following the AAMI recommendations.
# Using sensivity (recall), specificity (precision) and accuracy
# for each class: (N, SVEB, VEB, F)
def compute_AAMI_performance_measures(predictions, gt_labels):
    """Build a performance_measures object over the 4 AAMI classes.

    predictions / gt_labels: label sequences coded N=0, S=1, V=2, F=3.
    """
    n_classes = 4 #5
    pf_ms = performance_measures(n_classes)
    # TODO: if the confusion matrix does not cover all 4 classes because
    # of gt_labels or predictions, add handling so the code does not fail.
    # NOTE: added labels=[0,1,2,3])...
    # Confussion matrix
    conf_mat = metrics.confusion_matrix(gt_labels, predictions, labels=[0,1,2,3])
    conf_mat = conf_mat.astype(float)
    pf_ms.confusion_matrix = conf_mat
    # Overall Acc
    pf_ms.Overall_Acc = metrics.accuracy_score(gt_labels, predictions)
    # AAMI: Sens, Spec, Acc
    # N: 0, S: 1, V: 2, F: 3 # (Q: 4) not used
    for i in range(0, n_classes):
        # One-vs-rest counts derived from the confusion matrix.
        TP = conf_mat[i,i]
        FP = sum(conf_mat[:,i]) - conf_mat[i,i]
        TN = sum(sum(conf_mat)) - sum(conf_mat[i,:]) - sum(conf_mat[:,i]) + conf_mat[i,i]
        FN = sum(conf_mat[i,:]) - conf_mat[i,i]
        if i == 2: # V
            # Exceptions for AAMI recomendations:
            # 1 do not reward or penalize a classifier for the classification of (F) as (V)
            FP = FP - conf_mat[i][3]
        # NOTE(review): the divisions below can produce 0/0 (nan) if a
        # class is absent from both predictions and ground truth —
        # confirm inputs always contain every class, or add guards.
        pf_ms.Recall[i] = TP / (TP + FN)
        pf_ms.Precision[i] = TP / (TP + FP)
        pf_ms.Specificity[i] = TN / (TN + FP); # 1-FPR
        pf_ms.Acc[i] = (TP + TN) / (TP + TN + FP + FN)
        if TP == 0:
            pf_ms.F_measure[i] = 0.0
        else:
            pf_ms.F_measure[i] = 2 * (pf_ms.Precision[i] * pf_ms.Recall[i] )/ (pf_ms.Precision[i] + pf_ms.Recall[i])
    # Compute Cohen's Kappa
    pf_ms.kappa, prob_obsv, prob_expect = compute_cohen_kappa(conf_mat)
    # Compute Index-j recall_S + recall_V + precision_S + precision_V
    pf_ms.Ij = pf_ms.Recall[1] + pf_ms.Recall[2] + pf_ms.Precision[1] + pf_ms.Precision[2]
    # Compute Index-jk
    w1 = 0.5
    w2 = 0.125
    pf_ms.Ijk = w1 * pf_ms.kappa + w2 * pf_ms.Ij
    return pf_ms
# Export the performance-measure scores to a plain-text file.
def write_AAMI_results(performance_measures, filename):
    """Write the AAMI performance report for one evaluation run.

    Parameters:
        performance_measures: object exposing Ijk, Ij, kappa,
            confusion_matrix, Overall_Acc and the per-class Acc/Recall/
            Precision arrays (index order: N=0, SVEB=1, VEB=2, F=3).
        filename: output text file path (overwritten if it exists).
    """
    pm = performance_measures

    def fmt(value):
        # All scores are reported with 4 decimal places; format() already
        # returns a str, so the original str(...) wrapper was redundant.
        return format(value, '.4f')

    # Per-class report sections, in AAMI order (matches array indices 0..3).
    class_names = ("N", "SVEB", "VEB", "F")

    # 'with' guarantees the file is closed even if a write fails.
    with open(filename, "w") as f:
        f.write("Ijk: " + fmt(pm.Ijk) + "\n")
        f.write("Ij: " + fmt(pm.Ij) + "\n")
        f.write("Cohen's Kappa: " + fmt(pm.kappa) + "\n\n")
        # Conf matrix (printed as integer rows)
        f.write("Confusion Matrix:" + "\n\n")
        f.write("\n".join(str(elem) for elem in pm.confusion_matrix.astype(int)) + "\n\n")
        f.write("Overall ACC: " + fmt(pm.Overall_Acc) + "\n\n")
        f.write("mean Acc: " + fmt(np.average(pm.Acc[:])) + "\n")
        f.write("mean Recall: " + fmt(np.average(pm.Recall[:])) + "\n")
        f.write("mean Precision: " + fmt(np.average(pm.Precision[:])) + "\n")
        for i, name in enumerate(class_names):
            f.write(name + ":" + "\n\n")
            f.write("Sens: " + fmt(pm.Recall[i]) + "\n")
            f.write("Prec: " + fmt(pm.Precision[i]) + "\n")
            f.write("Acc: " + fmt(pm.Acc[i]) + "\n")
| StarcoderdataPython |
3345094 | <reponame>JulianEberius/Eclim.tmbundle
#!/usr/bin/env python
import os, sys
import eclim
import util
from util import caret_position, current_identifier
def call_eclim(project, file, length, offset, new_name):
    """Invoke eclim's java_refactor_rename for one identifier.

    project/file identify the Eclipse buffer; offset is the byte offset of
    the identifier, length its length, new_name the replacement name.
    Returns eclim's raw command output.
    """
    # Sync the buffer into Eclipse first so the refactoring sees current code.
    eclim.update_java_src(project, file)
    # The backslash continuations are INSIDE the string literal; the shell
    # collapses the resulting whitespace when the command is executed.
    rename_cmd = "$ECLIM -command java_refactor_rename \
-p %s \
-f %s \
-o %i \
-e utf-8 \
-l %i \
-n %s" % (project, file, offset, length, new_name)
    out = eclim.call_eclim(rename_cmd)
    return out
def rename_command():
    """Rename the Java identifier under the TextMate caret via eclim.

    Reads the buffer from disk (TextMate's stdin normalizes Windows line
    endings to plain \\n, which would skew byte offsets), locates the
    identifier around the caret, prompts for the new name, and delegates
    to call_eclim.
    """
    project, file = eclim.get_context()
    # Read from disk instead of stdin so line endings match the real file.
    with open(os.environ["TM_FILEPATH"]) as f:
        code = f.read()
    caret = caret_position(code)
    identifier = current_identifier()
    # The caret may sit anywhere inside the identifier; search from the
    # earliest position at which it could start.
    start = code.find(identifier, caret - len(identifier))
    new_name = util.get_input(default=identifier, title="Enter new name")
    call_eclim(project, file, len(identifier), start, new_name)
if __name__ == '__main__':
    # CLI entry point: TextMate invokes this script with an action flag;
    # only --rename is implemented.
    if sys.argv[1] == '--rename':
        out = rename_command()
    #print out
| StarcoderdataPython |
63197 | # The MIT License (MIT)
#
# Copyright (c) 2011, 2013 OpenWorm.
# http://openworm.org
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the MIT License
# which accompanies this distribution, and is available at
# http://opensource.org/licenses/MIT
#
# Contributors:
# OpenWorm - http://openworm.org/people.html
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import with_statement
from neuron import h
import pylab
def test_function():
    """Trivial probe used to verify the C++ interop bridge; always returns 0."""
    return 0
def plot(vectors_dict):
    """Plot membrane potentials (top) and synaptic current (bottom) vs. time.

    vectors_dict must hold recorded vectors under keys 't', 'v_pre',
    'v_post' and 'i_syn'.  Blocks until the pylab window is closed.
    """
    # plot the results
    pylab.subplot (2 ,1 ,1)
    # Pre- and post-synaptic membrane potentials on shared time axis.
    pylab.plot (vectors_dict['t'], vectors_dict['v_pre'] ,
        vectors_dict['t'] , vectors_dict['v_post'])
    pylab.subplot (2 ,1 ,2)
    # Synaptic current trace.
    pylab.plot(vectors_dict['t'],vectors_dict['i_syn'])
    pylab.show()
class test_simulation():
    """Minimal two-compartment NEURON model: a stimulated pre-synaptic
    section driving a post-synaptic section through an ExpSyn synapse."""
    #simple simulation to test the principle
    def __init__(self,increment=0.1):
        """Build the circuit.

        increment: simulated time (ms) each run() call advances.
        """
        #create pre- and post- synaptic sections
        self.increment = increment
        self.pre = h.Section()
        self.post = h.Section()
        for sec in self.pre,self.post:
            # Hodgkin-Huxley membrane dynamics in both sections.
            sec.insert('hh')
        #inject current in the pre-synaptic section
        self.stim = h.IClamp(0.5, sec=self.pre)
        self.stim.amp = 10.0
        self.stim.delay = 5.0
        self.stim.dur = 5.0
        #create a synapse in the pre-synaptic section
        self.syn = h.ExpSyn(0.5,sec=self.post)
        #connect the pre-synaptic section to the synapse object:
        self.nc = h.NetCon(self.pre(0.5)._ref_v,self.syn)
        self.nc.weight[0] = 2.0
        # record the membrane potentials and
        # synaptic currents
        vec = {}
        for var in 'v_pre', 'v_post', 'i_syn', 't':
            vec[var] = h.Vector()
        vec['v_pre'].record(self.pre(0.5)._ref_v )
        vec['v_post'].record(self.post(0.5)._ref_v )
        vec['i_syn'].record(self.syn._ref_i )
        vec['t'].record(h._ref_t)
        self.vector = vec
        #Initialize the simulation
        h.load_file ("stdrun.hoc")
        h.init()
    def run(self,do_plot = False):
        """Advance the simulation by `increment` ms; optionally plot.

        Returns the final post-synaptic membrane potential (mV).
        """
        #run and return resting potential
        t_now = h.t
        while h.t < t_now + self.increment:
            h.fadvance()
        if do_plot:
            plot(self.vector)
        #return the post-synaptic membrane potential
        return self.vector['v_post'][-1]
# Example use: advance the simulation 100 ms and plot the traces.
a = test_simulation(increment=100)
# Parenthesized print is valid in both Python 2 and 3 for a single
# argument; the original `print a.run(...)` was Python-2-only syntax.
print(a.run(do_plot=True))
| StarcoderdataPython |
6535 | <filename>A2/semcor_chunk.py
from nltk.corpus import semcor
from nltk.corpus import wordnet as wn
class semcor_chunk:
    """Thin wrapper around one chunk of a SemCor sem-tagged sentence."""

    def __init__(self, chunk):
        # chunk: an nltk Tree (labelled) or a bare list of word tokens.
        self.chunk = chunk

    #returns the synset if applicable, otherwise returns None
    def get_syn_set(self):
        """Return the WordNet synset for this chunk, or None.

        Fix: the fallback branch referenced `wn` which was never imported,
        so it always failed (silently, via the bare except) — wordnet is
        now imported at module level, and the bare `except:` is narrowed
        to `except Exception` so KeyboardInterrupt/SystemExit propagate.
        """
        try:
            return self.chunk.label().synset()
        except AttributeError:
            # label() may return a plain string (a synset name) rather
            # than a Lemma object; try resolving it directly.
            try:
                return wn.synset(self.chunk.label())
            except Exception:
                # No label at all, or the string is not a valid synset name.
                return None

    #returns a list of the words in the chunk
    def get_words(self):
        """Return the list of word tokens in the chunk."""
        try:
            return self.chunk.leaves()
        except AttributeError:
            # Bare token lists have no leaves(); they already are the words.
            return self.chunk
# if __name__ == "__main__":
# s = semcor.tagged_sents(tag='sem')[0]
# for chunk in s:
# a = semcor_chunk(chunk)
# print a.get_syn_set()
# for chunk in s:
# a = semcor_chunk(chunk)
# print a.get_words() | StarcoderdataPython |
187397 | <reponame>k-nut/jedeschule-scraper
from __future__ import annotations # needed so that update_or_create can define School return type
import logging
import os
from geoalchemy2 import Geometry, WKTElement
from sqlalchemy import String, Column, JSON
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from jedeschule.items import School as SchoolItem
from jedeschule.pipelines.school_pipeline import SchoolPipelineItem
Base = declarative_base()
def get_session():
    """Create a new SQLAlchemy session bound to the DATABASE_URL engine."""
    engine = create_engine(os.environ.get("DATABASE_URL"), echo=False)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
class School(Base):
    """ORM model for one school, keyed by the scraper-assigned id."""
    __tablename__ = 'schools'
    id = Column(String, primary_key=True)
    name = Column(String)
    address = Column(String)
    address2 = Column(String)
    zip = Column(String)
    city = Column(String)
    website = Column(String)
    email = Column(String)
    school_type = Column(String)
    legal_status = Column(String)
    provider = Column(String)
    fax = Column(String)
    phone = Column(String)
    director = Column(String)
    # Original scraped item, kept verbatim for debugging/reprocessing.
    raw = Column(JSON)
    location = Column(Geometry('POINT'))

    @staticmethod
    def update_or_create(item: SchoolPipelineItem, session=None) -> School:
        """Return a School for *item*: update the existing row or build a new one.

        Latitude/longitude, when both present, are folded into a WGS84
        (SRID 4326) POINT before persisting.
        NOTE(review): for an existing row the update happens via
        Query.update(), so the *returned* instance may not reflect the new
        values until refreshed — confirm callers only rely on identity.
        NOTE(review): Query.get() is the legacy API; SQLAlchemy 1.4+
        prefers Session.get() — confirm the pinned library version.
        """
        if not session:
            session = get_session()
        school_data = {**item.info}
        school = session.query(School).get(item.info['id'])
        latitude = school_data.pop('latitude', None)
        longitude = school_data.pop('longitude', None)
        if latitude is not None and longitude is not None:
            # WKT order is (longitude latitude).
            location = WKTElement(f"POINT({longitude} {latitude})", srid=4326)
            school_data['location'] = location
        if school:
            session.query(School).filter_by(id=item.info['id']).update({**school_data, 'raw': item.item})
        else:
            school = School(**school_data, raw=item.item)
        return school

    def __str__(self):
        return f'<School id={self.id}, name={self.name}>'
class DatabasePipeline:
    """Scrapy pipeline that upserts scraped schools into the database."""

    def __init__(self):
        # One session reused for the whole crawl.
        self.session = get_session()

    def process_item(self, item: SchoolPipelineItem, spider):
        """Persist *item*; on DB errors log and roll back so the crawl continues."""
        school = School.update_or_create(item, session=self.session)
        try:
            self.session.add(school)
            self.session.commit()
        except SQLAlchemyError as e:
            logging.warning('Error when putting to DB')
            logging.warning(e)
            # Roll back so the session stays usable for subsequent items.
            self.session.rollback()
        return school
| StarcoderdataPython |
3370337 | """
数据访问层
data access layer
"""
import csv
from typing import List
from model import HouseModel
class HouseDao:
    """House data access object: loads/persists HouseModel rows as CSV."""

    # In-memory cache of all records, shared class-wide.
    __house_data = []  # type:List[HouseModel]
    # NOTE(review): hard-coded absolute path — consider making configurable.
    __FILE_NAME = "/home/tarena/PycharmProjects/untitled/house_information_manager_system/house.csv"

    @classmethod
    def load(cls) -> List[HouseModel]:
        """Load house records from the CSV file.

        :return: all house records from the file
        """
        cls.__house_data.clear()
        # Open the file; newline="" is the documented requirement for the
        # csv module so embedded newlines round-trip correctly.
        with open(cls.__FILE_NAME, encoding="utf-8", newline="") as csvfile:
            # Read rows with the csv module.
            for row in csv.reader(csvfile):
                model = cls.__string_to_HouseModel(row)
                cls.__house_data.append(model)
        return cls.__house_data

    @staticmethod
    def __string_to_HouseModel(line):
        # Column layout: int id, five strings, float, two strings,
        # two floats, trailing string — must match HouseModel's signature.
        return HouseModel(int(line[0]), line[1], line[2], line[3], line[4], float(line[5]), line[6], line[7],
                          float(line[8]), float(line[9]), line[10])

    @classmethod
    def save(cls) -> None:
        """Persist the in-memory house records back to the CSV file."""
        # Fix: write with the same utf-8 encoding load() reads with (the
        # original omitted it, risking mojibake on non-ASCII fields), and
        # pass newline="" so csv does not emit blank rows on Windows.
        with open(cls.__FILE_NAME, "w", encoding="utf-8", newline="") as csvfile:
            csv_writer = csv.writer(csvfile)
            for house in cls.__house_data:
                csv_writer.writerow(house.__dict__.values())
if __name__ == '__main__':
    # Test code: dump every loaded record's attribute dict.
    for item in HouseDao.load():
        print(item.__dict__)
3302934 | <filename>fdk_client/platform/models/Bags.py
"""Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
from .BagItem import BagItem
class Bags(BaseSchema):
    """Marshmallow schema for one bag entry of an order (both fields optional)."""

    # Order swagger.json
    item = fields.Nested(BagItem, required=False)
    id = fields.Int(required=False)
3206699 | from django.urls import path
from . import views
# URL namespace: reverse these routes as 'blog:<name>'.
app_name = 'blog'

urlpatterns = [
    # Home page: post listing.
    path('', views.post_list_home, name='post_list'),
    path('blog/over-explained/<str:slug>', views.explained, name="explained"),
    # Post detail and series listing share the category/series prefix.
    path('blog/<str:category>/<str:series>/<str:slug>', views.post_detail, name='post_detail'),
    path('blog/<str:category>/<str:series>', views.series_list, name='series_list'),
    path('blog/archive/', views.archive, name='archive'),
    path('about', views.about, name='about'),
    path('resources', views.resources, name='resources'),
    path('search/', views.MySearchView.as_view(), name="search"),
]
3395564 | <reponame>hristo-vrigazov/dnn.cool<gh_stars>10-100
import torch
from torch import nn
def dummy_tensor(inputs):
    """Return a trainable all-ones parameter shaped like *inputs*."""
    ones = torch.ones_like(inputs)
    return nn.Parameter(data=ones)
def dummy_dict(inputs):
    """Build a ParameterDict of all-ones gates for each input tensor,
    skipping the ground-truth entry and any precondition entries."""
    gates = nn.ParameterDict()
    for name, tensor in inputs.items():
        skip = (name == 'gt') or name.startswith('precondition')
        if not skip:
            gates[name] = dummy_tensor(tensor)
    return gates
class RFModel(nn.Module):
    """Wraps *model* with trainable all-ones gates on its inputs.

    Multiplying each input by a ones-gate leaves the forward pass
    numerically unchanged while exposing per-element gradients, which
    downstream code thresholds to recover the receptive field.
    """

    def __init__(self, inputs, model):
        super().__init__()
        if isinstance(inputs, dict):
            self.dummy = dummy_dict(inputs)
        else:
            self.dummy = dummy_tensor(inputs)
        self.model = model

    def forward(self, x):
        if not isinstance(x, torch.Tensor):
            # Dict input: gate each entry in place, matching the gate keys.
            for name, gate in self.dummy.items():
                x[name] *= gate
            return self.model(x)
        return self.model(self.dummy * x)
def compute_receptive_field(model, inputs, out_mask, marker=5e8):
    """Estimate which input elements influence the outputs selected by out_mask.

    Wraps *model* with all-ones input gates, offsets the masked outputs by
    *marker*, backpropagates an MSE loss against that target, and
    thresholds the gate gradients: non-zero gradient means the position is
    inside the receptive field.  Returns a boolean mask (or dict of masks)
    shaped like the inputs.
    """
    rf_model = RFModel(inputs, model)
    pred = rf_model(inputs)
    # Large marker makes influenced gradients clearly non-zero.
    target = mark_predictions(pred, out_mask, marker)
    rf_backprop(pred, target)
    rf_mask = get_rf_mask(rf_model)
    return rf_mask
def get_rf_mask(rf_model, eps=1e-4):
    """Threshold the gate gradients of *rf_model* into boolean masks."""
    gate = rf_model.dummy
    if isinstance(gate, torch.Tensor):
        return gate.grad.abs() > eps
    return {name: param.grad.abs() > eps for name, param in gate.items()}
def rf_backprop(pred, target):
    """Backpropagate an MSE loss between *pred* and *target*.

    For dict outputs, per-key MSE losses are stacked and averaged so every
    head contributes to the gate gradients.
    """
    criterion = nn.MSELoss()
    if isinstance(pred, torch.Tensor):
        criterion(pred, target).backward()
        return
    per_key = [criterion(out, target[name]) for name, out in pred.items()]
    torch.stack(per_key).mean().backward()
def mark_predictions(pred, out_mask, marker):
    """Return a detached copy of *pred* with *marker* added at masked positions.

    The input tensors are never modified; each copy is detached so the
    target carries no autograd history.
    """
    if isinstance(pred, torch.Tensor):
        marked = pred.detach().clone()
        marked[out_mask] += marker
        return marked
    assert isinstance(pred, dict)
    marked = {}
    for name, tensor in pred.items():
        copy = tensor.detach().clone()
        copy[out_mask[name]] += marker
        marked[name] = copy
    return marked
| StarcoderdataPython |
194912 |
import os
import time
import calendar
from glideinwms.lib import x509Support
from glideinwms.lib import condorExe
import logging
logger = logging.getLogger()
class Credential:
    """One frontend credential (x509 proxy, key pair, etc.) and its metadata.

    Wraps a credential file plus the per-group descriptor settings that
    control how it is advertised to factories (security class, trust
    domain, VM info, renewal frequency, ...).
    """

    def __init__(self, proxy_id, proxy_fname, group_descript):
        """Look up all settings for *proxy_fname* in *group_descript*.

        group_descript maps descriptor names to dicts keyed by credential
        filename; missing entries fall back to sensible defaults.
        """
        self.req_idle = 0
        self.req_max_run = 0
        self.advertize = False
        proxy_security_classes = group_descript['ProxySecurityClasses']
        proxy_trust_domains = group_descript['ProxyTrustDomains']
        proxy_types = group_descript['ProxyTypes']
        proxy_keyfiles = group_descript['ProxyKeyFiles']
        proxy_pilotfiles = group_descript['ProxyPilotFiles']
        proxy_vm_ids = group_descript['ProxyVMIds']
        proxy_vm_types = group_descript['ProxyVMTypes']
        proxy_creation_scripts = group_descript['ProxyCreationScripts']
        proxy_update_frequency = group_descript['ProxyUpdateFrequency']
        proxy_vmid_fname = group_descript['ProxyVMIdFname']
        proxy_vmtype_fname = group_descript['ProxyVMTypeFname']
        proxy_remote_username = group_descript['ProxyRemoteUsernames']
        proxy_project_id = group_descript['ProxyProjectIds']
        self.proxy_id = proxy_id
        # self.filename (absfname) always contains component of credential
        # used to submit glidein and based on the type contains following:
        # grid_proxy: x509 proxy (also used by pilot to talk to User collector
        # key_pair: public/access key
        # cert_pair: public cert
        # auth_file: auth file used
        self.filename = proxy_fname
        self.type = proxy_types.get(proxy_fname, "Unknown")
        self.security_class = proxy_security_classes.get(proxy_fname, proxy_id)
        self.trust_domain = proxy_trust_domains.get(proxy_fname, "None")
        # -1 means "never auto-renew".
        self.update_frequency = int(
            proxy_update_frequency.get(proxy_fname, -1))
        # Following items can be None
        self.vm_id_fname = proxy_vmid_fname.get(proxy_fname)
        self.vm_type_fname = proxy_vmtype_fname.get(proxy_fname)
        self.vm_id = proxy_vm_ids.get(proxy_fname)
        self.vm_type = proxy_vm_types.get(proxy_fname)
        self.creation_script = proxy_creation_scripts.get(proxy_fname)
        self.key_fname = proxy_keyfiles.get(proxy_fname)
        self.pilot_fname = proxy_pilotfiles.get(proxy_fname)
        self.remote_username = proxy_remote_username.get(proxy_fname)
        self.project_id = proxy_project_id.get(proxy_fname)
        # Will be initialized when get_id() is called
        self._id = None

    def get_id(self, recreate=False):
        """
        Generate the Credential id if we do not have one already
        Since the Id is dependent on the credential content for proxies
        recreate them if asked to do so
        :param recreate: Recreate the credential id
        :type recreate :obj: `Boolean`
        @rtype: string
        @return: Id of the credential
        """
        if (not self._id) or recreate:
            # Create the credential id
            self.create()
            self._id = self.file_id(self.get_id_filename())
        return self._id

    def get_id_filename(self):
        """
        Get credential file used to generate the credential id
        @rtype: string
        @return: credential filename used to create the credential id
        """
        # Preference order: main credential file, then key file, then
        # pilot file; None if the credential has none of them.
        cred_file = None
        if self.filename:
            cred_file = self.filename
        elif self.key_fname:
            cred_file = self.key_fname
        elif self.pilot_fname:
            cred_file = self.pilot_fname
        return cred_file

    def create(self):
        """
        Generate the credential
        """
        if self.creation_script:
            logger.debug("Creating credential using %s" %
                         (self.creation_script))
            try:
                condorExe.iexe_cmd(self.creation_script)
            except Exception:
                logger.exception(
                    "Creating credential using %s failed" % (self.creation_script))
                self.advertize = False
            # Recreating the credential can result in ID change
            self._id = self.file_id(self.get_id_filename())

    def create_if_not_exist(self):
        """
        Generate the credential if it does not exists.
        """
        if self.filename and (not os.path.exists(self.filename)):
            logger.debug("Credential %s does not exist." % (self.filename))
            self.create()

    def get_string(self, cred_file=None):
        """
        Based on the type of credentials read appropriate files and return
        the credentials to advertise as a string. The output should be
        encrypted by the caller as required.
        @rtype: string
        @return: Read the credential from the file and return the string
        """
        cred_data = ''
        if not cred_file:
            # If not file specified, assume the file used to generate Id
            cred_file = self.get_id_filename()
        try:
            data_fd = open(cred_file)
            cred_data = data_fd.read()
            data_fd.close()
        except Exception:
            # This credential should not be advertised
            self.advertize = False
            logger.exception("Failed to read credential %s: " % cred_file)
            raise
        return cred_data

    # PM: Why are the usage details part of Credential Class?
    # This is overloading the purpose of Credential Class
    def add_usage_details(self, req_idle=0, req_max_run=0):
        """
        Add usage details for this credential
        """
        self.req_idle = req_idle
        self.req_max_run = req_max_run

    def get_usage_details(self):
        """
        Return usage details for this credential
        """
        return (self.req_idle, self.req_max_run)

    def file_id(self, filename, ignoredn=False):
        """
        Generate hash for the credential id
        """
        # NOTE(review): built-in hash() of str is randomized per process
        # (PYTHONHASHSEED), so these ids are NOT stable across runs unless
        # hash randomization is disabled — confirm whether ids must
        # persist between frontend restarts.
        if (("grid_proxy" in self.type) and not ignoredn):
            # Fold the proxy DN into the hash so different identities in
            # the same file path get different ids.
            dn = x509Support.extract_DN(filename)
            hash_str = filename + dn
        else:
            hash_str = filename
        return str(abs(hash(hash_str)) % 1000000)

    def time_left(self):
        """
        Returns the time left if a grid proxy
        If missing, returns 0
        If not a grid proxy or other unidentified error, return -1
        """
        if (not os.path.exists(self.filename)):
            return 0
        if ("grid_proxy" in self.type) or ("cert_pair" in self.type):
            # Ask openssl for the certificate's notAfter timestamp.
            time_list = condorExe.iexe_cmd(
                "openssl x509 -in %s -noout -enddate" % self.filename)
            if "notAfter=" in time_list[0]:
                time_str = time_list[0].split("=")[1].strip()
                # Remaining lifetime in seconds.
                timeleft = calendar.timegm(time.strptime(
                    time_str, "%b %d %H:%M:%S %Y %Z")) - int(time.time())
                return timeleft
        else:
            return -1

    def renew(self):
        """
        Renews credential if time_left()<update_frequency
        Only works if type is grid_proxy or creation_script is provided
        """
        remaining = self.time_left()
        if ((remaining != -1) and (self.update_frequency != -1) and
                (remaining < self.update_frequency)):
            self.create()

    def supports_auth_method(self, auth_method):
        """
        Check if this credential has all the necessary info to support
        auth_method for a given factory entry
        """
        # '+'-separated type tokens must cover all tokens of auth_method.
        type_set = set(self.type.split('+'))
        am_set = set(auth_method.split('+'))
        return am_set.issubset(type_set)

    def __str__(self):
        output = ""
        output += "id = %s\n" % self.get_id()
        output += "proxy_id = %s\n" % self.proxy_id
        output += "req_idle = %s\n" % self.req_idle
        output += "req_max_run = %s\n" % self.req_max_run
        output += "filename = %s\n" % self.filename
        output += "type = %s\n" % self.type
        output += "security_class = %s\n" % self.security_class
        output += "trust_domain = %s\n" % self.trust_domain
        try:
            output += "key_fname = %s\n" % self.key_fname
            output += "pilot_fname = %s\n" % self.pilot_fname
        except Exception:
            pass
        output += "vm_id = %s\n" % self.vm_id
        output += "vm_type = %s\n" % self.vm_type
        output += "remote_username = %s\n" % self.remote_username
        output += "project_id = %s\n" % self.project_id
        return output
class CredentialCache:
    """Memoize credential ids keyed on (credential type, filename).

    Computing a proxy id extracts the DN from the x509 file, which is
    especially expensive, so results are cached for this object's lifetime.
    """

    def __init__(self):
        self.file_id_cache = {}

    def file_id(self, cred, filename):
        """Return the (possibly cached) credential id for cred/filename."""
        key = (cred.type, filename)
        try:
            return self.file_id_cache[key]
        except KeyError:
            value = cred.file_id(filename)
            self.file_id_cache[key] = value
            return value
| StarcoderdataPython |
1613439 | <filename>src/fidesops/schemas/connection_configuration/connection_config.py
from datetime import datetime
from typing import Optional, List
from pydantic import Extra, BaseModel
from fidesops.schemas.api import BulkResponse, BulkUpdateFailed
from fidesops.schemas.shared_schemas import FidesOpsKey
from fidesops.models.connectionconfig import ConnectionType, AccessLevel
class CreateConnectionConfiguration(BaseModel):
"""
Schema for creating a ConnectionConfiguration
Note that secrets are *NOT* allowed to be supplied here.
"""
name: str
key: Optional[FidesOpsKey]
connection_type: ConnectionType
access: AccessLevel
class Config:
"""Restrict adding other fields through this schema and set orm_mode to support mapping to ConnectionConfig"""
orm_mode = True
use_enum_values = True
extra = Extra.forbid
class ConnectionConfigurationResponse(BaseModel):
"""
Describes the returned schema for a ConnectionConfiguration.
Do *NOT* add "secrets" to this schema.
"""
name: str
key: FidesOpsKey
connection_type: ConnectionType
access: AccessLevel
created_at: datetime
updated_at: Optional[datetime]
last_test_timestamp: Optional[datetime]
last_test_succeeded: Optional[bool]
class Config:
"""Set orm_mode to support mapping to ConnectionConfig"""
orm_mode = True
class BulkPutConnectionConfiguration(BulkResponse):
"""Schema with mixed success/failure responses for Bulk Create/Update of ConnectionConfiguration responses."""
succeeded: List[ConnectionConfigurationResponse]
failed: List[BulkUpdateFailed]
| StarcoderdataPython |
90511 | <reponame>BLSQ/iaso
from django.contrib.auth import update_session_auth_hash
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import JsonResponse
from django.utils.translation import gettext as _
from iaso.models import Profile, OrgUnit
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
class HasProfilePermission(permissions.BasePermission):
def has_permission(self, request, view):
if view.action in ("retrieve", "partial_update") and view.kwargs.get("pk") == "me":
return True
if (not request.user.has_perm("menupermissions.iaso_users")) and request.method != "GET":
return False
return True
class ProfilesViewSet(viewsets.ViewSet):
"""Profiles API
This API is restricted to authenticated users having the "menupermissions.iaso_users" permission, with one
exception: GET /api/profiles/me is accessible to any authenticated user.
GET /api/profiles/
GET /api/profiles/me => current user
GET /api/profiles/<id>
POST /api/profiles/
POST /api/profiles/me => current user, can only set language field
PATCH /api/profiles/<id>
DELETE /api/profiles/<id>
"""
# FIXME : replace by a model viewset
permission_classes = [permissions.IsAuthenticated, HasProfilePermission]
def get_queryset(self):
account = self.request.user.iaso_profile.account
return Profile.objects.filter(account=account)
def list(self, request):
limit = request.GET.get("limit", None)
page_offset = request.GET.get("page", 1)
orders = request.GET.get("order", "user__user_name").split(",")
search = request.GET.get("search", None)
queryset = self.get_queryset()
if search:
queryset = queryset.filter(
Q(user__username__icontains=search)
| Q(user__first_name__icontains=search)
| Q(user__last_name__icontains=search)
)
if limit:
queryset = queryset.order_by(*orders)
limit = int(limit)
page_offset = int(page_offset)
paginator = Paginator(queryset, limit)
res = {"count": paginator.count}
if page_offset > paginator.num_pages:
page_offset = paginator.num_pages
page = paginator.page(page_offset)
res["profiles"] = map(lambda x: x.as_dict(), page.object_list)
res["has_next"] = page.has_next()
res["has_previous"] = page.has_previous()
res["page"] = page_offset
res["pages"] = paginator.num_pages
res["limit"] = limit
return Response(res)
else:
return Response({"profiles": [profile.as_short_dict() for profile in queryset]})
def retrieve(self, request, *args, **kwargs):
pk = kwargs.get("pk")
if pk == "me":
profile = request.user.iaso_profile
return Response(profile.as_dict())
else:
profile = get_object_or_404(self.get_queryset(), pk=pk)
return Response(profile.as_dict())
def partial_update(self, request, pk=None):
if pk == "me":
# allow user to change his own language
user = request.user
profile = request.user.iaso_profile
if "language" in request.data:
profile.language = request.data["language"]
profile.save()
return Response(profile.as_dict())
profile = get_object_or_404(self.get_queryset(), id=pk)
username = request.data.get("user_name")
password = request.data.get("password", "")
if not username:
return JsonResponse({"errorKey": "user_name", "errorMessage": "Nom d'utilisateur requis"}, status=400)
user = profile.user
user.first_name = request.data.get("first_name", "")
user.last_name = request.data.get("last_name", "")
user.username = username
user.email = request.data.get("email", "")
profile.language = request.data.get("language", "")
profile.save()
if password != "":
user.set_password(password)
permissions = request.data.get("permissions", [])
user.user_permissions.clear()
for permission_codename in permissions:
permission = get_object_or_404(Permission, codename=permission_codename)
user.user_permissions.add(permission)
user.save()
if password and request.user == user:
# update session hash if you changed your own password so you don't get unlogged
# https://docs.djangoproject.com/en/3.2/topics/auth/default/#session-invalidation-on-password-change
update_session_auth_hash(request, user)
org_units = request.data.get("org_units", [])
profile.org_units.clear()
for org_unit in org_units:
org_unit_item = get_object_or_404(OrgUnit, pk=org_unit.get("id"))
profile.org_units.add(org_unit_item)
profile.save()
return Response(profile.as_dict())
def create(self, request):
username = request.data.get("user_name")
password = request.data.get("password", "")
if not username:
return JsonResponse({"errorKey": "user_name", "errorMessage": _("Nom d'utilisateur requis")}, status=400)
if not password:
return JsonResponse({"errorKey": "password", "errorMessage": _("Mot de passe requis")}, status=400)
existing_profile = User.objects.filter(username=username).first()
if existing_profile:
return JsonResponse({"errorKey": "user_name", "errorMessage": _("Nom d'utilisateur existant")}, status=400)
user = User()
user.first_name = request.data.get("first_name", "")
user.last_name = request.data.get("last_name", "")
user.username = username
user.email = request.data.get("email", "")
permissions = request.data.get("permissions", [])
if password != "":
user.set_password(password)
user.save()
for permission_codename in permissions:
permission = get_object_or_404(Permission, codename=permission_codename)
user.user_permissions.add(permission)
if permissions != []:
user.save()
# Create a iaso profile for the new user and attach it to the same account
# as the currently authenticated user
current_profile = request.user.iaso_profile
user.profile = Profile.objects.create(
user=user, account=current_profile.account, language=request.data.get("language", "")
)
org_units = request.data.get("org_units", [])
profile = get_object_or_404(Profile, id=user.profile.pk)
profile.org_units.clear()
for org_unit in org_units:
org_unit_item = get_object_or_404(OrgUnit, pk=org_unit.get("id"))
profile.org_units.add(org_unit_item)
profile.save()
return Response(user.profile.as_dict())
def delete(self, request, pk=None):
    """Delete a profile and its associated Django user.

    NOTE(review): user.delete() presumably cascades to the profile row
    already, making the explicit profile.delete() redundant — confirm
    against the Profile FK's on_delete behavior.
    """
    profile = get_object_or_404(self.get_queryset(), id=pk)
    user = profile.user
    user.delete()
    profile.delete()
    return Response(True)
| StarcoderdataPython |
# Copyright 2017-2019 <NAME>, <NAME>, <NAME>
# Copyright 2019-2020 Intel Corporation
#
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
kAFL Slave Implementation.
Request fuzz input from Master and process it through various fuzzing stages/mutations.
Each Slave is associated with a single Qemu instance for executing fuzz inputs.
"""
import os
import psutil
import time
import signal
import sys
import shutil
import lz4.frame as lz4
from common.config import FuzzerConfiguration
from common.debug import log_slave
from common.qemu import qemu
from common.util import read_binary_file, atomic_write, print_warning, print_fail
from fuzzer.bitmap import BitmapStorage, GlobalBitmap
from fuzzer.communicator import ClientConnection, MSG_IMPORT, MSG_RUN_NODE, MSG_BUSY
from fuzzer.node import QueueNode
from fuzzer.state_logic import FuzzingStateLogic
from fuzzer.statistics import SlaveStatistics
from fuzzer.technique.helper import rand
def slave_loader(slave_id):
    """Slave process entry point: pin to a CPU, connect to the master, run the loop.

    Installs a SIGTERM handler that tears down the associated Qemu instance
    before exiting, so the VM is not leaked when the master kills us.
    """
    def sigterm_handler(signal, frame):
        # slave_process is captured from the enclosing scope once created below
        if slave_process.q:
            slave_process.q.async_exit()
        sys.exit(0)

    log_slave("PID: " + str(os.getpid()), slave_id)
    # sys.stdout = open("slave_%d.out"%slave_id, "w")
    config = FuzzerConfiguration()

    # Pin this slave to a core: the user-chosen one, or slave_id by default.
    if config.argument_values["cpu_affinity"]:
        psutil.Process().cpu_affinity([config.argument_values["cpu_affinity"]])
    else:
        psutil.Process().cpu_affinity([slave_id])

    connection = ClientConnection(slave_id, config)
    slave_process = SlaveProcess(slave_id, config, connection)

    signal.signal(signal.SIGTERM, sigterm_handler)
    os.setpgrp()  # own process group so signals can target the whole slave

    try:
        slave_process.loop()
    except:
        # On any crash, make sure Qemu is torn down before re-raising.
        if slave_process.q:
            slave_process.q.async_exit()
        raise
    log_slave("Exit.", slave_id)
# Module-level counter of "funky" (non-reproducible) inputs stored by SlaveProcess.store_funky().
num_funky = 0
class SlaveProcess:
    """Worker attached to a single Qemu instance.

    Receives tasks from the master (seed imports, queue nodes to fuzz, or
    busy notifications), executes payloads in the VM, validates new
    coverage, and reports results back over the ClientConnection.
    """

    def __init__(self, slave_id, config, connection, auto_reload=False):
        self.config = config
        self.slave_id = slave_id
        self.q = qemu(self.slave_id, self.config,
                      debug_mode=config.argument_values['debug'])
        self.statistics = SlaveStatistics(self.slave_id, self.config)
        self.logic = FuzzingStateLogic(self, self.config)
        self.conn = connection
        # "master" selects the shared (global) coverage bitmaps in SHM
        self.bitmap_storage = BitmapStorage(self.config, self.config.config_values['BITMAP_SHM_SIZE'], "master")

    def handle_import(self, msg):
        """Process an imported seed payload, then signal readiness for more work."""
        meta_data = {"state": {"name": "import"}, "id": 0}
        payload = msg["task"]["payload"]
        self.logic.process_node(payload, meta_data)
        self.conn.send_ready()

    def handle_busy(self):
        """No queue work available: optionally fuzz random inputs, else sleep briefly."""
        busy_timeout = 1
        kickstart = False  # hard-disabled; flip to fuzz random strings while idle
        if kickstart: # spend busy cycle by feeding random strings?
            log_slave("No ready work items, attempting random..", self.slave_id)
            start_time = time.time()
            while (time.time() - start_time) < busy_timeout:
                meta_data = {"state": {"name": "import"}, "id": 0}
                payload = rand.bytes(rand.int(32))
                self.logic.process_node(payload, meta_data)
        else:
            log_slave("No ready work items, waiting...", self.slave_id)
            time.sleep(busy_timeout)
        self.conn.send_ready()

    def handle_node(self, msg):
        """Fuzz one queue node through its current stage and report the results."""
        meta_data = QueueNode.get_metadata(msg["task"]["nid"])
        payload = QueueNode.get_payload(meta_data["info"]["exit_reason"], meta_data["id"])
        results, new_payload = self.logic.process_node(payload, meta_data)
        if new_payload:
            default_info = {"method": "validate_bits", "parent": meta_data["id"]}
            # A stage may propose an alternative payload; accept it only if it
            # still sets all the coverage bits recorded for the original node.
            if self.validate_bits(new_payload, meta_data, default_info):
                log_slave("Stage %s found alternative payload for node %d"
                          % (meta_data["state"]["name"], meta_data["id"]),
                          self.slave_id)
            else:
                log_slave("Provided alternative payload found invalid - bug in stage %s?"
                          % meta_data["state"]["name"],
                          self.slave_id)
        self.conn.send_node_done(meta_data["id"], results, new_payload)

    def loop(self):
        """Main receive/dispatch loop; returns when Qemu fails to start or master disconnects."""
        if not self.q.start():
            return

        log_slave("Started qemu", self.slave_id)
        while True:
            try:
                msg = self.conn.recv()
            except ConnectionResetError:
                log_slave("Lost connection to master. Shutting down.", self.slave_id)
                return

            if msg["type"] == MSG_RUN_NODE:
                self.handle_node(msg)
            elif msg["type"] == MSG_IMPORT:
                self.handle_import(msg)
            elif msg["type"] == MSG_BUSY:
                self.handle_busy()
            else:
                raise ValueError("Unknown message type {}".format(msg))

    def quick_validate(self, data, old_res, quiet=False):
        """Re-execute *data* once and compare its coverage bitmap with *old_res*."""
        # Validate in persistent mode. Faster but problematic for very funky targets
        self.statistics.event_exec()
        old_array = old_res.copy_to_array()
        new_res = self.__execute(data).apply_lut()
        new_array = new_res.copy_to_array()
        if new_array == old_array:
            return True

        if not quiet:
            log_slave("Input validation failed! Target is funky?..", self.slave_id)
        return False

    def funky_validate(self, data, old_res):
        """Run quick_validate() several times; accept when >= 80% of runs agree."""
        # Validate in persistent mode with stochastic prop of funky results
        validations = 8
        confirmations = 0
        for _ in range(validations):
            if self.quick_validate(data, old_res, quiet=True):
                confirmations += 1

        if confirmations >= 0.8*validations:
            return True

        log_slave("Funky input received %d/%d confirmations. Rejecting.." % (confirmations, validations), self.slave_id)
        if self.config.argument_values['v']:
            self.store_funky(data)
        return False

    def store_funky(self, data):
        """Persist a rejected non-deterministic ("funky") input for offline analysis."""
        global num_funky
        num_funky += 1

        # store funky input for further analysis
        funky_folder = self.config.argument_values['work_dir'] + "/funky/"
        atomic_write(funky_folder + "input_%02d_%05d" % (self.slave_id, num_funky), data)

    def validate_bits(self, data, old_node, default_info):
        """Check that *data* still sets every new bit AND byte recorded for *old_node*."""
        new_bitmap, _ = self.execute(data, default_info)
        # handle non-det inputs
        if new_bitmap is None:
            return False

        old_bits = old_node["new_bytes"].copy()
        old_bits.update(old_node["new_bits"])
        return GlobalBitmap.all_new_bits_still_set(old_bits, new_bitmap)

    def validate_bytes(self, data, old_node, default_info):
        """Like validate_bits() but only requires the new *bytes* to remain set."""
        new_bitmap, _ = self.execute(data, default_info)
        # handle non-det inputs
        if new_bitmap is None:
            return False

        old_bits = old_node["new_bytes"].copy()
        return GlobalBitmap.all_new_bits_still_set(old_bits, new_bitmap)

    def execute_redqueen(self, data):
        """Execute *data* in Redqueen (input-to-state tracing) mode."""
        self.statistics.event_exec_redqueen()
        return self.q.execute_in_redqueen_mode(data)

    def __send_to_master(self, data, execution_res, info):
        """Ship an interesting input plus its bitmap and metadata to the master."""
        info["time"] = time.time()
        info["exit_reason"] = execution_res.exit_reason
        info["performance"] = execution_res.performance
        if self.conn is not None:
            self.conn.send_new_input(data, execution_res.copy_to_array(), info)

    def trace_payload(self, data, info):
        """Re-run *data* with PT tracing enabled and store the lz4-compressed trace.

        Returns the execution result, or None if tracing/compression failed.
        """
        trace_file_in = self.config.argument_values['work_dir'] + "/redqueen_workdir_%d/pt_trace_results.txt" % self.slave_id;
        trace_folder = self.config.argument_values['work_dir'] + "/traces/"
        trace_file_out = trace_folder + "payload_%05d" % info['id']
        log_slave("Tracing payload_%05d.." % info['id'], self.slave_id)
        try:
            self.q.set_payload(data)
            exec_res = self.q.execute_in_trace_mode(timeout_detection=False)

            with open(trace_file_in, 'rb') as f_in:
                with lz4.LZ4FrameFile(trace_file_out + ".lz4", 'wb', compression_level=lz4.COMPRESSIONLEVEL_MINHC) as f_out:
                    shutil.copyfileobj(f_in, f_out)

            if not exec_res.is_regular():
                # non-regular exit (crash/timeout): reset the VM before continuing
                self.statistics.event_reload()
                self.q.reload()
        except Exception as e:
            log_slave("Failed to produce trace %s: %s (skipping..)" % (trace_file_out, e), self.slave_id)
            return None

        return exec_res

    def __execute(self, data, retry=0):
        """Low-level payload execution; retries up to 3 times across Qemu restarts."""
        try:
            self.q.set_payload(data)
            return self.q.send_payload()
        except (ValueError, BrokenPipeError):
            if retry > 2:
                # TODO if it reliably kills qemu, perhaps log to master for harvesting..
                print_fail("Slave %d aborting due to repeated SHM/socket error. Check logs." % self.slave_id)
                log_slave("Aborting due to repeated SHM/socket error. Payload: %s" % repr(data), self.slave_id)
                raise

            print_warning("SHM/socket error on Slave %d (retry %d)" % (self.slave_id, retry))
            log_slave("SHM/socket error, trying to restart qemu...", self.slave_id)
            self.statistics.event_reload()
            if not self.q.restart():
                raise

        return self.__execute(data, retry=retry+1)

    def execute(self, data, info):
        """Execute *data*, validate any new coverage, and report it to the master.

        Returns (exec_res, is_new_input). Crashes are forwarded without
        stability validation since they tend to be non-deterministic.
        """
        self.statistics.event_exec()

        exec_res = self.__execute(data)
        is_new_input = self.bitmap_storage.should_send_to_master(exec_res)
        crash = exec_res.is_crash()
        stable = False;

        # store crashes and any validated new behavior
        # do not validate timeouts and crashes at this point as they tend to be nondeterministic
        if is_new_input:
            if not crash:
                assert exec_res.is_lut_applied()
                if self.config.argument_values["funky"]:
                    stable = self.funky_validate(data, exec_res)
                else:
                    stable = self.quick_validate(data, exec_res)

                if not stable:
                    # TODO: auto-throttle persistent runs based on funky rate?
                    self.statistics.event_funky()
            if crash or stable:
                self.__send_to_master(data, exec_res, info)
        else:
            if crash:
                log_slave("Crashing input found (%s), but not new (discarding)" % (exec_res.exit_reason), self.slave_id)

        # restart Qemu on crash
        if crash:
            self.statistics.event_reload()
            self.q.reload()

        return exec_res, is_new_input
| StarcoderdataPython |
##
## PasswordChanger
## by AliAbdul
##
from Components.ActionMap import ActionMap
from Components.config import config, ConfigText, ConfigSubsection, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.Language import language
from Components.ScrollLabel import ScrollLabel
from os import environ
from Plugins.Plugin import PluginDescriptor
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from telnetlib import Telnet
from Tools.Directories import resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS
import gettext
############################################
def localeInit():
    """(Re)bind the gettext translation domains for the current UI language."""
    lang = language.getLanguage()
    environ["LANGUAGE"] = lang[:2]  # gettext honours the LANGUAGE env var
    gettext.bindtextdomain("enigma2", resolveFilename(SCOPE_LANGUAGE))
    gettext.textdomain("enigma2")
    gettext.bindtextdomain("PasswordChanger", "%s%s" % (resolveFilename(SCOPE_PLUGINS), "Extensions/PasswordChanger/locale/"))
def _(txt):
    """Translate *txt*: try the plugin's own catalogue first, then fall back
    to the global enigma2 domain (which returns *txt* unchanged if unknown)."""
    plugin_translation = gettext.dgettext("PasswordChanger", txt)
    if plugin_translation != txt:
        return plugin_translation
    return gettext.gettext(txt)
# Bind translation domains now, and again whenever the UI language changes.
localeInit()
language.addCallback(localeInit)

############################################

# Persistent plugin settings: the two password fields shown in the setup screen.
config.plugins.PasswordChanger = ConfigSubsection()
config.plugins.PasswordChanger.old_password = ConfigText(default="", fixed_size=False)
config.plugins.PasswordChanger.new_password = ConfigText(default="", fixed_size=False)
############################################
class PasswordChanger(ConfigListScreen, Screen):
    """Setup screen asking for the old and the new password."""
    skin = """
<screen position="center,center" size="420,70" title="%s" >
<widget name="config" position="0,0" size="420,70" scrollbarMode="showOnDemand" />
</screen>""" % _("Password Changer")

    def __init__(self, session, args=None):
        Screen.__init__(self, session)
        self.session = session
        ConfigListScreen.__init__(self, [
            getConfigListEntry(_("Old password:"), config.plugins.PasswordChanger.old_password),
            getConfigListEntry(_("New password:"), config.plugins.PasswordChanger.new_password)])
        self["actions"] = ActionMap(["OkCancelActions"], {"ok": self.changePassword, "cancel": self.exit}, -2)

    def changePassword(self):
        """Validate the new password length (5-8 chars) and open the telnet console."""
        old_pass = config.plugins.PasswordChanger.old_password.value
        new_pass = config.plugins.PasswordChanger.new_password.value
        if len(new_pass) > 4 and len(new_pass) < 9:
            self.session.open(PasswordChangerConsole, old_pass, new_pass)
        else:
            self.session.open(MessageBox, _("Incorrect new password!\nMinimum length: 5\nMaximum length: 8"), MessageBox.TYPE_ERROR)

    def exit(self):
        # discard any unsaved edits before closing the screen
        for x in self["config"].list:
            x[1].cancel()
        self.close()
############################################
class PasswordChangerConsole(Screen):
    """Console screen: logs into localhost over telnet and drives `passwd`
    interactively to set the new root password, showing the session log."""
    skin = """
<screen position="center,center" size="520,400" title="%s" >
<widget name="label" position="0,0" size="520,400" font="Regular;20" />
</screen>""" % _("Password Changer")

    def __init__(self, session, old_pass, new_pass):
        Screen.__init__(self, session)
        self.working = True
        self.old_pass = old_pass
        # FIX: was a redacted placeholder token; store the actual new password.
        self.new_pass = new_pass
        self.log = ""
        self.timeout = 2  # seconds to wait for each telnet prompt
        # FIX: initialise the telnet handle so exit() before run() cannot
        # raise AttributeError; run() replaces it with a live connection.
        self.t = None
        self["label"] = ScrollLabel("")
        self["actions"] = ActionMap(["WizardActions"],
        {
            "ok": self.exit,
            "back": self.exit,
            "up": self["label"].pageUp,
            "down": self["label"].pageDown,
            "left": self["label"].pageUp,
            "right": self["label"].pageDown
        }, -1)
        self.onLayoutFinish.append(self.run)

    def exit(self):
        """Close the telnet session and the screen once work has finished."""
        if not self.working:
            self.sendMessage("exit")
            self.close()

    def sendMessage(self, message):
        """Send one line over telnet; return everything read until timeout.

        "UNKNOWN" never appears in the output, so read_until effectively
        waits out self.timeout and returns whatever arrived.
        """
        if self.t is not None:
            self.t.write(message + "\n")
            r = self.t.read_until("UNKNOWN", self.timeout)
            self.log += r
            return r
        else:
            return ""

    def run(self):
        """Open the telnet session and log in as root (using the old password if asked)."""
        logged_in = False
        try:
            self.t = Telnet("localhost")
            self.log = self.t.read_until("login:", self.timeout)
            if self.log.__contains__("login:"):
                r = self.sendMessage("root")
                if r.__contains__("~#"):
                    # shell prompt right away: no password currently set
                    logged_in = True
                elif r.__contains__("Password:"):
                    r = self.sendMessage(self.old_pass)
                    if r.__contains__("~#"):
                        logged_in = True
        except:
            self.t = None
        if logged_in:
            self.changePassword()
        else:
            self.log += _("Could not log in!")
            self["label"].setText(self.log)
            self.working = False

    def changePassword(self):
        """Drive the interactive `passwd` dialog with the new password."""
        try:
            r = self.sendMessage("passwd")
            if r.__contains__("Enter new password:") or r.__contains__("New password:"):
                r = self.sendMessage(self.new_pass)
                if r.__contains__("Re-enter new password:") or r.__contains__("Retype password:"):
                    r = self.sendMessage(self.new_pass)
        except:
            self.log += _("Error while setting new password!")
        self["label"].setText(self.log)
        self.working = False
############################################
def start(session, **kwargs):
    """Plugin entry point: open the password changer setup screen."""
    session.open(PasswordChanger)
def main(menuid):
    """Menu hook: offer the plugin only inside the "system" menu."""
    if menuid == "system":
        return [(_("Password Changer"), start, "password_changer", None)]
    return []
############################################
def Plugins(**kwargs):
    """Enigma2 plugin descriptor: register the entry in the system menu."""
    return PluginDescriptor(name=_("Password Changer"), description=_("Change your ftp and telnet password"), where=PluginDescriptor.WHERE_MENU, fnc=main)
| StarcoderdataPython |
from random import randint, choice
from discord.ext.commands import command, group
from names import get_full_name
from pony.orm import db_session, sql_debug
from .cog import Cog
from dicebag import Character, Race, Role
class DND(Cog):
    # Discord cog exposing simple D&D character/race/class management commands
    # backed by Pony ORM entities (Character, Race, Role).
    # NOTE: docstrings on command methods are shown by discord.py's help, so
    # new explanatory text below is kept in '#' comments to avoid changing it.

    @command()
    async def characters(self):
        """Lists all of the created characters."""
        with db_session:
            characters = ', '.join(str(c) for c in Character.select())
            await self.bot.reply(characters)

    @group(pass_context=True)
    async def make(self, ctx):
        # group root: prompt when invoked without a subcommand
        if not ctx.invoked_subcommand:
            await self.bot.reply('Make what??!')

    @make.command()
    async def race(self, name=None):
        """adds a new race"""
        if not name:
            await self.bot.reply("Needs a name")
            return None
        with db_session:
            r = Race(name=name.title())
            await self.bot.reply("Made: {}".format(r))

    @make.command()
    async def role(self, name=None):
        """adds a new role, should be class but.. python keyword"""
        if not name:
            await self.bot.reply('Needs a name')
            return None
        with db_session:
            try:
                role = Role(name=name.title())
                await self.bot.reply('Made: {}'.format(role))
            except:
                # e.g. duplicate name / constraint violation
                await self.bot.reply('Failed to create: {}'.format(name))

    @make.command(aliases=['char','chars'])
    async def character(self, race, role, name=None):
        """creates a new character"""
        name = name or get_full_name()  # random human name when none supplied
        with db_session:
            race = Race.get(name=race.title())
            role = Role.get(name=role.title())
            if race and role:
                await self.bot.reply(Character(name=name.title(), race=race, role=role))
            else:
                await self.bot.reply("Couldn't make character with race: {} or role: {}".format(race, role))

    @group(pass_context=True)
    async def ls(self, ctx):
        """Lists stuff"""
        if not ctx.invoked_subcommand:
            await self.bot.reply('list what??')

    @ls.command(aliases=['race'])
    async def races(self):
        with db_session:
            await self.bot.reply(', '.join(str(r) for r in Race.select()))

    @ls.command(aliases=['role'])
    async def roles(self):
        with db_session:
            await self.bot.reply(', '.join(str(r) for r in Role.select()))

    @ls.command(aliases=['char','chars'])
    async def characters(self):
        # NOTE(review): this rebinds the class attribute `characters` defined
        # above; both commands register at decoration time, but confirm the
        # intended method wins for direct attribute access.
        with db_session:
            await self.bot.reply(', '.join(str(c) for c in Character.select()))
def setup(bot):
    """Extension entry point: enable SQL debug per config and register the cog."""
    sql_debug(bot.config.get_sql_debug())
    bot.add_cog(DND(bot))
| StarcoderdataPython |
# repository: moacirsouza/nadas
# Exercise prompt (printed when the script runs).
print("""
097) Faça um programa que tenha uma função chamada escreva(), que receba um
texto qualquer como parâmetro e mostre uma mensagem com tamanho adaptável.
""")
def escreva(mensagem, caractereDeFormatacao='~'):
    """Print *mensagem* between two horizontal rules of the given character,
    each exactly as wide as the padded message."""
    framed = ' {} '.format(mensagem)
    rule = caractereDeFormatacao * len(framed)
    print(rule)
    print(framed)
    print(rule)
# Read the user's message and display it framed with '-' characters.
mensagemDoUsuario = input('Digite sua mensagem: ').strip()
escreva(mensagemDoUsuario, '-')
| StarcoderdataPython |
# interface to link up the methods info and the dynamic class builder
from provider import DriverMethod, get_providers_info, get_driver_methods
from dynamicclass import DynamicClass
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# get list of the supported provider
def providersList():
    """Print the supported libcloud providers (Python 2 print statement)."""
    result = get_providers_info()
    print result
# get list of methods supported by provider
def providerSupportedMethod(driver, method):
    """Return *method* if the driver supports it; otherwise print a notice
    and implicitly return None."""
    result = get_driver_methods(driver)
    if method in result:
        return method
    else:
        print "Method doesn't exit with provider"
def get_provider_instance(name):
    """Resolve a Provider constant name (e.g. "EC2_US_WEST") to its libcloud
    driver class.

    FIX: the original rebound the imported ``Provider`` module attribute and
    then passed ``Provider.name`` to get_driver(); get_driver() expects the
    provider constant itself (the value of ``getattr(Provider, name)``).
    """
    provider_const = getattr(Provider, name)
    cls = get_driver(provider_const)
    return cls
# request dynamic builder to create the command class
def buildCommandClass():
    """Build a DynamicClass from the create_node method description of the
    (hard-coded) EC2 US-West driver."""
    cls = get_driver(Provider.EC2_US_WEST)
    method_desc = methodInfo(cls, 'create_node')
    cls = DynamicClass(method_desc)
    #result = factory(cls, method_desc['name'])
    return cls
# get method info
def methodInfo(driver, method):
    """Return the introspected description dict for *method* of *driver*."""
    D = DriverMethod(driver, method)
    result = D.get_description()
    return result
# new name of the class
def factory(BaseClass, name):
    """Return a subclass of *BaseClass* whose __name__ is replaced by *name*.

    NOTE(review): on Python 3 __qualname__ still reflects the inner class,
    so repr() of the result will not show *name* — confirm acceptable.
    """
    class NewClass(BaseClass):
        pass
    NewClass.__name__ = "%s" % name
    return NewClass
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Imports for the credential manager."""
| StarcoderdataPython |
# Copyright 2016-2020 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-datasets/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flask blueprint for modular routes."""
from werkzeug.exceptions import HTTPException
from flask import Blueprint
from flask import jsonify
from flask import request
from flask import current_app
from flask import render_template
from flask import url_for, redirect
from mongoengine import ValidationError
from flask_login import current_user
from flask_security import login_required
from deepcell_datasets.database.models import Samples
from deepcell_datasets.database.models import Experiments
from deepcell_datasets.samples.forms import SampleForm, SampleFilterForm
from deepcell_datasets.utils import nest_dict
# Blueprint for all sample routes; templates live next to this module.
samples_bp = Blueprint('samples_bp', __name__,  # pylint: disable=C0103
                       template_folder='templates')
@samples_bp.errorhandler(Exception)
def handle_exception(err):
    """Blueprint-wide error handler.

    https://flask.palletsprojects.com/en/1.1.x/errorhandling/
    """
    # pass through HTTP errors
    if isinstance(err, HTTPException):
        return err
    elif isinstance(err, ValidationError):
        # mongoengine validation problems are client errors
        return jsonify({'error': str(err)}), 400
    # now you're handling non-HTTP exceptions only
    return jsonify({'error': str(err)}), 500
# Routes for HTML pages.
# TODO: This should likely be split into several routes allowing
# users the option to re-use information like scope, step, marker, etc.
# This could be down with checkbox and passing objects from one route
# to the next.
@samples_bp.route('/data_entry/<exp_id>', methods=['GET', 'POST'])
@login_required
def add_sample(exp_id):
form = SampleForm()
# flask-mongoengine wtf validation fails for required fields
# TODO: likely a bug in flask-mongo but the following logic shouldnt stay
if form.validate_on_submit():
# TODO: This is here to remind us of the package bug:
current_app.logger.info('Form errors are %s ', form.errors)
# Do something with data
body_raw = request.form
current_app.logger.info('Form body is %s ', body_raw)
body_dict = nest_dict(body_raw.to_dict())
# Add in Experiment ID information here
experiment = Experiments.objects.get_or_404(id=exp_id)
body_dict['experiment'] = experiment
current_app.logger.info('Nested dict to save is %s ', body_dict)
sample = Samples(**body_dict).save()
current_app.logger.info('sample %s saved succesfully', sample)
unique_id = sample.id
current_app.logger.info('unique_id %s extracted as key', unique_id)
return redirect(url_for('samples_bp.success'))
return render_template('samples/data_entry.html',
form=form,
current_user=current_user,
exp_id=exp_id)
@samples_bp.route('/', methods=['GET'])
@login_required
def view_all_samples():
    """Paginated sample listing, filterable via query-string parameters."""
    page = request.args.get('page', default=1, type=int)
    # whitelisted mongoengine filter keys (double underscore = nested field)
    filters = [
        'experiment',
        'kinetics',
        'spatial_dim',
        'species',
        'specimen',
        'modality__imaging_modality',
        'modality__compartment',
        'modality__marker',
        'imaging_params__platform',
        'imaging_params__magnification',
    ]
    # keep only the filters the caller actually provided (non-empty)
    provided_values = (request.args.get(f, default='') for f in filters)
    kwargs = {f: v for f, v in zip(filters, provided_values) if v}
    samples = Samples.objects(**kwargs)
    form = SampleFilterForm()
    per_page = current_app.config['ITEMS_PER_PAGE']
    paginated_samples = samples.paginate(page=page, per_page=per_page)
    return render_template('samples/samples-table.html',
                           paginated_samples=paginated_samples,
                           form=form,
                           **kwargs)
@samples_bp.route('/success')
def success():
    """Confirmation target used after a successful sample submission."""
    return 'Sample Successfully Submitted'
| StarcoderdataPython |
# file: vision/datasets/VIRAT_DataLoader.py
import torch
import os
import sys
import numpy as np
import pickle
import torch.utils.data as data
import glob2
import logging
import cv2
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
class VIRAT_Loader(data.Dataset):
    """Dataset over pickled VIRAT frame/annotation shards.

    Each pickle under *image_path* holds a (N, C, H, W) image array; each
    pickle under *anno_path* holds a list of per-frame annotation rows.
    __getitem__ maps a flat index to (shard, offset) assuming every shard
    has the same number of frames as the first one.
    """

    def __init__(self,image_path, anno_path, transform=None, target_transform=None, transforms=None):
        super(VIRAT_Loader,self).__init__()
        # check if img and anno path exist
        self._check_path_exists(image_path, anno_path)
        # define classes (name -> label id, and the inverse ordered list)
        self._class_names = {u"BackGround":0, u"unknown":1, u"person":2, u"car":3, u"vehicles":4, u"object":5, u"bike,bicycles":6}
        self.class_names = ["BackGround","unknown","person","car","vehicles","object","bike,bicycles"]
        self.data_size = None
        self.img_pickle_list = glob2.glob(image_path+"/*.pickle")
        self.anno_pickle_list = glob2.glob(anno_path+"/*.pickle")
        self.transform = transform
        self.transforms = transforms
        self.target_transform = target_transform
        self.count = 0
        # https://www.twblogs.net/a/5c835297bd9eee35fc13bd96
        # https://blog.csdn.net/u012436149/article/details/78545766
        # Probe the first shard to learn the per-shard frame count.
        with open(self.img_pickle_list[0], 'rb') as f:
            shuffled_img = pickle.load(f)
        self.data_size = shuffled_img.shape[0]

    def __getitem__(self, index):
        """Return (image, boxes, labels) for flat *index*, with transforms applied."""
        img = self._load_samples(index)
        bboxes_x0y0x1y1, labels = self._load_anno(index)
        logging.debug("===== before transform VIRAT img shape : {} ======".format(img.shape))
        logging.debug("===== before transform VIRAT bbox shape : {} & type : {} ======".format(bboxes_x0y0x1y1.shape, bboxes_x0y0x1y1.dtype))
        logging.debug("===== before transform VIRAT labels shape : {} & type : {} ======".format(labels.shape, labels.dtype))
        if self.transform:
            img, bboxes_x0y0x1y1, labels = self.transform(img, bboxes_x0y0x1y1, labels)
        if self.target_transform:
            bboxes_x0y0x1y1, labels = self.target_transform(bboxes_x0y0x1y1, labels)
        # loss functions expect integer class labels
        labels = labels.type('torch.LongTensor')
        logging.debug("===== VIRAT img shape : {} ======".format(img.shape))
        logging.debug("===== VIRAT bbox shape : {} ======".format(bboxes_x0y0x1y1.shape))
        logging.debug("===== VIRAT label shape : {} ======".format(labels.shape))
        return img, bboxes_x0y0x1y1, labels

    def __len__(self):
        # assumes every shard holds data_size frames — TODO confirm
        return self.data_size * len(self.img_pickle_list)

    def _check_path_exists(self, image_path, anno_path):
        """Assert both dataset folders exist before any loading is attempted."""
        print(image_path)
        assert os.path.exists(image_path), 'The folder path : {} is wrong '.format(image_path)
        assert os.path.exists(anno_path), 'The gound truth path : {} is wrong '.format(anno_path)

    def _load_samples(self, index):
        """Load one frame: pick shard and offset, convert to HWC uint8 RGB."""
        fetch_data_pickle = index // self.data_size
        fetch_data_slice = index % self.data_size
        if int(sys.version[0]) > 2:
            with open(self.img_pickle_list[fetch_data_pickle], 'rb') as f:
                shuffled_img = pickle.load(f)
        else:
            with open(self.img_pickle_list[fetch_data_pickle], 'rb') as f:
                # Python 2 pickle loading intentionally unsupported
                raise NotImplementedError("Can't load by python 2")
                import pdb;pdb.set_trace()
                shuffled_img = pickle.load(f, encoding = 'latin1')
        shuffled_img = shuffled_img[fetch_data_slice,...]
        # (C,H,W) -> (H,W,C); stored frames appear to be BGR — TODO confirm
        shuffled_img = shuffled_img.transpose(1,2,0)
        shuffled_img = shuffled_img.astype(np.uint8)
        shuffled_img = cv2.cvtColor(shuffled_img, cv2.COLOR_BGR2RGB)
        return shuffled_img

    def _load_anno(self, index):
        """Load the boxes/labels for one frame.

        Each annotation row appears to be laid out with x, y at indices 3-4,
        width/height at 5-6 and the class label at 7 — TODO confirm against
        the preprocessing script that wrote the pickles.
        """
        fetch_data_pickle = index // self.data_size
        fetch_data_slice = index % self.data_size
        with open(self.anno_pickle_list[fetch_data_pickle], 'rb') as f:
            shuffled_anno = pickle.load(f)
        shuffled_anno = shuffled_anno[fetch_data_slice]
        bboxes = []
        labels = []
        for sets in shuffled_anno:
            # convert (x, y, w, h) to corner form (x0, y0, x1, y1)
            x0,y0 = sets[3], sets[4]
            x1,y1 = sets[3]+sets[5], sets[4]+sets[6]
            bboxes.append(np.array([x0,y0,x1,y1]))
            labels.append(sets[7])
        BBOXes = np.array(bboxes) # [bbox_of_each_frame, x0y0x1y1]
        LABELes = np.array(labels) # [labels_of_each_frame]
        logging.debug("========= BBOXes shape:{} =======".format(BBOXes.shape))
        logging.debug("========= LABELes shape:{} =======".format(LABELes.shape))
        return BBOXes, LABELes
| StarcoderdataPython |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import csv as csv
import argparse
from matplotlib.backends.backend_pdf import PdfPages
import cPickle as pickle
def outputFigure(dataDic,filename,xRange,yRange,title,xLabel,yLabel,loc,
                 maxCores,line):
    """Plot one subplot per key of *dataDic* and save the figure to *filename*.

    dataDic values are [x_values, y_values, leaf_count]; *loc* is the base
    matplotlib subplot code (e.g. 220) incremented per subplot; *line*
    toggles a y=x reference line up to maxCores+1.
    """
    fig = plt.figure()
    fig.suptitle(title)
    fig.set_tight_layout(True)
    local = loc
    for key in dataDic.keys():
        local=local+1  # next subplot position
        pt = fig.add_subplot(local)
        pt.set_xlabel(xLabel)
        pt.set_ylabel(yLabel)
        # NOTE: rebinds the `title` parameter with the per-subplot title
        title='Depth:'+ str(key)+',Leaves: '+str(dataDic[key][2])
        pt.set_title(title)
        pt.plot(dataDic[key][0],dataDic[key][1],'bs')
        pt.axis([0,xRange,0,yRange])
        if line:
            pt.plot([0,maxCores+1],[0,maxCores+1])
    plt.tight_layout()
    fig.savefig(filename)
def main():
    """Load pickled timing data files 1..4 and emit an overhead plot per file.

    Overhead per core count is (parallel_time - sequential_time) as a
    percentage of the parallel time; the serial data point (index 0) is
    dropped from each series.
    """
    parser = argparse.ArgumentParser(description="command line args")
    inputFiles = 4
    outputFiles = 4  # unused; kept for symmetry with inputFiles
    parser.add_argument('-i','--inputF',help='input file name',required=True)
    parser.add_argument('-o','--output',help='output file name',required=True)
    args = parser.parse_args()
    inputFile = args.inputF
    outputFile = args.output
    for f in range(1,inputFiles+1):
        inFile = inputFile+str(f)+".p"
        outFile = outputFile+str(f)+".png"
        data = pickle.load(open(inFile,"rb"))
        overhead = {}
        figureLocation = 220  # 2x2 subplot grid base code
        maxCores = 32
        for key in data.keys():
            seqTime = data[key][3][0]  # index 0 holds the sequential run time
            o = [x-seqTime for x in data[key][3]]
            over = [(x/y)*100 for x,y in zip(o,data[key][1])]
            overhead[key] = [data[key][0][1:],over[1:],data[key][2]]
            m = max(over)  # NOTE: only the last key's max sets the y-range
        outputFigure(overhead,outFile,maxCores+1,m,"Overhead",'number of cores',
                     'percent overhead',figureLocation,maxCores,False)
        data.clear()
        overhead.clear()
main()
| StarcoderdataPython |
# file: COVID19_prediction/COVID_model/model_util.py
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 17:18:28 2020
@author: XT
"""
from __future__ import print_function
import random
import joblib
import numpy as np
import pandas as pd
import requests
from sklearn import metrics
SR = 16000 # sample rate
import os # noqa: E402
import sys # noqa: E402
import librosa # noqa: E402
import model_params as params # noqa: E402
sys.path.append("../vggish")
from vggish_input import waveform_to_examples # noqa: E402
SR_VGG = params.SR_VGG
def get_resort(files):
    """Order *files* case-insensitively, then shuffle with a fixed seed (222)
    so the ordering is reproducible across runs.

    Names that differ only in case collapse to the last occurrence, matching
    the original behaviour.

    :param files: file list
    :type files: list
    :return: deterministically shuffled list
    :rtype: list
    """
    lower_to_original = {sample.lower(): sample for sample in files}
    ordered = [lower_to_original[key] for key in sorted(lower_to_original)]
    np.random.seed(222)
    np.random.shuffle(ordered)
    return ordered
def get_resort_test(files):
    """Order *files* case-insensitively without shuffling (deterministic,
    used for the test split).

    :param files: file list
    :type files: list
    :return: alphabetic (case-insensitive) order
    :rtype: list
    """
    lower_to_original = {sample.lower(): sample for sample in files}
    return [lower_to_original[key] for key in sorted(lower_to_original)]
def get_aug(y, type):
    """Augment a waveform for training.

    :param y: audio samples (1-D numpy array)
    :param type: augmentation kind, "noise" or "pitchspeed"
    :return: augmented waveform re-normalised to peak amplitude 1
    :rtype: numpy array
    :raises ValueError: if *type* is not a known augmentation
        (FIX: previously fell through to an UnboundLocalError on y_aug)
    """
    if type == "noise":
        # additive white Gaussian noise
        y_aug = y + 0.005 * np.random.normal(0, 1, len(y))
    elif type == "pitchspeed":
        # random pitch shift of up to +/-6 semitone steps
        step = np.random.uniform(-6, 6)
        y_aug = librosa.effects.pitch_shift(y, SR, step)
    else:
        raise ValueError("unknown augmentation type: %r" % (type,))
    yt_n = y_aug / np.max(np.abs(y_aug))  # re-normolized the sound
    return yt_n
def spec_augment(spec: np.ndarray, num_mask=2, freq_masking_max_percentage=0.1, time_masking_max_percentage=0.2):
    """SpecAugment-style masking: zero out *num_mask* random frequency bands
    and time stripes on a copy of *spec* (axis 0 = frames, axis 1 = freq bins).

    The input array is never modified. RNG draw order (stdlib `random` for
    the mask width, `np.random` for the mask origin) is part of the contract
    for reproducibility under fixed seeds.
    """
    masked = spec.copy()
    for _ in range(num_mask):
        n_frames, n_freqs = masked.shape

        # frequency mask: width first (random), then origin (np.random)
        band = int(random.uniform(0.0, freq_masking_max_percentage) * n_freqs)
        start_f = int(np.random.uniform(low=0.0, high=n_freqs - band))
        masked[:, start_f : start_f + band] = 0

        # time mask: same two-step draw
        stripe = int(random.uniform(0.0, time_masking_max_percentage) * n_frames)
        start_t = int(np.random.uniform(low=0.0, high=n_frames - stripe))
        masked[start_t : start_t + stripe, :] = 0
    return masked
def load_data(data_path, is_aug):
    """Load data for training, validation and testing.
    :param data_path: path
    :type data_path: str
    :param is_aug: using augmentation
    :type is_aug: bool
    :return: batch
    :rtype: list
    """
    print("start to load data:", data_path)
    # NOTE(review): these file handles are never closed explicitly.
    data = joblib.load(open(data_path + "_covid.pk", "rb")) # load positive samples
    data2 = joblib.load(open(data_path + "_noncovid.pk", "rb")) # load negative samples
    data.update(data2)
    train_task = []
    covidcnt = 0
    noncvcnt = 0
    # Training split: label [0, 1] marks COVID-positive, [1, 0] negative.
    for uid in get_resort(data["train_covid_id"]):
        for temp in data["train_covid_id"][uid]:
            train_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["train_noncovid_id"]):
        for temp in data["train_noncovid_id"][uid]:
            train_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    total = len(train_task)
    # upsampling by repeating some covid to balance the class
    # NOTE(review): choice(..., replace=False) raises ValueError unless
    # 0 <= (noncvcnt - covidcnt) * 2 <= covidcnt -- confirm the data always
    # satisfies this.
    np.random.seed(1)
    add_covid = np.random.choice(range(covidcnt), (noncvcnt - covidcnt) * 2, replace=False)
    add_sample = [train_task[i] for i in add_covid]
    train_task = train_task + add_sample
    total = len(train_task)
    print("add covid:", noncvcnt - covidcnt, "total:", total)
    """
    #down sample
    np.random.seed(1)
    add_covid = np.random.choice(range(covidcnt, covidcnt + noncvcnt), covidcnt, replace=False)
    add_sample = [train_task[i] for i in add_covid]
    train_task = train_task[:covidcnt] + add_sample
    print('delete noncovid:', noncvcnt-covidcnt)
    total = len(train_task)
    """
    if is_aug: # only works for train
        # Mix in the pre-computed noise/pitch augmentations, upsampling
        # their covid samples the same way as above.
        for i, type in enumerate(["_augnoise.pk", "_augpitch.pk"]): #
            data_aug = joblib.load(open(data_path + type, "rb"))
            aug_covid = data_aug["covid"]
            aug_noncovid = data_aug["noncovid"]
            np.random.seed(i + 2) # random and different
            add_covid = np.random.choice(range(covidcnt), (noncvcnt - covidcnt) * 2, replace=False)
            add_sample = [aug_covid[i] for i in add_covid]
            train_task = train_task + aug_covid + add_sample + aug_noncovid
    vad_task = []
    covidcnt = 0
    noncvcnt = 0
    for uid in get_resort(data["vad_covid_id"]):
        for temp in data["vad_covid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["vad_noncovid_id"]):
        for temp in data["vad_noncovid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    test_task = []
    covidcnt = 0
    noncvcnt = 0
    for uid in get_resort(data["test_covid_id"]):
        for temp in data["test_covid_id"][uid]:
            test_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["test_noncovid_id"]):
        for temp in data["test_noncovid_id"][uid]:
            test_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    # NOTE(review): the first five test samples are duplicated -- presumably
    # to round out a batch size; confirm downstream metrics account for it.
    test_task = test_task + test_task[:5]
    # suffle samples
    np.random.seed(222)
    np.random.shuffle(train_task)
    np.random.seed(222)
    np.random.shuffle(vad_task)
    np.random.seed(222)
    np.random.shuffle(test_task)
    return train_task, vad_task, test_task
def load_vad_data(data_path):
    """Load vad data only.

    Combines the train and vad splits into a single validation list;
    label [0, 1] marks COVID-positive, [1, 0] negative.
    """
    print("start to load data:", data_path)
    # NOTE(review): these file handles are never closed explicitly.
    data = joblib.load(open(data_path + "_covid.pk", "rb"))
    data2 = joblib.load(open(data_path + "_noncovid.pk", "rb"))
    data.update(data2)
    vad_task = []
    covidcnt = 0
    noncvcnt = 0
    # i = 0
    for uid in get_resort(data["train_covid_id"]):
        for temp in data["train_covid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["train_noncovid_id"]):
        for temp in data["train_noncovid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    for uid in get_resort(data["vad_covid_id"]):
        for temp in data["vad_covid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["vad_noncovid_id"]):
        for temp in data["vad_noncovid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    # Deterministic shuffle so every run sees the same ordering.
    np.random.seed(222)
    np.random.shuffle(vad_task)
    return vad_task
def load_test_data(data_path):
    """Load test data only.

    Samples are returned in deterministic (case-insensitively sorted)
    order, with label [0, 1] for COVID-positive and [1, 0] for negative.
    """
    print("start to load data:", data_path)
    data = joblib.load(open(data_path + "_covid.pk", "rb"))
    data2 = joblib.load(open(data_path + "_noncovid.pk", "rb"))
    data.update(data2)
    test_task = []

    def collect(key, label):
        # Append every sample of every user under data[key] with `label`.
        for uid in get_resort_test(data[key]):
            for sample in data[key][uid]:
                test_task.append(
                    {"breath": sample["breath"], "cough": sample["cough"], "voice": sample["voice"], "label": label}
                )

    collect("test_covid_id", [0, 1])
    collect("test_noncovid_id", [1, 0])
    return test_task
def load_test_dict_data(data_path):
    """Load a pickled dict of labelled samples.

    :param data_path: full path to the pickle file
    :type data_path: str
    :return: the deserialised dictionary
    :rtype: dict
    """
    print("start to load data:", data_path)
    # BUG FIX: the file handle used to leak; close it deterministically.
    with open(data_path, "rb") as handle:
        return joblib.load(handle)
def get_input(sample):
    """Turn one sample's breath/cough/voice audio into stacked VGGish frames.

    Returns the concatenated spectrogram frames plus the frame indices at
    which the breath block ends and the cough block ends, the label, and a
    placeholder symptom vector.
    """
    frames = [
        waveform_to_examples(sample[modality], SR_VGG)
        for modality in ("breath", "cough", "voice")
    ]
    breath_end = frames[0].shape[0]
    cough_end = frames[1].shape[0] + breath_end
    vgg_input = np.concatenate(frames, axis=0)
    symptoms = [[1] * 13]  # sample['sym']
    return vgg_input, [[breath_end]], [[cough_end]], sample["label"], symptoms
def get_metrics(probs, labels):
    """calculate metrics.
    :param probs: list of per-sample class probabilities [P(class 0), P(class 1)]
    :type probs: float
    :param labels: list
    :type labels: int
    :return: metrics (AUC, sensitivity, specificity, specificity at 90% sensitivity)
    """
    probs = np.array(probs)
    probs = np.squeeze(probs)
    predicted = []
    # Hard labels from a 0.5 threshold on the class-0 probability.
    for i in range(len(probs)):
        if probs[i][0] > 0.5:
            predicted.append(0)
        else:
            predicted.append(1)
    label = np.array(labels)
    label = np.squeeze(label)
    predicted = np.array(predicted)
    predicted = np.squeeze(predicted)
    # pre = metrics.precision_score(label, predicted)
    # acc = metrics.accuracy_score(label, predicted)
    auc = metrics.roc_auc_score(label, probs[:, 1])
    precision, recall, _ = metrics.precision_recall_curve(label, probs[:, 1])
    # rec = metrics.recall_score(label, predicted)
    TN, FP, FN, TP = metrics.confusion_matrix(label, predicted).ravel()
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP / (TP + FN)
    # Specificity or true negative rate
    TNR = TN / (TN + FP)
    # PPV = TP/(TP + FP)
    # NPV = TN/(TN + FN)
    fpr, tpr, thresholds = metrics.roc_curve(label, probs[:, 1])
    # ROC point just before sensitivity first exceeds 90%.
    # NOTE(review): if tpr[0] > 0.9 this index is -1 and fpr[-1] (the last
    # ROC point) is used silently -- confirm that edge case is acceptable.
    index = np.where(tpr > 0.9)[0][0] - 1
    print(
        "AUC:"
        + "{:.2f}".format(auc)
        + " Sensitivity:"
        + "{:.2f}".format(TPR)
        + " Specificity:"
        + "{:.2f}".format(TNR)
        + " spe@90%sen:"
        + "{:.2f}".format(1 - fpr[index])
    )
    return auc, TPR, TNR, 1 - fpr[index]
def get_metrics_t(probs, label):
    """Return (AUC, sensitivity, specificity) for scalar positive-class scores."""
    predicted = [1 if score > 0.5 else 0 for score in probs]
    auc = metrics.roc_auc_score(label, probs)
    TN, FP, FN, TP = metrics.confusion_matrix(label, predicted).ravel()
    # Sensitivity (true positive rate) and specificity (true negative rate).
    TPR = TP * 1.0 / (TP + FN)
    TNR = TN * 1.0 / (TN + FP)
    return auc, TPR, TNR
def get_CI(data, AUC, Sen, Spe):
    """Print point estimates with bootstrapped 95% CIs for AUC/sensitivity/specificity.

    :param data: list of (probability, label) pairs
    :param AUC: point-estimate AUC to report
    :param Sen: point-estimate sensitivity to report
    :param Spe: point-estimate specificity to report
    """
    AUCs = []
    TPRs = []
    TNRs = []
    # 1000 bootstrap resamples, each with its own fixed seed for reproducibility.
    for s in range(1000):
        np.random.seed(s) # Para2
        sample = np.random.choice(range(len(data)), len(data), replace=True)
        samples = [data[i] for i in sample]
        sample_pro = [x[0] for x in samples]
        sample_label = [x[1] for x in samples]
        try:
            get_metrics_t(sample_pro, sample_label)
        except ValueError:
            # Fallback resample (e.g. when a bootstrap draw has one class only).
            # NOTE(review): this recomputed sample is never scored -- the
            # iteration contributes nothing to the CI lists; confirm whether
            # it should be evaluated and appended instead of discarded.
            np.random.seed(1001) # Para2
            sample = np.random.choice(range(len(data)), len(data), replace=True)
            samples = [data[i] for i in sample]
            sample_pro = [x[0] for x in samples]
            sample_label = [x[1] for x in samples]
        else:
            auc, TPR, TNR = get_metrics_t(sample_pro, sample_label)
            AUCs.append(auc)
            TPRs.append(TPR)
            TNRs.append(TNR)
    q_0 = pd.DataFrame(np.array(AUCs)).quantile(0.025)[0] # 2.5% percentile
    q_1 = pd.DataFrame(np.array(AUCs)).quantile(0.975)[0] # 97.5% percentile
    q_2 = pd.DataFrame(np.array(TPRs)).quantile(0.025)[0] # 2.5% percentile
    q_3 = pd.DataFrame(np.array(TPRs)).quantile(0.975)[0] # 97.5% percentile
    q_4 = pd.DataFrame(np.array(TNRs)).quantile(0.025)[0] # 2.5% percentile
    q_5 = pd.DataFrame(np.array(TNRs)).quantile(0.975)[0] # 97.5% percentile
    # LaTeX-style "point(low-high)" output, columns separated by '&'.
    print(
        str(AUC.round(2))
        + "("
        + str(q_0.round(2))
        + "-"
        + str(q_1.round(2))
        + ")"
        + "&"
        + str(Sen.round(2))
        + "("
        + str(q_2.round(2))
        + "-"
        + str(q_3.round(2))
        + ")"
        "&" + str(Spe.round(2)) + "(" + str(q_4.round(2)) + "-" + str(q_5.round(2)) + ")"
    )
def is_exists(path):
    """Return True if *path* exists; otherwise print a notice and return False."""
    if os.path.exists(path):
        return True
    print("Not exists: {}".format(path))
    return False
def maybe_create_directory(dirname):
    """Create *dirname* (and any missing parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the old check-then-create sequence,
    which was racy when two processes created the directory concurrently.
    Unlike the old version, a non-directory file squatting on the path now
    raises instead of being silently ignored.
    """
    os.makedirs(dirname, exist_ok=True)
def maybe_download(url, dirname):
    """Download *url* into *dirname* unless the file is already present."""
    destination = os.path.join(dirname, url.split("/")[-1])
    if os.path.isfile(destination):
        return
    response = requests.get(url)
    with open(destination, "wb") as out:
        out.write(response.content)
def mc_dropout(predictions, T):
    """Summarise Monte-Carlo-dropout predictions from a trained model.

    :param predictions: class probabilities of shape (I, T, C), where I is
        the number of inputs, T the number of MC samples and C the number
        of classes
    :param T: number of Monte Carlo samples (kept for interface
        compatibility; the mean is taken over axis 1 directly)
    :return: tuple ``(mean, variance)`` -- the per-input mean distribution,
        shape (I, C), and the predictive entropy per input, shape (I,)
    """
    mean = np.squeeze(np.mean(predictions, axis=1))
    # Predictive entropy of the averaged distribution: -sum(p * log p).
    # NOTE(review): with a single input (I == 1) squeeze drops the batch
    # axis and the axis=1 reduction below fails -- confirm callers always
    # pass I >= 2. (A stray debug print of mean.shape was removed.)
    variance = -1 * np.sum(np.log(mean) * mean, axis=1)
    return (mean, variance)
| StarcoderdataPython |
1747679 | import rich_click as click
# Show the positional arguments
click.rich_click.SHOW_ARGUMENTS = True
# Uncomment this line to group the arguments together with the options
# click.rich_click.GROUP_ARGUMENTS_OPTIONS = True
# NOTE: the parameter names below shadow the builtins input/type/all --
# that is the conventional click pattern of naming parameters after flags.
@click.command()
@click.argument("input", type=click.Path(), required=True)
@click.option(
    "--type",
    default="files",
    show_default=True,
    help="Type of file to sync",
)
@click.option("--all", is_flag=True, help="Sync all the things?")
@click.option("--debug/--no-debug", "-d/-n", default=False, help="Enable debug mode")
def cli(input, type, all, debug):
    """
    My amazing tool does all the things.
    This is a minimal example based on documentation
    from the 'click' package.
    You can try using --help at the top level and also for
    specific group subcommands.
    """
    # The docstring above doubles as the --help text, so it is left as-is.
    print(f"Debug mode is {'on' if debug else 'off'}")
if __name__ == "__main__":
    cli()
| StarcoderdataPython |
3301761 | <reponame>ksetdekov/HSE_DS
# Read an n x m integer matrix from stdin and print the row/column indices
# of its maximum element (first occurrence in row-major order).
n, m = map(int, input().split())
matrix = [list(map(int, input().split())) for _ in range(n)]
best_r, best_c = 0, 0
for r in range(n):
    for c in range(m):
        if matrix[r][c] > matrix[best_r][best_c]:
            best_r, best_c = r, c
print(best_r, best_c)
| StarcoderdataPython |
1621036 | <reponame>TonyBrother32/Django-shop<filename>geekshop/mainapp/urls.py
from django.urls import path
from . import views
# Namespace used by {% url 'mainapp:...' %} reverse lookups.
app_name = 'mainapp'
urlpatterns = [
    # Catalogue root: full product listing.
    path('', views.products, name='products'),
    # Products filtered to a single category.
    path('<int:category_id>/', views.category, name='category'),
    # Detail page for one product.
    path('product/<int:product_id>/', views.product, name='product'),
]
1728016 | <gh_stars>10-100
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
import yaml
from contextlib import contextmanager
from PIL import Image
from io import BytesIO
from glob import glob
from tqdm import tqdm
from zipfile import ZipFile
from .codec import IWAFile
from .unicode_utils import fix_unicode
def ensure_directory_exists(prefix, path):
    """Ensure that *path*'s parent directory exists under *prefix*.

    Replaces the old try/except-OSError-pass pattern, which silently
    swallowed genuine failures such as permission errors; ``exist_ok=True``
    only tolerates the already-exists case.
    """
    target = os.path.join(prefix, os.path.dirname(path))
    if target:
        os.makedirs(target, exist_ok=True)
def file_reader(path, progress=True):
    """Dispatch to the zip reader for .key archives, else the directory reader."""
    reader = zip_file_reader if path.endswith(".key") else directory_reader
    return reader(path, progress)
def zip_file_reader(path, progress=True):
    """Yield ``(filename, handle)`` pairs for every entry in a zip archive.

    Filenames are re-decoded from cp437 to UTF-8 to undo the zip format's
    legacy encoding; entries are yielded in sorted-name order and directory
    entries are skipped.
    """
    # BUG FIX: close the archive when iteration finishes -- it used to leak.
    with ZipFile(path, "r") as zipfile:
        for _file in zipfile.filelist:
            _file.filename = _file.filename.encode("cp437").decode("utf-8")
        iterator = sorted(zipfile.filelist, key=lambda x: x.filename)
        if progress:
            iterator = tqdm(iterator)
        for zipinfo in iterator:
            if zipinfo.filename.endswith('/'):
                continue
            if progress:
                iterator.set_description("Reading {}...".format(zipinfo.filename))
            with zipfile.open(zipinfo) as handle:
                yield (zipinfo.filename, handle)
def directory_reader(path, progress=True):
    """Yield ``(relative_name, handle)`` pairs for every file below *path*."""
    # Python <3.5 doesn't support glob with recursive, so this will have to do.
    found = set()
    for depth in range(10):
        found.update(glob(path + "/**" * depth))
    ordered = sorted(found)
    if progress:
        ordered = tqdm(ordered)
    for full_path in ordered:
        if os.path.isdir(full_path):
            continue
        relative = full_path.replace(path + "/", "")
        if progress:
            ordered.set_description("Reading {}...".format(relative))
        with open(full_path, "rb") as handle:
            yield (relative, handle)
def file_sink(path, raw=False, subfile=None):
    """Choose an output sink for *path*: stdout ("-"), a .key zip, or a directory."""
    if path == "-":
        # Stdout: dump one named subfile, or just list entry names.
        return cat_sink(subfile, raw) if subfile else ls_sink()
    if path.endswith(".key"):
        return zip_file_sink(path)
    return dir_file_sink(path, raw=raw)
@contextmanager
def dir_file_sink(target_dir, raw=False):
    """Yield an ``accept(filename, contents)`` callback that writes under *target_dir*.

    IWAFile contents are serialised to YAML (with a ``.yaml`` suffix added)
    unless *raw* is set, in which case the raw IWA bytes are written.
    """
    def accept(filename, contents):
        # Create any missing parent directories before writing.
        ensure_directory_exists(target_dir, filename)
        target_path = os.path.join(target_dir, filename)
        if isinstance(contents, IWAFile) and not raw:
            target_path += ".yaml"
        with open(target_path, 'wb') as out:
            if isinstance(contents, IWAFile):
                if raw:
                    out.write(contents.to_buffer())
                else:
                    # Dump with the C-accelerated Dumper when available.
                    yaml.dump(
                        contents.to_dict(),
                        out,
                        default_flow_style=False,
                        encoding="utf-8",
                        Dumper=Dumper,
                    )
            else:
                out.write(contents)
    accept.uses_stdout = False
    yield accept
@contextmanager
def ls_sink():
    """Yield a sink that prints each filename it receives, ignoring contents."""
    def accept(filename, contents):
        print(filename)

    accept.uses_stdout = True
    yield accept
@contextmanager
def cat_sink(subfile, raw):
    """Yield a sink that streams exactly one archive entry (*subfile*) to stdout.

    IWA contents are rendered as YAML unless *raw* is set; anything else is
    written as raw bytes. All other filenames are ignored.
    """
    def accept(filename, contents):
        if filename != subfile:
            return
        if isinstance(contents, IWAFile):
            if raw:
                sys.stdout.buffer.write(contents.to_buffer())
            else:
                print(
                    yaml.dump(
                        contents.to_dict(),
                        default_flow_style=False,
                        encoding="utf-8",
                        Dumper=Dumper,
                    ).decode('ascii')
                )
        else:
            sys.stdout.buffer.write(contents)

    accept.uses_stdout = True
    yield accept
@contextmanager
def zip_file_sink(output_path):
    """Yield a sink that buffers files in memory, then writes them all to a zip."""
    pending = {}

    def accept(filename, contents):
        pending[filename] = contents

    accept.uses_stdout = False
    yield accept
    # Flush only after the caller has finished producing files.
    print("Writing to %s..." % output_path)
    with ZipFile(output_path, 'w') as archive:
        for filename, contents in tqdm(
            iter(list(pending.items())), total=len(pending)
        ):
            payload = contents.to_buffer() if isinstance(contents, IWAFile) else contents
            archive.writestr(filename, payload)
def process_file(filename, handle, sink, replacements=[], raw=False, on_replace=None):
    """Read one archive member, apply replacements, and hand the result to *sink*.

    .iwa members are parsed (from IWA bytes, or from a previous .yaml dump)
    unless *raw*; Data/ members whose name matches a replacement are
    replaced by a resized image instead; everything else passes through.
    NOTE(review): the mutable default ``replacements=[]`` is shared across
    calls -- harmless while it is never mutated, but worth confirming.
    """
    contents = None
    if '.iwa' in filename and not raw:
        contents = handle.read()
        if filename.endswith('.yaml'):
            # A previously-dumped YAML rendering of the IWA file.
            file = IWAFile.from_dict(
                yaml.load(fix_unicode(contents.decode('utf-8')), Loader=Loader)
            )
            filename = filename.replace('.yaml', '')
        else:
            file = IWAFile.from_buffer(contents, filename)
        file_has_changed = False
        for replacement in replacements:
            # Replacing in a file is expensive, so let's
            # avoid doing so if possible.
            if replacement.should_replace(file):
                file_has_changed = True
                break
        if file_has_changed:
            data = file.to_dict()
            for replacement in replacements:
                data = replacement.perform_on(data, on_replace=on_replace)
            sink(filename, IWAFile.from_dict(data))
        else:
            sink(filename, file)
        return
    if filename.startswith("Data/"):
        file_has_changed = False
        for replacement in replacements:
            # Only "<name>.<ext>"-shaped replacement targets apply to Data/.
            find_parts = replacement.find.split(".")
            if len(find_parts) != 2:
                continue
            repl_filepart, repl_ext = find_parts
            data_filename = filename.replace("Data/", "")
            if data_filename.startswith(repl_filepart):
                # Scale this file to the appropriate size
                image = Image.open(handle)
                with open(replacement.replace, 'rb') as f:
                    read_image = Image.open(f)
                    with BytesIO() as output:
                        read_image.thumbnail(image.size, Image.ANTIALIAS)
                        read_image.save(output, image.format)
                        sink(filename, output.getvalue())
                file_has_changed = True
                break
        if file_has_changed:
            return
    # Pass-through: raw .iwa bytes already read above, or the untouched member.
    sink(filename, contents or handle.read())
def process(input_path, output_path, replacements=[], subfile=None, raw=False):
    """Stream every member of *input_path* through process_file into *output_path*.

    Returns the list of (old, new) replacement pairs actually performed.
    NOTE(review): mutable default ``replacements=[]`` -- see process_file.
    """
    completed_replacements = []
    def on_replace(replacement, old, new):
        completed_replacements.append((old, new))
    with file_sink(output_path, subfile=subfile) as sink:
        # Avoid polluting stdout when the sink streams to stdout itself.
        if not sink.uses_stdout:
            print("Reading from %s..." % input_path)
        for filename, handle in file_reader(input_path, not sink.uses_stdout):
            try:
                process_file(filename, handle, sink, replacements, raw, on_replace)
            except Exception as e:
                # NOTE(review): consider `raise ... from e` to keep the chain.
                raise ValueError("Failed to process file %s due to: %s" % (filename, e))
    return completed_replacements
| StarcoderdataPython |
4821394 | import sys
from os import path
# Do not write .pyc files next to the sources.
sys.dont_write_bytecode = True
mydir = path.abspath(path.dirname(sys.argv[0]) or ".")
# Rebuild sys.path in place: the script's own directory first, then only
# absolute, existing entries that are not duplicates of "." or mydir.
sys.path[:] = [mydir] + [
    p
    for p in sys.path
    if path.isabs(p)
    and path.exists(p)
    and not (path.samefile(p, ".") or path.samefile(p, mydir))
]
if __name__ == "__main__":
    import cp2077gui
    cp2077gui.DatamineVirtuosoFixer()
1767795 | from django.contrib import admin
from blog.blog.models import Blog, Category
class BlogAdmin(admin.ModelAdmin):
    # "posted" is kept out of the edit form -- presumably set automatically
    # on the model; confirm against the Blog model definition.
    exclude = ['posted']
    # Auto-fill the slug from the title while typing.
    prepopulated_fields = {'slug': ('title',)}
class CategoryAdmin(admin.ModelAdmin):
    prepopulated_fields = {'slug': ('title',)}
admin.site.register(Blog, BlogAdmin)
admin.site.register(Category, CategoryAdmin)
| StarcoderdataPython |
29431 | <gh_stars>0
# plot rotation period vs orbital period
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import glob
import re
from gyro import gyro_age
import teff_bv as tbv
import scipy.stats as sps
from calc_completeness import calc_comp
# np.set_printoptions(threshold=np.nan, linewidth=9)
# Global matplotlib styling for all figures produced below.
# NOTE(review): 'text.fontsize' was removed in modern matplotlib (renamed
# 'font.size') -- confirm the pinned matplotlib version accepts it.
plotpar = {'axes.labelsize': 18,
           'text.fontsize': 10,
           'legend.fontsize': 18,
           'xtick.labelsize': 18,
           'ytick.labelsize': 18,
           'text.usetex': True}
plt.rcParams.update(plotpar)
# NOTE(review): hard-coded user-specific data path.
DATA_DIR = "/Users/ruthangus/projects/turnip/turnip/data/"
def save_data(nbins):
    """Extract per-KOI rotation-period point estimates from MCMC sample files.

    For each ``*h5`` chain under DATA_DIR/koi_results, takes the mode of an
    *nbins*-bin histogram of ln(period) as the point estimate and the
    16th/84th percentiles as error bars, then writes
    ``period_point_estimates.csv``.
    """
    # BUG FIX: os.path.join discards every component preceding an absolute
    # path, so joining "/koi_results/*h5" ignored DATA_DIR entirely.
    fnames = glob.glob(os.path.join(DATA_DIR, "koi_results", "*h5"))
    koi, period, errp, errm, lnerrp, lnerrm = [], [], [], [], [], []
    for i, fn in enumerate(fnames):
        df = pd.read_hdf(fn, key="samples")
        phist, bins = np.histogram(df.ln_period.values, nbins)
        # Histogram mode of the ln-period posterior.
        ln_p = bins[phist == max(phist)][0]
        period.append(np.exp(ln_p))
        lnerrp.append(np.percentile(df.ln_period.values, 84) - ln_p)
        lnerrm.append(ln_p - np.percentile(df.ln_period.values, 16))
        # NOTE(review): exp(err/ln_p) is an unusual error transform --
        # confirm exp(ln_p + err) - exp(ln_p) was not intended.
        errp.append(np.exp(lnerrp[i] / ln_p))
        errm.append(np.exp(lnerrm[i] / ln_p))
        # KOI number is the first digit run in the file name (raw regex
        # avoids the invalid-escape deprecation warning).
        koi.append(re.findall(r'\d+', fn)[0])
    table = pd.DataFrame({"koi": np.array(koi), "period": np.array(period),
                          "errp": np.array(errp), "errm": np.array(errm)})
    table.to_csv("period_point_estimates.csv")
def make_histogram():
    """Plot a 20-bin histogram of the recovered GP rotation periods."""
    periods = pd.read_csv("period_point_estimates.csv").period
    plt.clf()
    plt.hist(periods, 20)
    plt.savefig("gp_period_hist")
def make_df():
    """Merge the period point estimates with the KOI cumulative table on KOI number."""
    df = pd.read_csv("period_point_estimates.csv")
    # skiprows=155 skips the CSV's comment header block.
    planets = pd.read_csv(os.path.join(DATA_DIR, "cumulative.csv"),
                          skiprows=155)
    kois = []
    # Integer KOI number = first digit run of the "K?????.??"-style name.
    for i, k in enumerate(planets.kepoi_name.values):
        # print(planets.kepoi_name.values[i])
        # print(type(planets.kepoi_name.values[i]))
        koi_str = re.findall('\d+', planets.kepoi_name.values[i])[0]
        kois.append(int(koi_str))
    planets["koi"] = kois
    joint = pd.merge(planets, df, on="koi")
    joint.to_csv("planet_periods.csv")
def plot_periods():
    """Plot rotation vs orbital period, coloured by Teff and sized by radius."""
    df = pd.read_csv("planet_periods.csv")
    # Keep rotators with ln(P_rot) > 1 -- presumably filters unreliable
    # short-period fits; confirm the cut's motivation.
    m = np.log(df.period.values) > 1
    lnporb = np.log(df.koi_period.values[m])
    lnprot = np.log(df.period.values[m])
    porb = df.koi_period.values[m]
    prot = df.period.values[m]
    radius = np.log(df.koi_prad.values[m])
    teff = df.koi_steff.values[m]
    plt.clf()
    plt.scatter(porb, prot, s=5*radius, c=teff, vmin=4400, vmax=7000)
    plt.loglog()
    plt.colorbar()
    plt.xlabel("$\ln(\mathrm{Orbital~period})$")
    plt.ylabel("$\ln(\mathrm{Rotation~period})$")
    plt.subplots_adjust(bottom=.15)
    plt.savefig("period_period")
    # find the short rotators
    m = np.log(df.period.values) < 1
    print(df.koi.values[m])
    # import kplr
    # client = kplr.API()
    # for i, k in enumerate(df.koi.values[m]):
    # print(k)
    # star = client.koi("{}.01".format(k))
    # star.get_light_curves(fetch=True)
def plot_radii():
    """Compare planet radius distributions of gyro-young vs gyro-old hosts."""
    df = pd.read_csv("planet_periods.csv")
    m = np.log(df.period.values) > 1
    prot = df.period.values[m]
    radius = np.log(df.koi_prad.values[m])
    teff = df.koi_steff.values[m]
    logg = df.koi_slogg.values[m]
    feh = np.zeros(len(logg))
    # Gyrochronology age from rotation period, Teff, [Fe/H]=0 and logg.
    gyro = gyro_age(prot, teff, feh, logg)
    age = gyro.barnes07("mh")
    m = np.isfinite(age)
    plt.clf()
    plt.scatter(np.log(age[m]), np.log(radius[m]), c=teff[m], s=10, vmin=4400,
                vmax=7000)
    plt.colorbar()
    plt.xlabel("$\ln(\mathrm{Age,~Gyr})$")
    plt.ylabel("$\ln(\mathrm{Radius}, R_J)$")
    plt.subplots_adjust(bottom=.15)
    plt.savefig("age_radius")
    # Split young/old at 3.295 Gyr -- presumably the sample median; confirm.
    l = age[m] < 3.295
    print(len(radius[m][l]))
    print(len(radius[m][~l]))
    plt.clf()
    plt.hist(radius[m][l], 50, normed=True, alpha=.5, label="young")
    plt.hist(radius[m][~l], 40, normed=True, alpha=.5, label="old")
    plt.legend()
    plt.xlabel("Radius")
    plt.savefig("radius_hist")
    # Two-sample KS test between the young and old radius distributions.
    print(sps.ks_2samp(radius[m][l], radius[m][~l]))
    cum_young = np.cumsum(radius[m][l]) / sum(radius[m][l])
    cum_old = np.cumsum(radius[m][~l]) / sum(radius[m][~l])
    plt.clf()
    plt.plot(cum_young, label="young")
    plt.plot(cum_old, label="old")
    plt.savefig("radius_cdf")
    # # print(np.unique(df.kepid.values[m]))
    # for i in np.unique(df.kepid.values[m]):
    #     print("KIC", str(int(i)).zfill(9))
    # Repeat the comparison for small planets only (ln R < 0.5).
    n = radius[m][l] < .5
    n2 = radius[m][~l] < .5
    print(len(radius[m][l][n]))
    print(len(radius[m][~l][n2]))
    plt.clf()
    plt.hist(radius[m][l][n], 50, normed=True, alpha=.5, label="young")
    plt.hist(radius[m][~l][n2], 40, normed=True, alpha=.5, label="old")
    plt.legend()
    plt.xlabel("Radius")
    plt.savefig("radius_hist_hj")
    print(sps.ks_2samp(radius[m][l][n], radius[m][~l][n2]))
    n = radius[m] < .5
    plt.clf()
    plt.scatter(np.log(age[m][n]), np.log(radius[m][n]), c=teff[m][n], s=10,
                vmin=4400, vmax=7000)
    plt.colorbar()
    plt.xlabel("$\ln(\mathrm{Age,~Gyr})$")
    plt.ylabel("$\ln(\mathrm{Radius}, R_J)$")
    plt.subplots_adjust(bottom=.15)
    plt.savefig("age_radius_hj")
def plot_completeness():
    """Compute detection completeness per target and plot it vs rotation period."""
    df = pd.read_csv("planet_periods.csv")
    comp = np.zeros((len(df.kepid.values)))
    print(df.kepid.values[:10])
    # NOTE(review): only the first 10 targets are computed (debug limit?);
    # the rest of `comp` stays zero but is still stored in the dataframe.
    for i, kepid in enumerate(df.kepid.values[:10]):
        print(i, "of", len(df.kepid.values))
        print("id = ", kepid)
        # Arguments presumably (kepid, period_days=365.25, radius=1) --
        # confirm against calc_comp's signature.
        comp[i] = calc_comp(kepid, 365.25, 1.)
        print(comp[i])
    df["probtot"] = comp
    plt.clf()
    plt.plot(comp[:10], df.period.values[:10], "k.")
    plt.savefig("comp_vs_period")
if __name__ == "__main__":
    # save_data(100)
    # make_histogram()
    # make_df()
    # plot_periods()
    # plot_radii()
    plot_completeness()
| StarcoderdataPython |
54207 | <filename>setup.py
#!/usr/bin/env python3
from app import db
# One-off bootstrap: create every table declared on the app's SQLAlchemy models.
db.create_all()
| StarcoderdataPython |
3394881 | <reponame>schlunsen/mopidy-juliana<gh_stars>0
import logging
import pathlib
import pkg_resources
from mopidy import config, ext
# Version is taken from the installed distribution's metadata.
__version__ = pkg_resources.get_distribution("Mopidy-Juliana").version
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
    """Mopidy extension entry point for the Juliana web app and frontend."""
    dist_name = "Mopidy-Juliana"
    ext_name = "juliana"
    version = __version__
    def get_default_config(self):
        # Defaults shipped alongside the package in ext.conf.
        return config.read(pathlib.Path(__file__).parent / "ext.conf")
    def get_config_schema(self):
        schema = super().get_config_schema()
        # TODO: Comment in and edit, or remove entirely
        schema["data_dir"] = config.String()
        #schema["password"] = config.Secret()
        return schema
    def setup(self, registry):
        # Registers both an http:app (built by juliana_factory) and a frontend.
        # You will typically only implement one of the following things
        # in a single extension.
        # TODO: Edit or remove entirely
        from .frontend import JulianaFrontend
        # TODO: Edit or remove entirely
        # from .backend import FoobarBackend
        # registry.add("backend", FoobarBackend)
        # TODO: Edit or remove entirely
        # registry.add(
        #     "http:static",
        #     {
        #         "name": self.ext_name,
        #         "path": str(pathlib.Path(__file__).parent / "static"),
        #     },
        # )
        registry.add(
            "http:app", {"name": self.ext_name, "factory": juliana_factory}
        )
        registry.add("frontend", JulianaFrontend)
##
# Frontend factory
##
def juliana_factory(config, core):
    """Build the Tornado URL routes for the Juliana web app."""
    from tornado.web import StaticFileHandler
    from .handlers import HttpHandler, ReactRouterHandler, WebsocketHandler
    path = pathlib.Path(__file__).parent / "static"
    return [
        # HTTP API endpoint and websocket channel, both given core + config.
        (r"/http/([^/]*)", HttpHandler, {"core": core, "config": config}),
        (r"/ws/?", WebsocketHandler, {"core": core, "config": config}),
        # Static assets and bundled build artefacts.
        (r"/assets/(.*)", StaticFileHandler, {"path": path / "assets"}),
        (r"/((.*)(?:css|js|json|map)$)", StaticFileHandler, {"path": path}),
        # Everything else falls through to the SPA's index.html.
        (r"/(.*)", ReactRouterHandler, {"path": path / "index.html"}),
    ]
67799 | r"""Main loop for each swarm agent
___ _ _
/ __|_ __ ____ _ _ _ _ __ /_\ __ _ ___ _ _| |_
\__ \ V V / _` | '_| ' \ / _ \/ _` / -_) ' \ _|
|___/\_/\_/\__,_|_| |_|_|_| /_/ \_\__, \___|_||_\__|
|___/
"""
from .control.task_manager import RemoteTaskManager
from drivers import SwarmBot
import settings
if __name__ == '__main__':
    # Shared task manager for the swarm, reached over HTTP.
    task_server = RemoteTaskManager(
        remote="http://{}:{}".format(
            settings.TASK_MANAGER_IP,
            settings.MUTEX_SERVER_PORT))
    robot = SwarmBot()
    while True:
        # vision loop
        # NOTE(review): empty busy-wait pins a CPU core -- confirm whether a
        # sleep or the actual vision processing belongs here.
        pass
| StarcoderdataPython |
1625265 | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import lale.docstrings
import lale.operators
class _BaselineClassifierImpl:
def __init__(self):
pass
def fit(self, X, y):
label_to_count = {}
for label in y:
label_to_count[label] = label_to_count.get(label, 0) + 1
majority_label = None
for label, count in label_to_count.items():
if majority_label is None or count > label_to_count[majority_label]:
majority_label = label
self._majority_label = majority_label
return self
def predict(self, X):
result = np.full((X.shape[0],), self._majority_label)
return result
def score(self, X, y):
from sklearn.metrics import accuracy_score
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
# JSON schema for the (empty) hyperparameter space of the baseline classifier.
_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
            "type": "object",
            "relevantToOptimizer": [],
            "additionalProperties": False,
        }
    ]
}
# Schema for fit(X, y): numeric or string labels are both accepted.
_input_fit_schema = {
    "required": ["X", "y"],
    "type": "object",
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {"type": "array"},
        },
        "y": {
            "description": "Target class labels.",
            "anyOf": [
                {"type": "array", "items": {"type": "number"}},
                {"type": "array", "items": {"type": "string"}},
            ],
        },
    },
}
# Schema for predict(X).
_input_predict_schema = {
    "type": "object",
    "properties": {
        "X": {
            "description": "Features; the outer array is over samples.",
            "type": "array",
            "items": {"type": "array", "items": {"laleType": "Any"}},
        }
    },
}
# Schema for predict's return value.
_output_predict_schema = {
    "description": "Predicted class label per sample.",
    "anyOf": [
        {"type": "array", "items": {"type": "number"}},
        {"type": "array", "items": {"type": "string"}},
    ],
}
# Top-level schema combining the pieces above, plus operator metadata tags.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Baseline classifier always predicts the majority class.",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.baseline_classifier.html",
    "import_from": "lale.lib.lale",
    "type": "object",
    "tags": {"pre": [], "op": ["estimator", "classifier"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_predict": _input_predict_schema,
        "output_predict": _output_predict_schema,
    },
}
# Wrap the impl class as a Lale operator carrying its JSON schemas.
BaselineClassifier = lale.operators.make_operator(
    _BaselineClassifierImpl, _combined_schemas
)
# Attach generated docstrings derived from the schemas.
lale.docstrings.set_docstrings(BaselineClassifier)
| StarcoderdataPython |
1642306 | <filename>Program/PI/pub.py<gh_stars>0
import paho.mqtt.client as mqtt
import os
import serial
import time
import random
from time import strftime
from datetime import datetime
import requests
import json
import schedule
import numpy as np
import tensorflow as tf
import random
import time
# Pre-trained Keras classifier used by predict() to flag rain/snow conditions.
model2 = tf.keras.models.load_model('./my_model')
def on_message(client, obj, msg):
    """Paho MQTT callback: log an incoming message's topic, QoS and payload."""
    print("{} {} {}".format(msg.topic, msg.qos, msg.payload))
def on_publish(client, obj, mid):
    """Paho MQTT callback: log the message id of a completed publish."""
    print("mid: {}".format(mid))
# getting dict with temperature, date and icon for forecast
def day_forecast():
    """Extract the midday (12:00) entries from the global ``forecast_response``.

    :return: ``[entries, weekday_names]`` where each entry holds the
        timestamp, temperature, icon code and text date for one day

    NOTE(review): relies on the module-global ``forecast_response`` being
    populated by the main loop before this is called.
    """
    temp_day = []
    for i in forecast_response['list']:
        foo = '12:00:00'
        if foo in i['dt_txt']:
            dictor = {
                'date': i['dt'],
                'temp': i['main']['temp'],
                'icon': i['weather'][0]['icon'],
                'date_txt': i['dt_txt']
            }
            temp_day.append(dictor)
    # This for loop is selecting all DT from response and making list of it
    temport = []
    for d in temp_day:
        temport.append(d['date'])
    # This loop converts each DT timestamp to its weekday name
    dates_formated = []
    for value in temport:
        dates_formated.append(
            datetime.utcfromtimestamp(value).strftime('%A'))
    return [temp_day, dates_formated]
def night_forecast():
    """Collect the 03:00 entries (date text and temperature) from the global
    ``forecast_response``."""
    marker = '03:00:00'
    return [
        {'date': entry['dt_txt'], 'temp': entry['main']['temp']}
        for entry in forecast_response['list']
        if marker in entry['dt_txt']
    ]
def send_mail(city, temperature, humidity, pressure, wind, description):
    """Email the current weather conditions via Gmail SMTP.

    SECURITY(review): sender/recipient/password are hard-coded placeholders
    below -- move them to environment variables or a secrets store before
    deployment.
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    mail= MIMEMultipart()
    sender_email = "<EMAIL>" # replace with sender mail
    rec_email = "<EMAIL>" # replace with reciver mail
    password = "<PASSWORD>" # replace with sender mail password
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.starttls()
    server.login(sender_email, password)
    mail['From']='Weather Notification System'
    mail['To'] = rec_email
    mail['Subject']='Weather App – Alert'
    # Format the readings with their units for the plain-text body.
    city = city
    temperature = str(temperature)+ " C"
    humidity = str(humidity) + " %"
    pressure = str(pressure) + " hPa"
    wind = str(wind) + " m/s"
    description = description
    body=" City: "+str(city)+"\n Temperature: "+str(temperature)+"\n Humidity: "+str(humidity)+"\n Pressure: "+str(pressure)+"\n Wind: "+str(wind)+"\n Description: "+ str(description)
    mail.attach(MIMEText(body,'plain'))
    msg=mail.as_string()
    server.sendmail(sender_email, rec_email, msg)
    print('Mail Sent')
    # NOTE(review): the SMTP connection is never closed (server.quit()).
# Status message -- presumably published to the client; rewritten by the
# scheduled jobs registered below.
email = "Email Will Send Your Mail."
def email12():
    # Record that the first daily digest went out.
    global email
    email = "Email Send At 12PM. Please Check Your Mail."
def email06():
    # Record that the evening digest went out.
    global email
    email = "Email Send At 06PM. Please Check Your Mail."
# Send the latest readings twice a day and flip the status message.
# NOTE(review): email12 runs at 00:00 but announces "12PM" -- confirm which
# time is intended. The lambdas read the *_float globals at call time; those
# are only defined once the main loop below has run at least once.
schedule.every().day.at("00:00").do(lambda: send_mail(city_float, temp_float, hum_float, pre_float, wind_float, des_float))
schedule.every().day.at("18:00").do(lambda: send_mail(city_float, temp_float, hum_float, pre_float, wind_float, des_float))
schedule.every().day.at("00:00").do(email12)
schedule.every().day.at("18:00").do(email06)
def generate_sensor_data():
    """Simulate indoor sensor readings, storing them in module globals."""
    global temp, hum, pre
    temp, hum, pre = (
        random.randint(20, 30),
        random.randint(60, 90),
        random.randint(1000, 1120),
    )
def predict(temp_float, hum_float, pre_float):
    """Classify current conditions with the pre-trained model.

    :param temp_float: outdoor temperature
    :param hum_float: humidity
    :param pre_float: pressure
    :return: a human-readable suggestion string
    """
    features = np.array([[temp_float, hum_float, pre_float]])
    # NOTE(review): predict_classes was removed in TF >= 2.6; migrate to
    # np.argmax(model2.predict(features), axis=-1) when upgrading.
    pred = model2.predict_classes(features)
    # BUG FIX: the second test used to be a separate `if`, so its `else`
    # branch overwrote the rain suggestion whenever pred == [1]. A proper
    # if/elif/else chain preserves each message.
    if pred == [1]:
        return "Most Probably Today Will Rain. So, Don't Miss Your Jacket."
    if pred == [2]:
        return "Most Probably Today Will Snow."
    return "I Cannot Predict Whether Rain or Snow."
def check_temp(temp_float, temp):
    """Compare the API (outside) temperature against the sensor (inside) one.

    Args:
        temp_float: outside temperature from the weather API
        temp: inside temperature from the simulated sensor

    Returns:
        A message saying which side is warmer; ties count as "inside".
    """
    if temp_float > temp:
        return "Outside Temperature Higher Than Inside."
    return "Inside Temperature Higher Than Outside."
# Main loop: poll OpenWeatherMap once per second, run the scheduler, and
# publish sensor/API/forecast payloads over MQTT to an AWS-hosted broker.
try:
    mqttc = mqtt.Client()
    mqttc.on_message = on_message
    mqttc.on_publish = on_publish
    # Connect
    mqttc.username_pw_set("mqtt_username", "mqtt_passwd") # Replace with mqtt username and passwd
    mqttc.connect('AWS_E2C_IP_address', 1883, 60) # Replace your AWS E2C IP_address
    # Continue the network loop, exit when an error occurs
    while True :
        # Globals so the scheduled send_mail lambdas see the latest readings.
        global temp_float, hum_float, pre_float, wind_float, city_float, des_float
        generate_sensor_data()
        API_KEY = '30ad27b312182fa9f7569003a337536b'
        # Replace your city name
        city = 'Dambulla'
        # getting api
        url = f'http://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid={API_KEY}'
        response = requests.get(url).json()
        # If name of city is wrong spell or unknown
        if response.get('cod') != 200:
            message = response.get('message', '')
        # NOTE(review): execution falls through even when cod != 200, so the
        # dict below raises KeyError on a bad response and the bare except
        # at the bottom silently ends the program -- confirm this is intended.
        weather = {
            'city': city,
            'temperature': response['main']['temp'],
            'humidity': response['main']['humidity'],
            'pressure': response['main']['pressure'],
            'wind': response['wind']['speed'],
            'description': response['weather'][0]['description'],
            'icon': response['weather'][0]['icon'],
        }
        temp_float = weather.get('temperature')
        hum_float = weather.get('humidity')
        pre_float = weather.get('pressure')
        wind_float = weather.get('wind')
        city_float = weather.get('city')
        des_float = weather.get('description')
        temp_int = round(temp_float)
        # This api is showing forecast for five days with days/nights
        url_forecast = f'http://api.openweathermap.org/data/2.5/forecast?q={city}&units=metric&appid={API_KEY}'
        forecast_response = requests.get(url_forecast).json()
        day = day_forecast()
        night = night_forecast()
        prediction = predict(temp_float, hum_float, pre_float)
        instuction = check_temp(temp_float, temp)
        # print(prediction)
        # Three topics: raw simulated sensor data, API-derived data, forecast.
        sensor = {
            "temp": temp,
            "hum": hum,
            "pre": pre
        }
        api = {
            "temperature": temp_int,
            "humidity": weather.get('humidity'),
            "pressure": weather.get('pressure'),
            "wind": weather.get('wind'),
            "city" :weather.get('city'),
            "description": weather.get('description'),
            "icon": weather.get('icon'),
            "prediction": prediction,
            "instuction": instuction,
            "email": email
        }
        forecast = {
            "day": day,
            "night": night
        }
        mqttc.publish("sensor", (json.dumps(sensor)))
        mqttc.publish("api", (json.dumps(api)))
        mqttc.publish("forecast", (json.dumps(forecast)))
        print('published')
        # Fire any due mail jobs, then throttle the loop to ~1 Hz.
        schedule.run_pending()
        time.sleep(1)
except:
    # NOTE(review): bare except swallows every error, and ``exit`` is only
    # referenced (not called) -- the builtin is not invoked here.
    exit
| StarcoderdataPython |
3248005 | <gh_stars>0
"""Tests for runway.cfngin.actions.base."""
# pylint: disable=no-self-use,protected-access,unused-argument
# pyright: basic
import unittest
import botocore.exceptions
from botocore.stub import ANY, Stubber
from mock import MagicMock, PropertyMock, patch
from runway.cfngin.actions.base import BaseAction
from runway.cfngin.blueprints.base import Blueprint
from runway.cfngin.plan import Graph, Plan, Step
from runway.cfngin.providers.aws.default import Provider
from runway.cfngin.session_cache import get_session
from ..factories import MockProviderBuilder, mock_context
MOCK_VERSION = "01234abcdef"
class MockBlueprint(Blueprint):
    """Minimal Blueprint test double with a fixed, deterministic version."""
    # Single string variable so Blueprint variable resolution has something to do.
    VARIABLES = {
        "Param1": {"default": "default", "type": str},
    }
    @property
    def version(self) -> str:
        """Return the constant MOCK_VERSION so template URLs are predictable."""
        return MOCK_VERSION
    def create_template(self) -> None:
        """No-op: these tests never render an actual template."""
class TestBaseAction(unittest.TestCase):
    """Tests for runway.cfngin.actions.base.BaseAction.

    Covers the CloudFormation bucket bootstrap (``ensure_cfn_bucket``),
    plan generation with and without a persistent graph, and the stack
    template URL layout.  AWS calls are intercepted with botocore's
    ``Stubber`` so no network traffic occurs.
    """
    def setUp(self) -> None:
        """Run before tests."""
        self.region = "us-east-1"
        self.session = get_session(self.region)
        self.provider = Provider(self.session)
        # Two-stack config (stack2 depends on stack1) without a persistent graph.
        self.config_no_persist = {
            "stacks": [
                {"name": "stack1", "template_path": "."},
                {"name": "stack2", "template_path": ".", "requires": ["stack1"]},
            ]
        }
        # Same stacks, but tracked by a persistent graph stored at test.json.
        self.config_persist = {
            "persistent_graph_key": "test.json",
            "stacks": [
                {"name": "stack1", "template_path": "."},
                {"name": "stack2", "template_path": ".", "requires": ["stack1"]},
            ],
        }
    def test_ensure_cfn_bucket_exists(self) -> None:
        """Test ensure cfn bucket exists."""
        session = get_session("us-east-1")
        provider = Provider(session)
        action = BaseAction(
            context=mock_context("mynamespace"),
            provider_builder=MockProviderBuilder(provider=provider),
        )
        # head_bucket succeeding means no create_bucket call should follow.
        stubber = Stubber(action.s3_conn)
        stubber.add_response(
            "head_bucket", service_response={}, expected_params={"Bucket": ANY}
        )
        with stubber:
            action.ensure_cfn_bucket()
    def test_ensure_cfn_bucket_does_not_exist_us_east(self) -> None:
        """Test ensure cfn bucket does not exist us east."""
        session = get_session("us-east-1")
        provider = Provider(session)
        action = BaseAction(
            context=mock_context("mynamespace"),
            provider_builder=MockProviderBuilder(provider=provider),
        )
        stubber = Stubber(action.s3_conn)
        stubber.add_client_error(
            "head_bucket",
            service_error_code="NoSuchBucket",
            service_message="Not Found",
            http_status_code=404,
        )
        # In us-east-1 create_bucket takes no CreateBucketConfiguration.
        stubber.add_response(
            "create_bucket", service_response={}, expected_params={"Bucket": ANY}
        )
        with stubber:
            action.ensure_cfn_bucket()
    def test_ensure_cfn_bucket_does_not_exist_us_west(self) -> None:
        """Test ensure cfn bucket does not exist us west."""
        session = get_session("us-west-1")
        provider = Provider(session)
        action = BaseAction(
            context=mock_context("mynamespace"),
            provider_builder=MockProviderBuilder(provider=provider, region="us-west-1"),
        )
        stubber = Stubber(action.s3_conn)
        stubber.add_client_error(
            "head_bucket",
            service_error_code="NoSuchBucket",
            service_message="Not Found",
            http_status_code=404,
        )
        # Outside us-east-1 the bucket must carry a LocationConstraint.
        stubber.add_response(
            "create_bucket",
            service_response={},
            expected_params={
                "Bucket": ANY,
                "CreateBucketConfiguration": {"LocationConstraint": "us-west-1"},
            },
        )
        with stubber:
            action.ensure_cfn_bucket()
    def test_ensure_cfn_forbidden(self) -> None:
        """Test ensure cfn forbidden."""
        session = get_session("us-west-1")
        provider = Provider(session)
        action = BaseAction(
            context=mock_context("mynamespace"),
            provider_builder=MockProviderBuilder(provider=provider),
        )
        stubber = Stubber(action.s3_conn)
        stubber.add_client_error(
            "head_bucket",
            service_error_code="AccessDenied",
            service_message="Forbidden",
            http_status_code=403,
        )
        # A 403 (unlike a 404) must propagate instead of triggering creation.
        with stubber:
            with self.assertRaises(botocore.exceptions.ClientError):
                action.ensure_cfn_bucket()
    @patch(
        "runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock
    )
    @patch(
        "runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock
    )
    def test_generate_plan_no_persist_exclude(
        self, mock_stack_action: PropertyMock, mock_tags: PropertyMock
    ) -> None:
        """Test generate plan no persist exclude."""
        mock_stack_action.return_value = MagicMock()
        mock_tags.return_value = {}
        context = mock_context(
            namespace="test",
            extra_config_args=self.config_no_persist,
            region=self.region,
        )
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(
                provider=self.provider, region=self.region
            ),
        )
        plan = action._generate_plan(include_persistent_graph=False)
        # No persistent graph configured, so its tags are never consulted.
        mock_tags.assert_not_called()
        self.assertIsInstance(plan, Plan)
        # order is different between python2/3 so can't compare dicts
        result_graph_dict = plan.graph.to_dict()
        self.assertEqual(2, len(result_graph_dict))
        self.assertEqual(set(), result_graph_dict["stack1"])
        self.assertEqual(set(["stack1"]), result_graph_dict["stack2"])
        self.assertEqual(BaseAction.DESCRIPTION, plan.description)
        self.assertTrue(plan.require_unlocked)
    @patch(
        "runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock
    )
    @patch(
        "runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock
    )
    def test_generate_plan_no_persist_include(
        self, mock_stack_action: PropertyMock, mock_tags: PropertyMock
    ) -> None:
        """Test generate plan no persist include."""
        mock_stack_action.return_value = MagicMock()
        mock_tags.return_value = {}
        context = mock_context(
            namespace="test",
            extra_config_args=self.config_no_persist,
            region=self.region,
        )
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(
                provider=self.provider, region=self.region
            ),
        )
        # include_persistent_graph is a no-op when no graph is configured.
        plan = action._generate_plan(include_persistent_graph=True)
        mock_tags.assert_not_called()
        self.assertIsInstance(plan, Plan)
        # order is different between python2/3 so can't compare dicts
        result_graph_dict = plan.graph.to_dict()
        self.assertEqual(2, len(result_graph_dict))
        self.assertEqual(set(), result_graph_dict["stack1"])
        self.assertEqual(set(["stack1"]), result_graph_dict["stack2"])
        self.assertEqual(BaseAction.DESCRIPTION, plan.description)
        self.assertTrue(plan.require_unlocked)
    @patch(
        "runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock
    )
    @patch(
        "runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock
    )
    def test_generate_plan_with_persist_exclude(
        self, mock_stack_action: PropertyMock, mock_tags: PropertyMock
    ) -> None:
        """Test generate plan with persist exclude."""
        mock_stack_action.return_value = MagicMock()
        mock_tags.return_value = {}
        context = mock_context(
            namespace="test", extra_config_args=self.config_persist, region=self.region
        )
        # "removed" exists only in the persistent graph, not in the config.
        persist_step = Step.from_stack_name("removed", context)
        context._persistent_graph = Graph.from_steps([persist_step])
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(
                provider=self.provider, region=self.region
            ),
        )
        plan = action._generate_plan(include_persistent_graph=False)
        self.assertIsInstance(plan, Plan)
        # order is different between python2/3 so can't compare dicts
        result_graph_dict = plan.graph.to_dict()
        # Excluded: the orphaned "removed" stack must not be in the plan.
        self.assertEqual(2, len(result_graph_dict))
        self.assertEqual(set(), result_graph_dict["stack1"])
        self.assertEqual(set(["stack1"]), result_graph_dict["stack2"])
        self.assertEqual(BaseAction.DESCRIPTION, plan.description)
        self.assertTrue(plan.require_unlocked)
    @patch(
        "runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock
    )
    @patch(
        "runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock
    )
    def test_generate_plan_with_persist_include(
        self, mock_stack_action: PropertyMock, mock_tags: PropertyMock
    ) -> None:
        """Test generate plan with persist include."""
        mock_stack_action.return_value = MagicMock()
        mock_tags.return_value = {}
        context = mock_context(
            namespace="test", extra_config_args=self.config_persist, region=self.region
        )
        persist_step = Step.from_stack_name("removed", context)
        context._persistent_graph = Graph.from_steps([persist_step])
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(
                provider=self.provider, region=self.region
            ),
        )
        plan = action._generate_plan(include_persistent_graph=True)
        self.assertIsInstance(plan, Plan)
        mock_tags.assert_called_once()
        # order is different between python2/3 so can't compare dicts
        result_graph_dict = plan.graph.to_dict()
        # Included: the orphaned "removed" stack joins the plan with no deps.
        self.assertEqual(3, len(result_graph_dict))
        self.assertEqual(set(), result_graph_dict["stack1"])
        self.assertEqual(set(["stack1"]), result_graph_dict["stack2"])
        self.assertEqual(set(), result_graph_dict["removed"])
        self.assertEqual(BaseAction.DESCRIPTION, plan.description)
        self.assertTrue(plan.require_unlocked)
    @patch(
        "runway.context.CfnginContext.persistent_graph_tags", new_callable=PropertyMock
    )
    @patch(
        "runway.cfngin.actions.base.BaseAction._stack_action", new_callable=PropertyMock
    )
    def test_generate_plan_with_persist_no_lock_req(
        self, mock_stack_action: PropertyMock, mock_tags: PropertyMock
    ) -> None:
        """Test generate plan with persist no lock req."""
        mock_stack_action.return_value = MagicMock()
        mock_tags.return_value = {}
        context = mock_context(
            namespace="test", extra_config_args=self.config_persist, region=self.region
        )
        persist_step = Step.from_stack_name("removed", context)
        context._persistent_graph = Graph.from_steps([persist_step])
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(
                provider=self.provider, region=self.region
            ),
        )
        # require_unlocked=False must be propagated onto the resulting plan.
        plan = action._generate_plan(
            include_persistent_graph=True, require_unlocked=False
        )
        self.assertIsInstance(plan, Plan)
        mock_tags.assert_called_once()
        # order is different between python2/3 so can't compare dicts
        result_graph_dict = plan.graph.to_dict()
        self.assertEqual(3, len(result_graph_dict))
        self.assertEqual(set(), result_graph_dict["stack1"])
        self.assertEqual(set(["stack1"]), result_graph_dict["stack2"])
        self.assertEqual(set(), result_graph_dict["removed"])
        self.assertEqual(BaseAction.DESCRIPTION, plan.description)
        self.assertFalse(plan.require_unlocked)
    def test_stack_template_url(self) -> None:
        """Test stack template url."""
        context = mock_context("mynamespace")
        blueprint = MockBlueprint(name="myblueprint", context=context)
        region = "us-east-1"
        endpoint = "https://example.com"
        session = get_session(region)
        provider = Provider(session)
        action = BaseAction(
            context=context,
            provider_builder=MockProviderBuilder(provider=provider, region=region),
        )
        # Stub the endpoint lookup so the expected URL is fully deterministic.
        with patch(
            "runway.cfngin.actions.base.get_s3_endpoint",
            autospec=True,
            return_value=endpoint,
        ):
            self.assertEqual(
                action.stack_template_url(blueprint),
                "%s/%s/stack_templates/%s/%s-%s.json"
                % (
                    endpoint,
                    "cfngin-mynamespace-us-east-1",
                    "mynamespace-myblueprint",
                    "myblueprint",
                    MOCK_VERSION,
                ),
            )
| StarcoderdataPython |
1766771 | from django.db import models
class ArticleBody(models.Model):
    """Model holding the raw text body of a single article."""
    id = models.AutoField(primary_key=True)
    body = models.TextField()
    # NOTE(review): ``readonly_fields`` is normally a ModelAdmin option, not a
    # model attribute -- confirm this placement is intentional.
    readonly_fields = ('id',)
    def __str__(self):
        # Display the primary key as the object's string form.
        return f"{self.id}"
1799929 | <reponame>Qcaria/Kinda-Space-Invaders
import pygame
# Images -- NOTE(review): sprite paths are hard-coded to one developer's
# machine; they will fail anywhere else.
shipimg = pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/ship.png")
shipimg2 = pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/ship2.png")
bluealien = pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/alienblue.png")
bluealien2 = pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/alienblue2.png")
aliengreen = pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/aliengreen.png")
aliengreen2= pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/aliengreen2.png")
alienred= pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/alienred.png")
alienred2= pygame.image.load("C:/Users/Carlos/PycharmProjects/SpaceInvaders/sprites/alienred2.png")
# Colors (RGB tuples)
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
# Variables
clock = 0
3290588 | <reponame>cobanov/demc-homework
import sqlalchemy
import csv
import mariadb
import sys
import pandas as pd
from sqlalchemy import create_engine
import pymysql
import numpy
# Source TSV dumps of the public IMDb datasets.
basics_path = './datasets/title.basics.tsv'
akas_path = './datasets/title.akas.tsv'
crew_path = './datasets/title.crew.tsv'
episode_path = './datasets/title.episode.tsv'
principals_path = './datasets/title.principals.tsv'
ratings_path = './datasets/title.ratings.tsv'
# Parameterised INSERT templates, one per target table.
basics_query = "INSERT INTO basics VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
# NOTE(review): the remaining templates all insert into
# (first_name,last_name), which does not match their table contents --
# they look like copy/paste placeholders that were never finished.
akas_query = "INSERT INTO akas (first_name,last_name) VALUES (?, ?)"
crew_query = "INSERT INTO crew (first_name,last_name) VALUES (?, ?)"
episode_query = "INSERT INTO episode (first_name,last_name) VALUES (?, ?)"
principals_query = "INSERT INTO principals (first_name,last_name) VALUES (?, ?)"
ratings_query = "INSERT INTO ratings (first_name,last_name) VALUES (?, ?)"
# Maps each dump file to the statement that loads it.
datasets = {basics_path: basics_query,
            akas_path: akas_query,
            crew_path: crew_query,
            episode_path: episode_query,
            principals_path: principals_query,
            ratings_path: ratings_query}
# for path, query in datasets.items():
#     print(path, query)
def connect_db():
    """Open a connection to the local ``imdb`` MariaDB database.

    Returns:
        A live ``mariadb`` connection object.

    On connection failure the error is printed and the process exits
    with status 1 (matching the original behaviour).
    """
    params = dict(
        user="root",
        password="<PASSWORD>",
        host="127.0.0.1",
        port=3306,
        database="imdb",
    )
    try:
        return mariadb.connect(**params)
    except mariadb.Error as exc:
        print(f"Error connecting to MariaDB Platform: {exc}")
        sys.exit(1)
# Connection string for the (currently commented-out) SQLAlchemy path.
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:55347314@127.0.0.1/imdb'
# Test if it works
# engine = sqlalchemy.create_engine(SQLALCHEMY_DATABASE_URI, echo=True)
# data = pd.read_csv(basics_path, delimiter='\t')
# print('data has been read')
# # data = data.replace(r'\N', numpy.nan)
# # print('data has been replaced')v
# conn = connect_db()
# print('connected')
# cur = conn.cursor()
# for x, y in data.iterrows():
#     print(x, '/', data.shape[0])
#     print(tuple(y))
#     cur.execute(basics_query, tuple(y))
# conn.commit()
# Active path: stream the basics TSV row-by-row into MariaDB.
conn = connect_db()
print('connected')
cur = conn.cursor()
# NOTE(review): the file handle is never closed.
file = open(basics_path)
csv_data = csv.reader(file, delimiter='\t')
# NOTE(review): skipHeader is set but never read; the header row is
# actually skipped by the __next__() call below.
skipHeader = True
csv_data.__next__()
i=0
for row in csv_data:
    i += 1
    cur.execute(basics_query, tuple(row))
    print(i )
# Single commit after the whole file has been inserted.
conn.commit()
| StarcoderdataPython |
3350585 | <reponame>sebras/berkeman-ExpertmakerAccelerator<filename>dscmdhelper.py
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
# common functionality for ds* commands
from __future__ import division, print_function
import sys
from glob import glob
from os.path import join, exists, realpath
from functools import partial
from locale import resetlocale
from configfile import get_config
from jobid import WORKSPACES
from dataset import Dataset
from jobid import get_path, get_workspace_name
def init():
    """Prepare the process for dataset commands.

    Resets the locale (for number formatting) and populates the global
    WORKSPACES mapping from every parseable ``*.conf`` file found near
    the script location (same dir, parent dir, and their ``conf/``
    subdirectories).  ``.template`` files and unparseable configs are
    skipped silently.
    """
    resetlocale()
    rel = partial(join, sys.path[0])
    candidates = glob(rel("*.conf")) + glob(rel("../*.conf")) + glob(rel("conf/*")) + glob(rel("../conf/*"))
    for fn in candidates:
        if fn.lower().endswith(".template"):
            continue
        try:
            cfg = get_config(fn, False)
        except Exception:
            continue
        WORKSPACES.update({k: v[0] for k, v in cfg['workdir'].items()})
def name2ds(n):
    """Resolve *n* (a jobid, (jobid, name) pair, or filesystem path) to a
    Dataset, registering its workdir in WORKSPACES and pinning the global
    slice count from the job's ``*-slices.conf`` file."""
    if exists(n):
        # it's a path - dig out parts, maybe update WORKSPACES
        n = realpath(n)
        if n.endswith("/dataset.pickle"):
            n = n.rsplit("/", 1)[0]
        if exists(join(n, "dataset.pickle")):
            # includes ds name
            base, jid, name = n.rsplit("/", 2)
            n = (jid, name)
        else:
            # bare jid (no ds name)
            base, jid = n.rsplit("/", 1)
            n = jid
        # Workdir key is the jobid minus its trailing sequence number.
        k = jid.rsplit("-", 1)[0]
        if WORKSPACES.get(k, base) != base:
            print("### Overriding workdir %s to %s" % (k, base,))
            WORKSPACES[k] = base
    ds = Dataset(n)
    # The job's slice count must match any previously established one.
    with open(join(get_path(ds.jobid), get_workspace_name(ds.jobid) + "-slices.conf")) as fh:
        slices = int(fh.read())
    import g
    if hasattr(g, 'SLICES'):
        assert g.SLICES == slices, "Dataset %s needs %d slices, by we are already using %d slices" % (ds, slices, g.SLICES)
    else:
        g.SLICES = slices
    return ds
| StarcoderdataPython |
3366421 | <filename>pychemia/visual/povray.py
import numpy as np
from pychemia.utils.periodic import atomic_number, covalent_radius, cpk_colors
class StructurePovray:
    """Render a pychemia Structure as a POV-Ray scene: atoms as spheres
    (periodic images faded by distance) plus an optional unit-cell wireframe."""
    def __init__(self, structure):
        """Keep the structure; ``distance`` scales camera and lighting."""
        self.structure = structure
        self.distance = 10
    def create_pov(self):
        """Build and return the POV-Ray scene description as a string."""
        ret = """
#version 3.7;
#include "colors.inc"    // The include files contain
#include "stones.inc"    // pre-defined scene elements
#include "glass.inc"
background{rgb 0}
"""
        # Camera distance follows the largest lattice vector for crystals.
        if self.structure.is_crystal:
            self.distance = max(self.structure.lattice.lengths)
        else:
            self.distance = 10
        ret += "#declare r=%7.3f;\n #declare s=%7.3f;" % (self.distance, self.distance)
        ret += "camera {\n"
        ret += "\tlocation <%7.3f, %7.3f, %7.3f>\n" % (1.3 * self.distance, 1.3 * self.distance, -1.3 * self.distance)
        ret += "\tlook_at <%7.3f, %7.3f, %7.3f>\n" % tuple(0.5 * sum(self.structure.cell[:]))
        ret += "}\n\n"
        if self.structure.nsites > 0:
            d = self.distance
            ret += "light_source { <%7.3f, %7.3f, %7.3f> color White}\n" % (2 * d, 2 * d, 2 * d)
        # One sphere per atom for the home cell and each +/-1 periodic image;
        # image atoms are made more transparent the further they are shifted.
        for imagx in np.arange(-1, 2):
            for imagy in np.arange(-1, 2):
                for imagz in np.arange(-1, 2):
                    for site in self.structure:
                        for symbol in site.symbols:
                            cell = self.structure.cell
                            x = site.position[0] - imagx * cell[0, 0] - imagy * cell[1, 0] - imagz * cell[2, 0]
                            y = site.position[1] - imagx * cell[0, 1] - imagy * cell[1, 1] - imagz * cell[2, 1]
                            z = site.position[2] - imagx * cell[0, 2] - imagy * cell[1, 2] - imagz * cell[2, 2]
                            # Skip atoms that would sit on top of the camera.
                            if (x - self.distance) ** 2 + (y - self.distance) ** 2 + (z + self.distance) ** 2 < 2:
                                continue
                            cr = 0.5 * covalent_radius(symbol)
                            rgb = cpk_colors[atomic_number(symbol)]
                            color = 'rgb < %7.3f, %7.3f, %7.3f>' % (rgb[0], rgb[1], rgb[2])
                            ret += "sphere {\n"
                            ret += "\t<%7.3f, %7.3f, %7.3f>, %7.3f\n\ttexture {\n" % (x, y, z, cr)
                            ret += "\t\tpigment { color %s filter 0.4 transmit %7.3f}\n" % \
                                   (color, 1 - 0.9 * np.exp(-0.1 * (abs(imagx) + abs(imagy) + abs(imagz))))
                            ret += "\t\tnormal { bumps 0.8 scale 0.1 }\n\t\tfinish { phong %7.3f }\n\t}\n}\n\n" % \
                                   np.exp(-0.1 * (abs(imagx) + abs(imagy) + abs(imagz)))
        # NOTE(review): x, y, z here are leftovers from the loops above and
        # are undefined when the structure is empty -- confirm intent.
        if self.structure.nsites <= 0:
            ret += "light_source { <%7.3f, %7.3f, %7.3f> color White}\n" % (x, y, z)
        ret += """union{
 #include "cell.pov"
 scale 1
 rotate <0, 0, 0>
 pigment{rgb <0.3,0.3,0.9>} finish{phong 0.9 ambient 0.42 reflection 0.1}
 }
"""
        return ret
    def write_povray(self, filename):
        """Write the scene to *filename*; crystals also get cell.pov."""
        wf = open(filename, 'w')
        wf.write(self.create_pov())
        wf.close()
        if self.structure.is_crystal:
            self.write_cell('cell.pov')
    def write_cell(self, filename):
        """Write the unit-cell edges as POV-Ray cylinders to *filename*."""
        wf = open(filename, 'w')
        ret = ''
        # Twelve edges of the parallelepiped spanned by the lattice vectors.
        for i in range(3):
            for j in range(3):
                ret += "cylinder { "
                if i == j:
                    ret += " <%7.3f, %7.3f, %7.3f>, " % (0.0, 0.0, 0.0)
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[j])
                else:
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[i])
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[i] + self.structure.cell[j])
                ret += " %7.3f }\n" % (self.distance / 100.0)
            ret += "cylinder { "
            ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(sum(self.structure.cell[:]))
            ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(sum(self.structure.cell[:]) - self.structure.cell[i])
            ret += " %7.3f }\n" % (self.distance / 100.0)
        wf.write(ret)
        wf.close()
# ret += "\topen // Remove end caps\n"
# #ret += "\ttexture { %s }\n" % ('T_Stone25 scale 4')
# ret += "\ttexture { %s }\n" % ('pigment { Col_Glass_Old }')
| StarcoderdataPython |
1700986 | <reponame>sjdhaneesh10/mezan<filename>mezan/member/serializer.py
from rest_framework import serializers
from .models import Family
class FamilySerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Family model."""
    # Primary key is exposed read-only so clients cannot reassign it.
    id = serializers.IntegerField(read_only=True)
    class Meta:
        model = Family
        #exclude = ['patient_image','patient_address']
        fields = '__all__'
        # NOTE(review): the string below is a leftover field list kept as a
        # harmless no-op expression; it has no runtime effect.
        '''
        (
            'id', 'patient_name','patient_dob'
        )
        '''
        # Fields the django-rest-framework-datatables integration always emits.
        datatables_always_serialize = ('id', )
3219718 | import datetime
import time
class EarthquakeUSGS:
    """
    @brief Class that holds earthquake data records.

    Holds one USGS earthquake record: magnitude, latitude/longitude,
    location text, title, URL and occurrence time.  BRIDGES uses scripts
    to continually monitor the USGS site (tweets) and retrieve the latest
    quake data for use in student projects.

    This object is generally not created by the user; see
    bridges::data_src_dependent::data_source::get_earthquake_usgs_data()

    @sa For an example, check out https://bridgesuncc.github.io/tutorials/Data_EQ_USGS.html

    @author <NAME>, <NAME>,
    @date 2/18/18, 12/29/20, 1/6/21
    """
    def __set_time_from_unix_timestamp(self, tm):
        """
        @brief Convert a USGS millisecond Unix timestamp into a UTC
        "YYYY-MM-DD HH:MM:SS" string stored in self._time.
        """
        epoch_time = int(tm)
        eq_time = epoch_time / 1000  # USGS timestamps are in milliseconds
        eqt = time.gmtime(eq_time)
        self._time = time.strftime("%Y-%m-%d %H:%M:%S", eqt)
    def __init__(self, magnitude=None, longit=None, latit=None, location=None,
                 title=None, url=None, time=None):
        """
        @brief constructor
        Args:
            magnitude: magnitude of quake (Richter scale)
            longit: longitude position
            latit: latitude position
            location: location of quake
            title: title (has some of the quake info in a string)
            url: url for more information
            time: occurrence time of quake (ms since the Unix epoch)
        """
        self._time = 0  # replaced by a formatted string once `time` is set
        self._magnitude = magnitude if magnitude is not None else 0.0
        self._longit = longit if longit is not None else 0.0
        self._latit = latit if latit is not None else 0.0
        self._location = location if location is not None else ""
        self._title = title if title is not None else ""
        self._url = url if url is not None else ""
        if time is not None:
            self.time = time
    @property
    def time(self):
        """
        @brief Get occurrence time of quake
        Returns:
            Formatted "YYYY-MM-DD HH:MM:SS" UTC string (0 if never set)
        """
        return self._time
    @time.setter
    def time(self, tm) -> None:
        """
        @brief Set occurrence time of quake
        Args:
            tm: occurrence time as a millisecond Unix epoch timestamp
        """
        self.__set_time_from_unix_timestamp(tm)
    @property
    def latit(self) -> float:
        """
        @brief Get latitude of quake
        Returns:
            Quake latitude
        """
        return self._latit
    @latit.setter
    def latit(self, latit: float) -> None:
        """
        @brief Set latitude of quake
        Args:
            latit: quake latitude to set
        """
        self._latit = latit
    @property
    def longit(self) -> float:
        """
        @brief Get longitude of quake
        Returns:
            Quake longitude
        """
        return self._longit
    @longit.setter
    def longit(self, longit: float) -> None:
        """
        @brief Set longitude of quake
        Args:
            longit: quake longitude to set
        """
        self._longit = longit
    @property
    def location(self) -> str:
        """
        @brief Get location of quake (typically a city or similar)
        Returns:
            Quake location
        """
        return self._location
    @location.setter
    def location(self, location: str):
        """
        @brief Set location of quake
        Args:
            location: quake location to set
        """
        self._location = location
    @property
    def title(self) -> str:
        """
        @brief Get quake title
        Returns:
            Quake title
        """
        return self._title
    @title.setter
    def title(self, title: str):
        """
        @brief Set title of quake
        Args:
            title: quake title to set
        """
        self._title = title
    @property
    def url(self) -> str:
        """
        @brief Get quake url
        Returns:
            Quake url
        """
        return self._url
    @url.setter
    def url(self, url: str):
        """
        @brief Set url of quake
        Args:
            url: quake url to set
        """
        self._url = url
    @property
    def magnitude(self) -> float:
        """
        @brief Get quake magnitude (Richter scale)
        Returns:
            Quake magnitude
        """
        return self._magnitude
    @magnitude.setter
    def magnitude(self, magn: float):
        """
        @brief Set magnitude of quake
        Args:
            magn: magnitude to set
        """
        # FIX: previously assigned the undefined name ``magnitude``,
        # raising NameError whenever this setter was used.
        self._magnitude = magn
| StarcoderdataPython |
1689336 | <filename>datasets/datasets.py
# coding=utf-8
import sys
import csv
import numpy as np
import cv2
class Data:
    """Abstract base type for a single sample of data."""
    def __init__(self):
        """The base record carries no state of its own."""
        pass
    def get_unit(self):
        """Return the raw payload; subclasses override this hook."""
        pass
class Dataset:
    """Base container pairing a sample array X with a label array Y."""
    def __init__(self, fn=None):
        """``fn`` is accepted for subclass compatibility; the base ignores it."""
        self.X, self.Y = np.array([]), np.array([])
    def get_data(self):
        """Populate X/Y -- a no-op in the base class."""
        pass
class TrainData(Data):
    """One training image identified by its file path."""
    def __init__(self, fn):
        """Store the path; the image itself is loaded lazily by get_unit()."""
        super().__init__()
        self.fn = fn
    def get_unit(self):
        """Read and return the image at ``self.fn`` as an OpenCV BGR array."""
        super().get_unit()
        return cv2.imread(self.fn)
class TrainDataset(Dataset):
    """Dataset that reads (filename, label) rows from a CSV file and stacks
    the referenced images into ``self.X`` with shape (N, H, W, C)."""
    def __init__(self, fn=None):
        """Open ``fn`` as a CSV reader; image names resolve under pic_base_dir."""
        super(TrainDataset, self).__init__()
        self.fns = np.array([])
        self.fn = fn
        self.pic_base_dir = "./pic/"
        # NOTE(review): the opened file handle is never closed explicitly.
        if fn is not None:
            self.reader = csv.reader(open(self.fn, "r"))
    def setPicBaseDir(self, dir):
        """Override the directory image filenames are resolved against."""
        self.pic_base_dir = dir
    def get_data(self):
        """Consume the CSV reader, loading every image into X, labels into Y."""
        super(TrainDataset, self).get_data()
        for item in self.reader:
            t_data = TrainData(self.pic_base_dir+item[0])
            #print("read uri:%s" % str(self.pic_base_dir+item[0]))
            self.fns = np.append(self.fns, item[0])
            # NOTE(review): item[1] is a str, so ``item[1]*1`` is a no-op
            # string repeat, not a numeric cast -- confirm labels should be ints.
            self.Y = np.append(self.Y, item[1]*1)
            img = t_data.get_unit()
            # line_num == 1 means "first row": assumes the CSV has no header.
            if self.reader.line_num == 1:
                self.X = img[np.newaxis, :, :, :]
            else:
                self.X = np.concatenate(
                    [self.X, img[np.newaxis, :, :, :]], axis=0)
if __name__ == "__main__":
    # Smoke test: load ../data/train.csv, resolve images under ../pic/,
    # then dump the resulting array shapes and a pixel sample per image.
    ds = TrainDataset("../data/train.csv")
    ds.setPicBaseDir("../pic/")
    ds.get_data()
    print("X shape is :%s" % str(ds.X.shape))
    print("Y shape is :%s" % str(ds.Y.shape))
    print(str(ds.Y))
    print(str(ds.fns))
    for item in ds.X:
        print(str(item[0:10, 100, 1]))
        print("************************************")
| StarcoderdataPython |
# Read three ages and report whether their sum reaches 100.
# (Also strips the dataset-id junk that made the first line a syntax error.)
n1 = int(input('Digite a idade da primeira pessoa:'))
n2 = int(input('Digite a idade da segunda pessoa:'))
n3 = int(input('Digite a idade da terceira pessoa:'))
soma = n1 + n2 + n3
# ">= 100" replaces the redundant "> 100 or == 100" test.
if soma >= 100:
    print('Maior ou igual a 100')
else:
    print('Menor que 100')
| StarcoderdataPython |
1716266 | <reponame>love3forever/hotroom-api
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017/12/7
# @Author : wangmengcn
# @Email : <EMAIL>
import socket
import re
from time import time, sleep
from datetime import datetime
from threading import Thread
from json import dumps
from . import r
class DouyuDM:
HOST = 'openbarrage.douyutv.com'
PORT = 8601
def __init__(self, room_id):
self.room_id = room_id
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.is_connected = False
self.is_terminated = False
# 登陆和保活消息
self.LOGIN_INFO = "type@=loginreq/username@=ABABA/password@=<PASSWORD>/roomid@={}/".format(room_id)
self.JION_GROUP = "type@=joingroup/rid@={}/gid@=-9999/".format(room_id)
self.KEEP_ALIVE = "type@=keeplive/tick@={}/"
# 消息体解析
self.msg_types = ['@=chatmsg', '@=onlinegift', '@=dgb',
'@=uenter', '@=bc_buy_deserve', '@=ssd',
'@=spbc', '@=ggbb']
self.convert_function_map = {
'@=chatmsg': self._convert_chatmsg,
'@=onlinegift': self._convert_onlinegift,
'@=dgb': self._convert_dgb,
'@=uenter': self._convert_uenter,
'@=bc_buy_deserve': self._convert_bc_buy_deserve,
'@=ssd': self._convert_ssd,
'@=spbc': self._convert_spbc,
'@=ggbb': self._convert_ggbb
}
@staticmethod
def transform_msg(content):
# 发送消息前转换消息为目标结构
length = bytearray([len(content) + 9, 0x00, 0x00, 0x00])
code = length
magic = bytearray([0xb1, 0x02, 0x00, 0x00])
end = bytearray([0x00])
trscont = bytes(content.encode('utf-8'))
return bytes(length + code + magic + trscont + end)
def connect_to_server(self):
# 链接到弹幕服务器
try:
self.socket.connect((self.HOST, self.PORT))
except socket.error as e:
print(str(e))
else:
self.is_connected = True
print('connected to danmu server')
def print_danmu(self):
# 打印弹幕信息
msgs = self.send_and_get_msg()
for msg in msgs:
self._convert_danmu(msg)
def publish_danmu(self):
# 将弹幕消息转发到redis channel中广播
danmu_channel = 'channel:{}'.format(self.room_id)
msgs = self.send_and_get_msg()
for msg in msgs:
danmu_info = dumps(self._convert_danmu(msg))
if danmu_info:
r.publish(danmu_channel, danmu_info)
def terminate(self):
# 终止弹幕获取
self.is_terminated = True
def send_and_get_msg(self):
# 接受弹幕消息
if self.is_connected:
# 发送登陆信息并加入指定弹幕频道
self.socket.sendall(self.transform_msg(self.LOGIN_INFO))
self.socket.sendall(self.transform_msg(self.JION_GROUP))
keep_aliver = self._keep_connect_alive()
next(keep_aliver)
loop_begin = datetime.now()
while not self.is_terminated:
now = time()
sleep(0.1)
# 更新keepalive
running_time = datetime.now()
if (running_time - loop_begin).seconds > 40:
keep_alive_info = self.KEEP_ALIVE.format(now)
keep_alive_info = self.transform_msg(keep_alive_info)
self.is_terminated = keep_aliver.send(keep_alive_info)
loop_begin = datetime.now()
try:
danmu_msg = self.socket.recv(1000)
except socket.error as e:
print(str(e))
else:
yield danmu_msg
def _keep_connect_alive(self):
# 保活socket链接
while self.is_connected:
keep_alive_info = yield False
if keep_alive_info:
try:
self.socket.sendall(keep_alive_info)
except socket.error as e:
print('error in keepalive:' + str(e))
print('*' * 10 + 'keepalive' + '*' * 10)
sleep(1)
def _convert_danmu(self, danmu_msg):
# 根据消息类型,将消息解析转发到对应方法
for flag in self.msg_types:
if flag in danmu_msg:
return self.convert_function_map.get(flag)(danmu_msg)
def _convert_chatmsg(self, chat_msg):
# 转换普通聊天信息
chat_dict = dict()
user_name = re.search("\/nn@=(.+?)\/", chat_msg)
if user_name:
chat_dict.setdefault('username', user_name.group(1))
chat_content = re.search("\/txt@=(.+?)\/", chat_msg)
if chat_content:
chat_dict.setdefault('chatcontent', chat_content.group(1))
user_level = re.search("\/level@=(.+?)\/", chat_msg)
if user_level:
chat_dict.setdefault('userlevel', user_level.group(1))
chat_date = datetime.now()
chat_dict.setdefault('date', chat_date.isoformat(' '))
for k, v in chat_dict.items():
print('{} >>> {}:{}'.format(self.room_id, k, v))
return chat_dict
def _convert_onlinegift(self, onlinegift):
# 转换在线礼物信息
onlinegift_dict = dict()
username = re.search("\/nn@=(.+?)\/", onlinegift)
if username:
onlinegift_dict.setdefault('username', username.group(1))
sil = re.search("\/sil@=(.+?)\/", onlinegift)
if sil:
onlinegift_dict.setdefault('sil', sil.group(1))
print('{} >>user:{} 获得鱼丸{}个'.format(self.room_id, username, sil))
return onlinegift_dict
def _convert_dgb(self, dgb):
# 转换赠送礼物信息
dgb_dict = dict()
username = re.search("\/nn@=(.+?)\/", dgb)
if username:
dgb_dict.setdefault('username', username.group(1))
hits = re.search("\/hits@=(.+?)\/", dgb)
if hits:
dgb_dict.setdefault('hits', hits.group(1))
gift_type = re.search("\/gs@=(.+?)\/", dgb)
if gift_type:
dgb_dict.setdefault('gift_type', gift_type.group(1))
print('{} >>> {}送出{}{}连击'.format(self.room_id, dgb_dict.get('username', None),
dgb_dict.get('gift_type', None),
dgb_dict.get('hits', None)))
return dgb_dict
def _convert_uenter(self, uenter):
# 转换用户进入直播间信息
uenter_dict = dict()
username = re.search("\/nn@=(.+?)\/", uenter)
if username:
uenter_dict.setdefault('username', username.group(1))
print('{} >>>欢迎:{} 进入直播间'.format(self.room_id, uenter_dict.setdefault('username', None)))
return uenter_dict
def _convert_bc_buy_deserve(self, bc_buy_deserve):
    # Convert a chouqin ("deserve") gift message.
    # Not implemented yet — this message type is intentionally ignored.
    return None
def _convert_ssd(self, ssd):
    # Convert a super-danmu message.
    # Not implemented yet — this message type is intentionally ignored.
    return None
def _convert_spbc(self, spbc):
# 转换房间内赠送礼物信息
spbc_dict = dict()
sender_name = re.search("\/sn@=(.+?)\/", spbc)
if sender_name:
spbc_dict.setdefault('sender_name', sender_name.group(1))
reciver_name = re.search("\/dn@=(.+?)\/", spbc)
if reciver_name:
spbc_dict.setdefault('reciver_name', reciver_name.group(1))
gift_num = re.search("\/gc@=(.+?)\/", spbc)
if gift_num:
spbc_dict.setdefault('gift_num', gift_num.group(1))
gift_name = re.search("\/gn@=(.+?)\/", spbc)
if gift_name:
spbc_dict.setdefault('gift_name', gift_name.group(1))
print('{} >>> {}赠送给{} {}个{}'.format(self.room_id, spbc_dict.get('sender_name', None),
spbc_dict.get('reciver_name', None),
spbc_dict.get('gift_num', None),
spbc_dict.get('gift_name', None)))
return spbc_dict
def _convert_ggbb(self, ggbb):
# 转换房间用户抢红包信息
ggbb_dict = dict()
username = re.search("\/dnk@=(.+?)\/", ggbb)
if username:
ggbb_dict.setdefault('username', username.group(1))
sender_name = re.search("\/snk@=(.+?)\/", ggbb)
if sender_name:
ggbb_dict.setdefault('sender_name', sender_name.group(1))
gift_num = re.search("\/sl@=(.+?)\/", ggbb)
if gift_num:
ggbb_dict.setdefault('gift_num', gift_num.group(1))
gift_type = re.search("\/rpt@=(.+?)\/", ggbb)
if gift_type:
ggbb_dict.setdefault('gift_type', gift_type.group(1))
print('{} >>> {} 获得了来自{}的{}个{}'.format(self.room_id, ggbb_dict.get('username', None),
ggbb_dict.get('sender_name', None),
ggbb_dict.get('gift_num', None),
ggbb_dict.get('gift_type', None)))
return ggbb_dict
if __name__ == '__main__':
    # Spawn one danmu client per room and print its messages on a
    # background thread.  Clients are kept in a list so they stay alive
    # for the life of the process.
    rooms = ['67373', '71017']
    danmu_client = []
    for room in rooms:
        danmu = DouyuDM(room)
        danmu_client.append(danmu)
        danmu.connect_to_server()
        danmu_thread = Thread(target=danmu.print_danmu)
        danmu_thread.start()
| StarcoderdataPython |
38129 | <gh_stars>0
# Downloaded from Google Sheets, edited out the first line, and then...
# reader = csv.DictReader(open('from-sheets.csv', newline=''))
import csv
import toml

reader = csv.reader(open('from-sheets.csv', newline=''))
rows = [row for row in reader]

# Final text per glyph id, one map per font weight.
final_bold = {}
final_regular = {}
for (i, row) in enumerate(rows):
    if i == 0:
        # Skip the header row.
        continue
    # Columns:
    # 0: glyph_id
    # 1: Bold, from PDF
    # 2: Regular, from PDF
    # 3: Bold, from S
    # 4: Regular, from S
    # 5 and 6: Images
    # 7: Bold, from U
    # 8: Regular, from U
    glyph_id = row[0]

    # Order of preference: Bold 1 > 7 > 3, and Regular; 2 > 8 > 4
    def get_final(cols):
        # cols = [preferred, second, third] candidate strings; empty string
        # means "no value from that source".  Non-empty candidates must
        # agree, otherwise the assert flags the row for manual review.
        if cols[0]:
            if cols[1] and cols[2]:
                assert cols[0] == cols[1] == cols[2], (glyph_id, cols[0], cols[1], cols[2])
                return cols[0]
            if not cols[1] and not cols[2]:
                return cols[0]
            if cols[1] and not cols[2]:
                assert cols[0] == cols[1]
                return cols[0]
            if not cols[1] and cols[2]:
                assert cols[0] == cols[2]
                return cols[0]
        else:
            if cols[1] and cols[2]:
                # Known disagreement, hand-resolved in favour of cols[1].
                if cols[1] == 'ों' and cols[2] == 'र्<CCprec>े':
                    return cols[1]
                assert cols[1] == cols[2], (cols[1], cols[2])
                return cols[1]
            if cols[1] and not cols[2]:
                return cols[1]
            if not cols[1] and cols[2]:
                return cols[2]
            if not cols[1] and not cols[2]:
                return None

    final_bold[glyph_id] = get_final([row[1], row[7], row[3]])
    final_regular[glyph_id] = get_final([row[2], row[8], row[4]])

# Manually add these
# NOTE(review): bold and regular use different glyph ids below
# ('0262' vs '025E') — confirm this asymmetry is intentional.
final_bold['0003'] = final_regular['0003'] = ' '
final_bold['0262'] = final_regular['025E'] = ''

toml.dump(final_bold, open('from-csv-bold.toml', 'w'))
toml.dump(final_regular, open('from-csv-regular.toml', 'w'))
| StarcoderdataPython |
1699322 | <gh_stars>0
from __future__ import print_function
from __future__ import division
from functools import partial
import tensorflow as tf
from layers import Attention, FeedForward, Encoder, Softmax
from tensorflow.contrib.seq2seq import sequence_loss
import nmt.all_constants as ac
import nmt.utils as ut
class Model(object):
    """Attentional encoder-decoder NMT model (TensorFlow 1.x graph mode).

    Builds the full training or inference graph in ``__init__``: source
    encoder, attention, decoder, softmax loss, and — depending on
    ``mode`` — either the training op or the beam-search decoding ops.
    """

    def __init__(self, config, mode):
        """Construct the graph.

        Args:
            config: dict of hyper-parameters (vocab/embedding/RNN sizes,
                dropout keep probabilities, beam settings, optimizer, ...).
            mode: one of the ``ac`` mode constants; anything other than
                ``ac.TRAINING`` builds an inference graph with batch size 1
                and dropout disabled.
        """
        super(Model, self).__init__()
        self.logger = ut.get_logger(config['log_file'])

        # Variable-scope names for each sub-network.
        ENC_SCOPE = 'encoder'
        DEC_SCOPE = 'decoder'
        ATT_SCOPE = 'attention'
        OUT_SCOPE = 'outputer'
        SFM_SCOPE = 'softmax'

        batch_size = config['batch_size']
        feed_input = config['feed_input']
        grad_clip = config['grad_clip']
        beam_size = config['beam_size']
        beam_alpha = config['beam_alpha']
        num_layers = config['num_layers']
        rnn_type = config['rnn_type']
        score_func_type = config['score_func_type']
        src_vocab_size = config['src_vocab_size']
        trg_vocab_size = config['trg_vocab_size']
        src_embed_size = config['src_embed_size']
        trg_embed_size = config['trg_embed_size']
        enc_rnn_size = config['enc_rnn_size']
        dec_rnn_size = config['dec_rnn_size']
        input_keep_prob = config['input_keep_prob']
        output_keep_prob = config['output_keep_prob']

        # Map the config constant to the Attention scoring-function enum.
        attention_maps = {
            ac.SCORE_FUNC_DOT: Attention.DOT,
            ac.SCORE_FUNC_GEN: Attention.GEN,
            ac.SCORE_FUNC_BAH: Attention.BAH
        }
        score_func_type = attention_maps[score_func_type]

        # Inference decodes one sentence at a time with dropout disabled.
        if mode != ac.TRAINING:
            batch_size = 1
            input_keep_prob = 1.0
            output_keep_prob = 1.0

        # Placeholder
        self.src_inputs = tf.placeholder(tf.int32, [batch_size, None])
        self.src_seq_lengths = tf.placeholder(tf.int32, [batch_size])
        self.trg_inputs = tf.placeholder(tf.int32, [batch_size, None])
        self.trg_targets = tf.placeholder(tf.int32, [batch_size, None])
        self.target_weights = tf.placeholder(tf.float32, [batch_size, None])

        # First, define the src/trg embeddings
        with tf.variable_scope(ENC_SCOPE):
            self.src_embedding = tf.get_variable('embedding',
                                                 shape=[src_vocab_size, src_embed_size],
                                                 dtype=tf.float32)
        with tf.variable_scope(DEC_SCOPE):
            self.trg_embedding = tf.get_variable('embedding',
                                                 shape=[trg_vocab_size, trg_embed_size],
                                                 dtype=tf.float32)

        # Then select the RNN cell, reuse if not in TRAINING mode
        if rnn_type != ac.LSTM:
            raise NotImplementedError
        reuse = mode != ac.TRAINING  # if dev/test, reuse cell
        encoder_cell = ut.get_lstm_cell(ENC_SCOPE, num_layers, enc_rnn_size, output_keep_prob=output_keep_prob, seed=ac.SEED, reuse=reuse)
        att_state_size = dec_rnn_size
        decoder_cell = ut.get_lstm_cell(DEC_SCOPE, num_layers, dec_rnn_size, output_keep_prob=output_keep_prob, seed=ac.SEED, reuse=reuse)

        # The model
        encoder = Encoder(encoder_cell, ENC_SCOPE)
        decoder = Encoder(decoder_cell, DEC_SCOPE)
        outputer = FeedForward(enc_rnn_size + dec_rnn_size, att_state_size, OUT_SCOPE, activate_func=tf.tanh)
        self.softmax = softmax = Softmax(att_state_size, trg_vocab_size, SFM_SCOPE)

        # Encode source sentence
        encoder_inputs = tf.nn.embedding_lookup(self.src_embedding, self.src_inputs)
        encoder_inputs = tf.nn.dropout(encoder_inputs, input_keep_prob, seed=ac.SEED)
        encoder_outputs, last_state = encoder.encode(encoder_inputs,
                                                     sequence_length=self.src_seq_lengths,
                                                     initial_state=None)

        # Define an attention layer over encoder outputs
        attention = Attention(ATT_SCOPE, score_func_type, encoder_outputs, enc_rnn_size, dec_rnn_size, common_dim=enc_rnn_size if score_func_type==Attention.BAH else None)

        # This function takes an decoder's output, make it attend to encoder's outputs and
        # spit out the attentional state which is used for predicting next target word
        def decoder_output_func(h_t):
            alignments, c_t = attention.calc_context(self.src_seq_lengths, h_t)
            c_t_h_t = tf.concat([c_t, h_t], 1)
            output = outputer.transform(c_t_h_t)
            return output, alignments

        # Fit everything in the decoder & start decoding
        decoder_inputs = tf.nn.embedding_lookup(self.trg_embedding, self.trg_inputs)
        decoder_inputs = tf.nn.dropout(decoder_inputs, input_keep_prob, seed=ac.SEED)
        attentional_outputs = decoder.decode(decoder_inputs,
                                             decoder_output_func, att_state_size,
                                             feed_input=feed_input, initial_state=last_state,
                                             reuse=False)
        attentional_outputs = tf.reshape(attentional_outputs, [-1, att_state_size])

        # Loss
        logits = softmax.calc_logits(attentional_outputs)
        logits = tf.reshape(logits, [batch_size, -1, trg_vocab_size])
        loss = sequence_loss(logits,
                             self.trg_targets,
                             self.target_weights,
                             average_across_timesteps=False,
                             average_across_batch=False)

        # Inference: expose the (non-trainable) loss and build beam search.
        if mode != ac.TRAINING:
            self.loss = tf.stop_gradient(tf.reduce_sum(loss))
            # Cap output length at 3x the source length.
            max_output_length = 3 * self.src_seq_lengths[0]
            tensor_to_state = partial(ut.tensor_to_lstm_state, num_layers=config['num_layers'])
            beam_outputs = decoder.beam_decode(self.trg_embedding, ac.BOS_ID, ac.EOS_ID,
                                               decoder_output_func, att_state_size,
                                               softmax.calc_logprobs, trg_vocab_size,
                                               max_output_length, tensor_to_state,
                                               alpha=beam_alpha, beam_size=beam_size, feed_input=feed_input,
                                               initial_state=last_state, reuse=True)
            self.probs, self.scores, self.symbols, self.parents, self.alignments = beam_outputs

        # If in training, do the grad backpropagate
        if mode == ac.TRAINING:
            self.loss = tf.reduce_sum(loss)
            tvars = tf.trainable_variables()
            # Clip gradients by global norm before applying them.
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), grad_clip)
            self.lr = tf.Variable(1.0, trainable=False, name='lr')
            if config['optimizer'] == ac.ADADELTA:
                optimizer = tf.train.AdadeltaOptimizer(
                    learning_rate=self.lr, rho=0.95, epsilon=1e-6)
            else:
                optimizer = tf.train.GradientDescentOptimizer(self.lr)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))

        # Finally, log out some model's stats
        if mode == ac.TRAINING:
            def num_params(var):
                # Number of scalar parameters in one variable.
                shape = var.get_shape().as_list()
                var_count = 1
                for dim in shape:
                    var_count = var_count * dim
                return var_count

            self.logger.info('{} model:'.format('train' if mode == ac.TRAINING else 'dev/test'))
            self.logger.info('Num trainable variables {}'.format(len(tvars)))
            self.logger.info('Num params: {:,}'.format(sum([num_params(v) for v in tvars])))
            self.logger.info('List of all trainable parameters:')
            for v in tvars:
                self.logger.info(' {}'.format(v.name))
| StarcoderdataPython |
3352730 | <filename>tests/integration_tests/data_steward/cdr_cleaner/cleaning_rules/remove_ehr_data_without_consent_test.py
"""
Integration test for remove_ehr_data_without_consent module
Original Issues: DC-1644
The intent is to remove all ehr data for unconsented participants for EHR.
"""
# Python Imports
import os
from dateutil import parser
# Project Imports
from common import PERSON, VISIT_OCCURRENCE, OBSERVATION
from common import JINJA_ENV
from app_identity import PROJECT_ID
from cdr_cleaner.cleaning_rules.remove_ehr_data_without_consent import (
RemoveEhrDataWithoutConsent, EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE)
from tests.integration_tests.data_steward.cdr_cleaner.cleaning_rules.bigquery_tests_base import \
BaseTest
# Two test participants: person 1 will carry a valid EHR consent record,
# person 2 will not (see OBSERVATION_DATA_TEMPLATE below).
PERSON_DATA_TEMPLATE = JINJA_ENV.from_string("""
insert into `{{project_id}}.{{dataset_id}}.person` (person_id, gender_concept_id, year_of_birth, race_concept_id, ethnicity_concept_id)
VALUES (1, 0, 0, 0, 0),
(2, 0, 0, 0, 0)
""")

# Three visits per participant; which of them are EHR-sourced is decided
# by the _mapping_visit_occurrence rows below.
VISIT_OCCURRENCE_DATA_TEMPLATE = JINJA_ENV.from_string("""
insert into `{{project_id}}.{{dataset_id}}.visit_occurrence`
(visit_occurrence_id,
person_id,
visit_concept_id,
visit_start_date,
visit_end_date,
visit_type_concept_id)
VALUES (1, 1, 0, '2020-01-01', '2020-01-01', 0),
(2, 1, 0, '2020-01-01', '2020-01-01', 0),
(3, 1, 0, '2020-01-01', '2020-01-01', 0),
(4, 2, 0, '2020-01-01', '2020-01-01', 0),
(5, 2, 0, '2020-01-01', '2020-01-01', 0),
(6, 2, 0, '2020-01-01', '2020-01-01', 0)
""")

# Source dataset per visit: 'unioned_ehr' marks EHR records, which are the
# ones subject to removal for unconsented participants.
MAPPING_VISIT_OCCURRENCE_TEMPLATE = JINJA_ENV.from_string("""
insert into `{{project_id}}.{{dataset_id}}._mapping_visit_occurrence`
(visit_occurrence_id, src_dataset_id)
VALUES (1, 'rdr2021'),
(2, 'rdr2021'),
(3, 'unioned_ehr'),
(4, 'unioned_ehr'),
(5, 'unioned_ehr'),
(6, 'rdr2021')
""")

# Observations 1-2 are person 1's EHR consent records
# (EHRConsentPII_ConsentPermission / value 1586100); person 2 has none.
OBSERVATION_DATA_TEMPLATE = JINJA_ENV.from_string("""
insert into `{{project_id}}.{{dataset_id}}.observation`
(observation_id,
person_id,
observation_concept_id,
observation_date,
observation_datetime,
observation_type_concept_id,
value_source_concept_id,
observation_source_value )
VALUES(1, 1, 0, '2020-01-01', '2020-01-01 00:00:00 UTC', 0, 1586100, 'EHRConsentPII_ConsentPermission'),
(2, 1, 0, '2021-01-02', '2021-01-02 00:00:00 UTC', 0, 1586100, 'EHRConsentPII_ConsentPermission'),
(3, 1, 0, '2020-05-01', '2020-05-01 00:00:00 UTC', 0, 123, 'test_value_0'),
(4, 2, 0, '2020-03-01', '2020-03-01 00:00:00 UTC', 0, 234, 'test_value_1'),
(5, 2, 0, '2020-01-05', '2020-01-05 00:00:00 UTC', 0, 345, 'test_value_2'),
(6, 2, 0, '2020-05-05', '2020-05-05 00:00:00 UTC', 0, 456, 'test_value_3')
""")

# Source dataset per observation, mirroring the visit mapping above.
MAPPING_OBSERVATION_TEMPLATE = JINJA_ENV.from_string("""
insert into `{{project_id}}.{{dataset_id}}._mapping_observation`
(observation_id, src_dataset_id)
VALUES(1, 'rdr2021'),
(2, 'rdr2021'),
(3, 'unioned_ehr'),
(4, 'unioned_ehr'),
(5, 'unioned_ehr'),
(6, 'rdr2021')
""")
class NoDataAfterDeathTest(BaseTest.CleaningRulesTestBase):
    """Integration test for the RemoveEhrDataWithoutConsent cleaning rule.

    Loads two participants (one consented, one not) plus EHR/RDR-mapped
    rows, runs the rule, and checks that only the unconsented
    participant's EHR-sourced rows are sandboxed and removed.
    NOTE(review): the class name looks copied from another test module —
    confirm it should not be e.g. RemoveEhrDataWithoutConsentTest.
    """

    @classmethod
    def setUpClass(cls):
        """Configure project/dataset ids, the rule instance and table lists."""
        print('**************************************************************')
        print(cls.__name__)
        print('**************************************************************')

        super().initialize_class_vars()

        # Set the test project identifier
        cls.project_id = os.environ.get(PROJECT_ID)

        # Set the expected test datasets
        cls.dataset_id = os.environ.get('COMBINED_DATASET_ID')
        cls.sandbox_id = cls.dataset_id + '_sandbox'

        cls.rule_instance = RemoveEhrDataWithoutConsent(cls.project_id,
                                                        cls.dataset_id,
                                                        cls.sandbox_id)

        # Generates list of fully qualified table names and their corresponding sandbox table names
        cls.fq_table_names.extend([
            f'{cls.project_id}.{cls.dataset_id}.{OBSERVATION}',
            f'{cls.project_id}.{cls.dataset_id}.{PERSON}',
            f'{cls.project_id}.{cls.dataset_id}.{VISIT_OCCURRENCE}',
            f'{cls.project_id}.{cls.dataset_id}._mapping_{OBSERVATION}',
            f'{cls.project_id}.{cls.dataset_id}._mapping_{VISIT_OCCURRENCE}',
        ])

        cls.fq_sandbox_table_names.extend([
            f'{cls.project_id}.{cls.sandbox_id}.{cls.rule_instance.issue_numbers[0].lower()}_{OBSERVATION}',
            f'{cls.project_id}.{cls.sandbox_id}.{cls.rule_instance.issue_numbers[0].lower()}_{VISIT_OCCURRENCE}',
            f'{cls.project_id}.{cls.sandbox_id}.{EHR_UNCONSENTED_PARTICIPANTS_LOOKUP_TABLE}'
        ])

        # call super to set up the client, create datasets
        cls.up_class = super().setUpClass()

    def setUp(self):
        """
        Create empty tables for the rule to run on
        """
        # Create the person, observation, _mapping_observation, visit_occurrence, _mapping_visit_occurrence
        # tables required for the test
        super().setUp()

        # Render each insert template against the test project/dataset.
        person_data_query = PERSON_DATA_TEMPLATE.render(
            project_id=self.project_id, dataset_id=self.dataset_id)
        visit_occurrence_data_query = VISIT_OCCURRENCE_DATA_TEMPLATE.render(
            project_id=self.project_id, dataset_id=self.dataset_id)
        observation_data_query = OBSERVATION_DATA_TEMPLATE.render(
            project_id=self.project_id, dataset_id=self.dataset_id)
        mapping_observation_query = MAPPING_OBSERVATION_TEMPLATE.render(
            project_id=self.project_id, dataset_id=self.dataset_id)
        mapping_visit_query = MAPPING_VISIT_OCCURRENCE_TEMPLATE.render(
            project_id=self.project_id, dataset_id=self.dataset_id)

        # Load test data
        self.load_test_data([
            person_data_query, visit_occurrence_data_query,
            observation_data_query, mapping_observation_query,
            mapping_visit_query
        ])

    def test_remove_ehr_data_without_consent(self):
        """
        1. person_id=1, has valid consent status
        2. person_id=2, does not have valid consent record
        """
        # Expected results list: person 2's EHR-mapped rows (ids 4 and 5 in
        # each table) are sandboxed; everything else survives.
        tables_and_counts = [{
            'fq_table_name':
                f'{self.project_id}.{self.dataset_id}.{VISIT_OCCURRENCE}',
            'fq_sandbox_table_name':
                f'{self.project_id}.{self.sandbox_id}.{self.rule_instance.sandbox_table_for(VISIT_OCCURRENCE)}',
            'loaded_ids': [1, 2, 3, 4, 5, 6],
            'sandboxed_ids': [4, 5],
            'fields': ['visit_occurrence_id', 'person_id'],
            'cleaned_values': [(1, 1), (2, 1), (3, 1), (6, 2)]
        }, {
            'fq_table_name':
                f'{self.project_id}.{self.dataset_id}.{OBSERVATION}',
            'fq_sandbox_table_name':
                f'{self.project_id}.{self.sandbox_id}.{self.rule_instance.sandbox_table_for(OBSERVATION)}',
            'loaded_ids': [1, 2, 3, 4, 5, 6],
            'sandboxed_ids': [4, 5],
            'fields': [
                'observation_id', 'person_id', 'value_source_concept_id',
                'observation_source_value'
            ],
            'cleaned_values': [
                (1, 1, 1586100, 'EHRConsentPII_ConsentPermission'),
                (2, 1, 1586100, 'EHRConsentPII_ConsentPermission'),
                (3, 1, 123, 'test_value_0'), (6, 2, 456, 'test_value_3')
            ]
        }]

        self.default_test(tables_and_counts)
| StarcoderdataPython |
166597 | <gh_stars>0
from scowclient import ScowClient
import json
def listProcs():
    """Fetch process definitions from SCOW and pretty-print the first four."""
    client = ScowClient()
    payload = client.get('processDefinitions')
    prettyPrintJson(payload['processDefinitions'][:4])
def prettyPrintJson(obj):
    """Print *obj* to stdout as sorted, 4-space-indented JSON."""
    # Fixed: the original used a Python 2 print statement, which is a
    # SyntaxError on Python 3.
    print(json.dumps(obj, sort_keys=True, indent=4))
def main():
    # Entry point: list the first few SCOW process definitions.
    listProcs()


if __name__ == "__main__":
    main()
| StarcoderdataPython |
129315 | ##########################################################################################
# Machine Environment Config
DEBUG_MODE = False           # when True, _set_debug_mode() shrinks the run for quick checks
USE_CUDA = not DEBUG_MODE    # use the GPU unless debugging
CUDA_DEVICE_NUM = 0          # CUDA device index to run on

##########################################################################################
# Path Config

import os
import sys
# Run relative to this file's directory so result/log paths are stable.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, "..")  # for problem_def
sys.path.insert(0, "../..")  # for utils
##########################################################################################
# import
import logging
from utils.utils import create_logger, copy_all_src
from MOTSPTrainer_3obj import TSPTrainer as Trainer
##########################################################################################
# parameters
# Multi-objective TSP environment: 100 cities, 100 POMO rollouts per instance.
env_params = {
    'problem_size': 100,
    'pomo_size': 100,
}

# Transformer encoder/decoder hyper-parameters.
model_params = {
    'embedding_dim': 128,
    'sqrt_embedding_dim': 128**(1/2),
    'encoder_layer_num': 6,
    'qkv_dim': 16,
    'head_num': 8,
    'logit_clipping': 10,
    'ff_hidden_dim': 512,
    'eval_type': 'argmax',
}

# Adam-style settings plus a step LR schedule (decay at epoch 180).
optimizer_params = {
    'optimizer': {
        'lr': 1e-4,
        'weight_decay': 1e-6
    },
    'scheduler': {
        'milestones': [180,],
        'gamma': 0.1
    }
}

trainer_params = {
    'use_cuda': USE_CUDA,
    'cuda_device_num': CUDA_DEVICE_NUM,
    'epochs': 200,
    'train_episodes': 100 * 1000,
    'train_batch_size': 64,
    'logging': {
        'model_save_interval': 5,
        'img_save_interval': 10,
        'log_image_params_1': {
            'json_foldername': 'log_image_style',
            'filename': 'style_tsp_100.json'
        },
        'log_image_params_2': {
            'json_foldername': 'log_image_style',
            'filename': 'style_loss_1.json'
        },
    },
    'model_load': {
        'enable': False,  # enable loading pre-trained model
        'path': './result/saved_MOTSP100_3obj_model',  # directory path of pre-trained model and log files saved.
        'epoch': 200,  # epoch version of pre-trained model to load.
    }
}

logger_params = {
    'log_file': {
        'desc': 'train__tsp_n100',
        'filename': 'run_log'
    }
}
##########################################################################################
# main
def main():
    """Configure logging, build the trainer, snapshot sources and train."""
    if DEBUG_MODE:
        _set_debug_mode()

    create_logger(**logger_params)
    _print_config()

    trainer = Trainer(env_params=env_params,
                      model_params=model_params,
                      optimizer_params=optimizer_params,
                      trainer_params=trainer_params)

    # Copy the source files into the result folder for reproducibility.
    copy_all_src(trainer.result_folder)

    trainer.run()
def _set_debug_mode():
    """Shrink the training hyper-parameters for a fast local debug run."""
    global trainer_params
    trainer_params.update({
        'epochs': 2,
        'train_episodes': 10,
        'train_batch_size': 4,
    })
def _print_config():
    """Log the run configuration: debug/CUDA flags and every *_params dict."""
    logger = logging.getLogger('root')
    logger.info('DEBUG_MODE: {}'.format(DEBUG_MODE))
    logger.info('USE_CUDA: {}, CUDA_DEVICE_NUM: {}'.format(USE_CUDA, CUDA_DEVICE_NUM))
    # Fixed: the original abused a list comprehension for its side effects,
    # building a throwaway list of None; a plain loop says what it means.
    for g_key in globals():
        if g_key.endswith('params'):
            logger.info(g_key + "{}".format(globals()[g_key]))
##########################################################################################
# Script entry point.
if __name__ == "__main__":
    main()
3301182 | """Kea subnet-id sanity-check"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_control
import srv_msg
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_able():
    """Lease acquired under subnet-id 666 is corrected to 999 after restart.

    Phase 1: acquire a lease for 2001:db8::1 with the subnet configured as
    id 666.  Phase 2: restart with the same subnet under id 999 and
    'lease-checks: fix'; the sanity check must rewrite the stale lease's
    subnet-id, and a second client must still get an address.
    """
    misc.test_setup()
    # NOTE(review): the pool bounds look anonymized/garbled
    # ('2001:fdf8:f53e:61e4::18' is outside 2001:db8::/64) — confirm
    # against the upstream test before relying on exact addresses.
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')

    # The lease file must record the lease under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')

    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')

    # Restart with the same subnet renumbered to id 999.
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # The sanity check must log that the stale lease was corrected.
    srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FIXED The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks, but was corrected to subnet-id 999.')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_able_double_restart():
    """Subnet-id fix survives a second restart.

    Acquire a lease under subnet-id 666, restart under id 999 so the
    sanity check corrects the lease, then restart again with the same
    id 999 single-address pool and verify a new client can still lease.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')

    # Lease is on disk under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')

    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')

    # First restart: same subnet renumbered to id 999; lease gets fixed.
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FIXED The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks, but was corrected to subnet-id 999.')

    # Let the server settle (and the lease file flush) before restarting.
    srv_msg.forge_sleep('13', 'seconds')

    srv_control.start_srv('DHCP', 'stopped')

    # Second restart: keep id 999, single-address pool.
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '987654321')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
# Pause the Test.
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_unable():
    """Same client re-leases after restart under a new subnet-id.

    Acquire a lease under subnet-id 666, restart with the subnet
    renumbered to 999 and 'lease-checks: fix', then verify the original
    client can still obtain 2001:db8::1.  Unlike the fix_able test, no
    DHCPSRV_LEASE_SANITY_FIXED log entry is expected here.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')

    # Lease recorded under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')

    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')

    # Restart with the subnet renumbered to id 999 (single-address pool).
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_del_unable():
    """Lease sanity-check "fix-del" when the stale lease cannot be fixed.

    Phase 1: run Kea with subnet-id 666, do a SOLICIT/REQUEST exchange and
    verify the lease for 2001:db8::1 is written to the lease file.
    Phase 2: stop the server, keep the lease file, restart with the same
    subnet under subnet-id 999 and sanity-checks {"lease-checks":"fix-del"}.

    NOTE(review): unlike the sibling tests, this one ends right after the
    second server start -- there is no assertion that the stale lease was
    discarded.  Confirm whether trailing checks were lost.
    """
    # --- phase 1: server configured with subnet-id 666 ---
    misc.test_setup()
    # NOTE(review): pool upper bound '2001:fdf8:f53e:61e4::18' lies outside
    # 2001:db8::/64 -- looks like a corrupted/anonymized literal; verify.
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # The memfile lease backend must now record the lease under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: restart with subnet-id changed to 999, lease file kept ---
    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_fix_del_able():
    """Lease sanity-check "fix-del" when the stale lease can be re-assigned.

    Phase 1: acquire a lease under subnet-id 666 and verify it in the lease
    file.  Phase 2: reconfigure the still-running server (no restart) with
    the same subnet under subnet-id 999 and lease-checks "fix-del".

    NOTE(review): the test ends right after the reconfigure -- no assertion
    verifies the lease was fixed; confirm whether trailing checks were lost.
    """
    # --- phase 1: server configured with subnet-id 666 ---
    misc.test_setup()
    # NOTE(review): pool upper bound '2001:fdf8:f53e:61e4::18' lies outside
    # 2001:db8::/64 -- looks like a corrupted/anonymized literal; verify.
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: live reconfigure with subnet-id changed to 999 ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'reconfigured')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_warn():
    """Lease sanity-check "warn": stale lease is kept but logged.

    Phase 1: acquire a lease for 2001:db8::1 under subnet-id 666.
    Phase 2: reconfigure with subnet-id 999 and lease-checks "warn"; the
    server must log DHCPSRV_LEASE_SANITY_FAIL but keep the lease, so a
    second client soliciting from the single-address pool gets back
    status code 2 (NoAddrsAvail) in sub-option 13.
    """
    # --- phase 1: server configured with subnet-id 666 ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"warn"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: live reconfigure with subnet-id 999, lease-checks "warn" ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"warn"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'reconfigured')
    srv_msg.forge_sleep('2', 'seconds')
    # "warn" must only log the failed check, not drop the lease.
    srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks.')
    # A different client (DUID ...:33) solicits; since the only address is
    # still leased, the ADVERTISE must carry status code 2 (NoAddrsAvail).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:33')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '2')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_del_renew():
    """Lease sanity-check "del" followed by a RENEW from the original client.

    Phase 1: acquire a lease for 2001:db8::1 under subnet-id 666.
    Phase 2: restart with subnet-id 999 and lease-checks "del"; the stale
    lease must be logged as discarded.  The original client can then RENEW
    and re-obtain 2001:db8::1 (a fresh lease under subnet-id 999), while a
    different client gets NoAddrsAvail (status code 2).
    """
    # --- phase 1: server configured with subnet-id 666 ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: restart with subnet-id 999, lease-checks "del" ---
    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    srv_msg.forge_sleep('2', 'seconds')
    # "del" must drop the stale lease and say so in the log.
    srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL_DISCARD The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks and was dropped.')
    # Original client RENEWs: the address is free again, so the server can
    # hand 2001:db8::1 back in the REPLY.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # A different client (DUID ...:22) must now get NoAddrsAvail (code 2).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '2')
    # Lease file: the renewed lease exists under both the old (666) and new
    # (999) subnet-id rows, and nothing was recorded for client ...:22.
    # NOTE(review): the next two address lines are exact duplicates of each
    # other (same address/DUID) -- possibly one was meant to differ; verify.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('999,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_doesnt_contain('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.lease_file_doesnt_contain('999,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:22')
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_del():
    """Lease sanity-check "del" verified through the lease_cmds hook.

    Phase 1: acquire a lease for 2001:db8::1 under subnet-id 666.
    Phase 2: restart with subnet-id 999 and lease-checks "del"; the stale
    lease is dropped, a new client can take 2001:db8::1, and lease6-get
    control commands are issued over the UNIX socket to inspect state.
    """
    # --- phase 1: server configured with subnet-id 666 + lease_cmds hook ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
    srv_control.open_control_channel()
    srv_control.add_hooks('libdhcp_lease_cmds.so')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    # NOTE(review): this DUID literal is not in the '00:03:00:01:...' form
    # used everywhere else and embeds an IPv6-looking prefix -- almost
    # certainly a corrupted/anonymized value; siblings use
    # '00:03:00:01:f6:f5:f4:f3:f2:01' here.  Verify before relying on it.
    srv_msg.client_sets_value('Client', 'DUID', 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: restart with subnet-id 999, lease-checks "del" ---
    srv_control.start_srv('DHCP', 'stopped')
    srv_control.clear_leases('logs')
    misc.test_setup()
    # NOTE(review): pool upper bound '2001:fdf8:f53e:61e4::18' lies outside
    # 2001:db8::/64 -- looks like a corrupted/anonymized literal; verify.
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"del"}')
    srv_control.open_control_channel()
    srv_control.add_hooks('libdhcp_lease_cmds.so')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    srv_msg.forge_sleep('2', 'seconds')
    # "del" must drop the stale lease and say so in the log.
    srv_msg.log_contains('DHCPSRV_LEASE_SANITY_FAIL_DISCARD The lease 2001:db8::1 with subnet-id 666 failed subnet-id checks and was dropped.')
    # New client (DUID ...:22, new ia_id) can now obtain 2001:db8::1.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '7654321')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Query lease state through the lease_cmds hook (by address and by DUID).
    srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"ip-address": "2001:db8::1"}}')
    srv_msg.send_ctrl_cmd_via_socket('{"command":"lease6-get","arguments":{"subnet-id":666,"identifier-type":"duid", "identifier": "00:03:00:01:f6:f5:f4:f3:f2:01"}}')
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # Disabled original steps kept for reference:
    # Pause the Test.
    # File stored in kea-leases6.csv MUST contain line or phrase: 2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:22
    # File stored in kea-leases6.csv MUST contain line or phrase: 999,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:22
@pytest.mark.v6
@pytest.mark.kea_only
@pytest.mark.subnet_id_sanity_check
@pytest.mark.abc
def test_v6_sanity_check_subnet_id_none():
    """Lease sanity-check "none": stale lease must be left untouched.

    Phase 1: acquire a lease under subnet-id 666.  Phase 2: reconfigure the
    running server with subnet-id 999 and lease-checks "none"; the lease
    file must still contain the original lease rows for subnet-id 666.
    """
    # --- phase 1: server configured with subnet-id 666 ---
    misc.test_setup()
    # NOTE(review): pool upper bound '2001:fdf8:f53e:61e4::18' lies outside
    # 2001:db8::/64 -- looks like a corrupted/anonymized literal; verify.
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"none"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: live reconfigure with subnet-id 999, lease-checks "none" ---
    misc.test_setup()
    # NOTE(review): this pool literal is not a valid IPv6 range -- clearly a
    # corrupted/anonymized value; reconstruct the intended pool before use.
    srv_control.config_srv_subnet('2001:db8::/64', '200fdf8:f53e:61e4::18-2001:dbfc00:db20:35b:7399::5')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"none"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'reconfigured')
    srv_msg.forge_sleep('2', 'seconds')
    # "none" means the stale rows for subnet-id 666 must survive untouched.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
@pytest.mark.v6
@pytest.mark.sharednetworks
@pytest.mark.sharedsubnets
@pytest.mark.kea_only
def test_v6_sanity_check_subnet_id():
    """Subnet-id sanity checks across a restart (888) and a reconfigure (999).

    Phase 1: acquire a lease for 2001:db8::1 under subnet-id 666 ("fix").
    Phase 2: restart under subnet-id 888 ("fix-del"), then live-reconfigure
    to subnet-id 999.  A new client (DUID ...:22) must then get the next
    free address (2001:db8::2); after a further wait a third client
    (DUID ...:33) must be refused with status code 2 (NoAddrsAvail).
    """
    # --- phase 1: server configured with subnet-id 666, lease-checks "fix" ---
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:db8::1')
    srv_control.set_conf_parameter_subnet('id', '666', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # Exercise the control socket (responses are not asserted here).
    srv_msg.send_ctrl_cmd_via_socket('{"command": "config-get","arguments": {} }')
    srv_msg.send_ctrl_cmd_via_socket('{"command": "list-commands","arguments": {} }')
    # SOLICIT -> ADVERTISE: server must offer an address (IA_NA sub-option 5).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    # REQUEST -> REPLY: lease 2001:db8::1 must be assigned.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::1')
    # Lease must be in the memfile backend under subnet-id 666.
    srv_msg.lease_file_contains('2001:db8::1,00:03:00:01:f6:f5:f4:f3:f2:01')
    srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
    # --- phase 2: restart with subnet-id 888, then reconfigure to 999 ---
    srv_control.start_srv('DHCP', 'stopped')
    misc.test_setup()
    # NOTE(review): pool upper bound '2001:fdf8:f53e:61e4::18' lies outside
    # 2001:db8::/64 -- looks like a corrupted/anonymized literal; verify
    # (phase-2 assertions below expect 2001:db8::2 to be allocatable).
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '888', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    srv_msg.forge_sleep('12', 'seconds')
    misc.test_setup()
    srv_control.config_srv_subnet('2001:db8::/64', '2001:db8::1-2001:fdf8:f53e:61e4::18')
    srv_control.set_conf_parameter_subnet('id', '999', '0')
    srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
    srv_control.open_control_channel()
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'reconfigured')
    srv_msg.forge_sleep('12', 'seconds')
    # Using UNIX socket on server in path control_socket send {"command": "config-get","arguments": {} }
    # New client (DUID ...:22) should be given the next free address.
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'addr', '2001:db8::2')
    srv_msg.forge_sleep('10', 'seconds')
    # Using UNIX socket on server in path control_socket send {"command": "config-get","arguments": {} }
    # Third client (DUID ...:33) must be refused: NoAddrsAvail (code 2).
    misc.test_procedure()
    srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:33')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_sets_value('Client', 'ia_id', '1234567')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '2')
    # Disabled original steps kept for reference:
    # Response option 3 MUST contain sub-option 5.
    # Response sub-option 5 from option 3 MUST contain address 2001:db8::2.
    # Pause the Test.
@pytest.mark.v6
@pytest.mark.sharednetworks
@pytest.mark.sharedsubnets
@pytest.mark.kea_only
def test_v6_sanity_check_shared_subnet_id():
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:a::/64', '2001:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b-2001:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
srv_control.set_conf_parameter_subnet('id', '666', '0')
srv_control.config_srv_another_subnet_no_interface('2001:db8:b::/64',
'200fc00:db20:35b:7399::5-2001:fdf8:f53e:61e4::18')
srv_control.set_conf_parameter_subnet('id', '777', '1')
srv_control.shared_subnet('0', '0')
srv_control.shared_subnet('1', '0')
srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', '0')
srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix-del"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
srv_msg.send_ctrl_cmd_via_socket('{"command": "config-get","arguments": {} }')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_sets_value('Client', 'ia_id', '1234567')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
# Response sub-option 5 from option 3 MUST contain address 2001:db8:a::1.
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:02')
srv_msg.client_sets_value('Client', 'ia_id', '7654321')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:02')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
srv_msg.lease_file_contains('2001:db8:a::1,00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('666,3000,0,1234567,128,0,0,,f6:f5:f4:f3:f2:01')
srv_msg.lease_file_contains('2001:db8:b::1,00:03:00:01:f6:f5:f4:f3:f2:02')
srv_msg.lease_file_contains('777,3000,0,7654321,128,0,0,,f6:f5:f4:f3:f2:02')
misc.test_setup()
srv_control.config_srv_subnet('2001:db8:a::/64', '200fdf8:f53e:61e4::18-2001:db8:a::1')
srv_control.set_conf_parameter_subnet('id', '888', '0')
srv_control.config_srv_another_subnet_no_interface('2001:db8:b::/64',
'2fdf8:f53e:61e4::18-20fc00:db20:35b:7399::5')
srv_control.set_conf_parameter_subnet('id', '999', '1')
srv_control.shared_subnet('0', '0')
srv_control.shared_subnet('1', '0')
srv_control.set_conf_parameter_shared_subnet('name', '"name-abc"', '0')
srv_control.set_conf_parameter_shared_subnet('interface', '"$(SERVER_IFACE)"', '0')
srv_control.set_conf_parameter_global('sanity-checks', '{"lease-checks":"fix"}')
srv_control.open_control_channel()
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'reconfigured')
srv_msg.forge_sleep('10', 'seconds')
| StarcoderdataPython |
100400 | <reponame>nawafalqari/jsonwriter<filename>tests/tests.py
import __init__ as jsonWriter
db = jsonWriter.file('tests.json')
print(db.get('name')) # Nawaf
print(db.get('age')) # 10
db.set('age', 30, indent=None) # {"name": "Nawaf", "age": 30}
db.set('age', 30, indent=3)
'''
{
"name": "Nawaf",
"age": 30
}
'''
print(db.hasKey('name')) # True
print(db.hasValue('Nawaf')) # True
print(db.hasAll('name')) # True
print(db.hasAll('Nawaf')) # True | StarcoderdataPython |
1775347 | <gh_stars>0
import random
from plugins import AIchat
from plugins import dataManage
# 自动回复部分
screenWords = []
unknown_reply = ['诶?', '你说的话太深奥了', '我也不是很清楚呢', '不知道哦~', '你猜', '这是什么意思呢?', '嘤嘤嘤~我听不懂']
def reply(message, be_at, config, statistics, nickname, group_id, qq, mode):
global screenWords
screenWords = dataManage.read_screen_word()
bot_qq = config['qq']
bot_name = config['name']
need_reply = False
need_at = False
reply_text = ''
reply_image = ''
if be_at:
need_reply, need_at, reply_text, reply_image = forced_reply(config, bot_name, nickname, message)
else:
need_reply, need_at, reply_text, reply_image = self_reply(statistics, config, bot_name, nickname, message)
if need_reply:
for i in screenWords:
if i in reply_text:
if be_at:
reply_text = random.choice(unknown_reply)
else:
need_reply = False
reply_text = ''
reply_image = ''
break
return need_reply, reply_text, reply_image, 0, need_at
def forced_reply(config, bot_name, nickname, message):
need_reply = False
need_at = False
reply_text = ''
reply_image = ''
need_reply, need_at, reply_text, reply_image = reply_word(bot_name, nickname, message)
if not need_reply: # 调用ai
reply_text = AIchat.getReply(config, message)
need_reply = True
return need_reply, need_at, reply_text, reply_image
def self_reply(statistics, config, bot_name, nickname, message):
need_reply = False
need_at = False
reply_text = ''
reply_image = ''
# 强制触发词
if message == 'yjy爬':
reply_text = 'yjy快爬'
need_reply = True
elif message == '来一张涩图':
reply_text = '能不能多读书,少看涩图'
need_reply = True
elif message == '骂我':
reply_text = '咦惹?你是弱0吗'
need_reply = True
elif message == '晚安' or message == '安安' or message == '晚':
reply_list = ['晚安', 'image晚安', '晚安哦' + nickname, '记得要梦见' + bot_name, '快睡吧']
reply_text = reply_list[random.randrange(0, len(reply_list))]
if reply_text == 'image晚安':
reply_text = ''
reply_image = 'data/AutoReply/晚安.png'
need_reply = True
elif message == '早安' or message == '早' or message == '早上好':
reply_list = ['早安', '早鸭~', '早安哦' + nickname, '小懒猪,你比' + bot_name + '晚起了好多', '又是元气满满的一天呢']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '午安' or message == '睡午觉了':
reply_text = '午安呀!' + nickname
need_reply = True
elif message == '中午好':
reply_text = '中午好鸭!' + nickname
need_reply = True
elif message == '晚上好':
reply_text = '晚上好鸭!' + nickname
need_reply = True
if need_reply:
return need_reply, need_at, reply_text, reply_image
# 非强制触发词回复内容
rand = random.randrange(0, 10)
if rand > 4:
return need_reply, need_at, reply_text, reply_image
need_reply, need_at, reply_text, reply_image = reply_word(bot_name, nickname, message)
if not need_reply:
tmpNumber = random.randrange(0, 1000)
if tmpNumber < 10:
if statistics['last_minute'] <= 10:
reply_text = AIchat.getReply(config, message)
need_reply = True
if need_reply:
statistics['last_minute'] += 1
dataManage.save_statistics(statistics)
return need_reply, need_at, reply_text, reply_image
def reply_word(bot_name, nickname, message):
need_reply = False
need_at = False
reply_text = ''
reply_image = ''
if message == '你好':
reply_text = '你好呀,' + nickname + '。小柒很高兴遇见你!'
need_at = True
need_reply = True
elif message == '抱抱':
reply_list = ['抱抱呀!', bot_name + '才不要和你抱抱!', '抱抱', '抱抱' + nickname]
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '贴贴':
reply_list = ['贴贴', 'image贴贴', '快来贴贴,嘿嘿!', '不贴不贴']
reply_text = reply_list[random.randrange(0, len(reply_list))]
if reply_text == 'image贴贴':
reply_text = ''
reply_image = 'data/AutoReply/贴贴.jpg'
need_reply = True
elif message == '晚安' or message == '安安':
reply_list = ['晚安', 'image晚安', '晚安哦' + nickname, '记得要梦见' + bot_name, '快睡吧']
reply_text = reply_list[random.randrange(0, len(reply_list))]
if reply_text == 'image晚安':
reply_text = ''
reply_image = 'data/AutoReply/晚安.png'
need_reply = True
elif message == '早安' or message == '早':
reply_text = '早哦,' + nickname
need_reply = True
elif message == '午安' or message == '睡午觉了':
reply_text = '午安呀!' + nickname
need_reply = True
elif message == '谢谢':
reply_list = ['嘿嘿', '不用谢啦', '要时刻想着' + bot_name, '没事啦']
reply_text = reply_list[random.randrange(0, len(reply_list))]
elif message == '快来' or message == '快来快来':
reply_list = ['游戏启动', '来了来了', '不要着急嘛']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '傻子':
reply_text = '你才是傻子,' + bot_name + '才不傻'
need_reply = True
elif message == '笨蛋':
reply_text = bot_name + '才不要理你了'
need_reply = True
elif message == '蠢货':
reply_text = '哼'
need_reply = True
elif message == '你是猪吗' or message == '猪':
reply_text = '你以为谁都像你一天天哼唧哼唧的'
need_reply = True
elif message == '人工智障':
reply_text = '哎呀呀呀!我已经很努力了QAQ'
need_reply = True
elif message == '爱不爱我' or message == '爱我吗':
reply_list = ['爱你鸭!', bot_name + '超级爱你的~', '不爱,略略略']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '喜不喜欢我' or message == '喜欢我吗':
reply_list = ['喜欢你鸭!', bot_name + '超级喜欢你的~', '不喜欢,略略略']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '我是fw' or message == '我是废物':
reply_text = '在' + bot_name + '心中,' + nickname + '一直都很厉害的哦~'
need_reply = True
elif message == '摸了' or message == '摸鱼' or message == '摸鱼了':
reply_text = nickname + '桑怎么可以摸鱼呢'
need_reply = True
elif message == '也不是不行':
reply_text = nickname + '那就快冲!'
need_reply = True
elif message == '?' or message == '?':
tmpNumber = random.randrange(0, 10)
if tmpNumber == 2:
reply_list = ['怎么啦?', '嗯哼?']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif tmpNumber == 1:
reply_image = 'data/AutoReply/问号.jpg'
need_reply = True
elif message == '好家伙':
tmpNumber = random.randrange(0, 5)
if tmpNumber == 3:
reply_list = ['又发生什么辣', '又有什么大事情吗', '什么大事件']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif tmpNumber == 1:
reply_image = 'data/AutoReply/问号.jpg'
need_reply = True
elif message == '有人ow吗':
reply_text = bot_name + '也想来'
need_reply = True
elif message[-2:] == '快来':
reply_text = bot_name + '来了来了'
need_reply = True
elif message[-3:] == '多好啊':
reply_text = '是呀是呀'
need_reply = True
elif message == '上课':
reply_text = bot_name + '陪你一起上课'
need_reply = True
elif message == '满课':
reply_text = '好惨哦'
need_reply = True
elif message == '谢谢':
reply_text = '嘿嘿'
need_reply = True
elif message == '你们早上都没课的嘛':
reply_text = bot_name + '还没有开始上课呢'
need_reply = True
elif message == '早八' or message == '又是早八' or message == '明天早八' or message == '我明天早八':
reply_list = ['好惨鸭', bot_name + '抱抱你,不要哭', '摸摸头', '不哭不哭,站起来撸']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '我不配':
reply_list = ['人贵有自知之明', bot_name + '抱抱你,不要哭']
reply_text = reply_list[random.randrange(0, len(reply_list))]
need_reply = True
elif message == '你主人是谁':
reply_text = '你猜我的主人是谁~'
need_reply = True
return need_reply, need_at, reply_text, reply_image
| StarcoderdataPython |
3244270 | <filename>venv/lib/python3.6/site-packages/ansible_collections/vyos/vyos/plugins/module_utils/network/vyos/argspec/ntp_global/ntp_global.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the
# cli_rm_builder.
#
# Manually editing this file is not advised.
#
# To update the argspec make the desired changes
# in the module docstring and re-run
# cli_rm_builder.
#
#############################################
"""
The arg spec for the vyos_ntp module
"""
class Ntp_globalArgs(object): # pylint: disable=R0903
"""The arg spec for the vyos_ntp module"""
argument_spec = {
"config": {
"type": "dict",
"options": {
"allow_clients": {"type": "list", "elements": "str"},
"listen_addresses": {"type": "list", "elements": "str"},
"servers": {
"type": "list",
"elements": "dict",
"options": {
"server": {"type": "str"},
"options": {
"type": "list",
"elements": "str",
"choices": [
"noselect",
"dynamic",
"preempt",
"prefer",
],
},
},
},
},
},
"running_config": {"type": "str"},
"state": {
"type": "str",
"choices": [
"deleted",
"merged",
"overridden",
"replaced",
"gathered",
"rendered",
"parsed",
],
"default": "merged",
},
} # pylint: disable=C0301
| StarcoderdataPython |
1636373 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.urls import re_path
from django.contrib import admin
from django.contrib.sites.models import Site
from django.views import defaults as default_views
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views.generic.base import RedirectView
# from rest_framework.authtoken.views import obtain_auth_token
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from core.views import index
from onetimelink import presettings
# admin.site.unregister(Site)
admin.site.site_header = 'MyTemplate Administration'
admin.site.index_title = 'MyTemplate'
admin.site.site_title = 'Admin'
schema_view = get_schema_view(
openapi.Info(
title='MyTemplate API',
default_version='v1',
description='For MyTemplate',
contact=openapi.Contact(email='<EMAIL>'),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
url(r'^favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('img/favicon.ico'))),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
url(r'^accounts/', include('allauth.urls')),
url(r'^$', index, name='home'),
url(r'^%s/' % presettings.DYNAMIC_LINK_URL_BASE_COMPONENT, include('onetimelink.urls')),
url(r'^v1/', ([url(r'swagger-ui/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui')], None, 'v1')),
re_path('api/v1/', include('backend.v1.urls', namespace='v1')),
url(r'^v2/', ([url(r'swagger-ui/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui')], None, 'v2')),
re_path('api/v2/', include('backend.v2.urls', namespace='v2')),
]
urlpatterns += static('static', document_root=settings.STATIC_ROOT)
urlpatterns += static('media', document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| StarcoderdataPython |
3334544 | <gh_stars>1-10
# Copyright 2016-2021 <NAME>, 43ravens
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NEMO_Nowcast framework message object.
"""
import attr
import yaml
@attr.s
class Message:
"""Construct a :py:class:`nemo_nowcast.message.Message` instance.
"""
#: Name of the worker or manager sending the message.
source = attr.ib()
#: Key of a message type that is defined for source in the message
#: registry section of the configuration data structure.
type = attr.ib()
#: Content of message; must be serializable by YAML such that it can be
#: deserialized by :py:func:`yaml.safe_load`.
payload = attr.ib(default=None)
def serialize(self):
"""Construct a message data structure and transform it into a string
suitable for sending.
:returns: Message data structure serialized using YAML.
"""
return yaml.dump(
{"source": self.source, "type": self.type, "payload": self.payload}
)
@classmethod
def deserialize(cls, message):
"""Transform received message from str to message data structure.
:arg str message: Message dict serialized using YAML.
:returns: :py:class:`nemo_nowcast.lib.Message` instance
"""
msg = yaml.safe_load(message)
return cls(source=msg["source"], type=msg["type"], payload=msg["payload"])
| StarcoderdataPython |
27463 | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.utils.text import slugify
class SharedFolder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
users = models.ManyToManyField(User, through='Collaborator', related_name='shared_folders')
class Meta:
verbose_name = 'Shared Folder'
verbose_name_plural = 'Shared Folders'
ordering = ('name',)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
if not self.pk:
super(SharedFolder, self).save(*args, **kwargs)
base_slug = slugify(self.name)
if len(base_slug) > 0:
base_slug = slugify(u'{0} {1}'.format(self.name, self.pk))
else:
base_slug = self.pk
i = 0
unique_slug = base_slug
while SharedFolder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(SharedFolder, self).save(*args, **kwargs)
class Collaborator(models.Model):
READ = 'R'
WRITE = 'W'
ADMIN = 'A'
ACCESS_TYPES = (
(READ, 'Read'),
(WRITE, 'Write'),
(ADMIN, 'Admin'),
)
user = models.ForeignKey(User,on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder,on_delete=models.CASCADE)
joined_at = models.DateTimeField(auto_now_add=True)
is_owner = models.BooleanField(default=False)
access = models.CharField(max_length=1, choices=ACCESS_TYPES, default=READ)
class Meta:
verbose_name = 'Collaborator'
verbose_name_plural = 'Collaborators'
def save(self, *args, **kwargs):
if self.is_owner:
self.access = Collaborator.ADMIN
super(Collaborator, self).save(*args, **kwargs)
class Document(models.Model):
ARTICLE = 'article'
BOOK = 'book'
BOOKLET = 'booklet'
CONFERENCE = 'conference'
INBOOK = 'inbook'
INCOLLECTION = 'incollection'
INPROCEEDINGS = 'inproceedings'
MANUAL = 'manual'
MASTERSTHESIS = 'mastersthesis'
MISC = 'misc'
PHDTHESIS = 'phdthesis'
PROCEEDINGS = 'proceedings'
TECHREPORT = 'techreport'
UNPUBLISHED = 'unpublished'
ENTRY_TYPES = (
(ARTICLE, 'Article'),
(BOOK, 'Book'),
(BOOKLET, 'Booklet'),
(CONFERENCE, 'Conference'),
(INBOOK, 'Inbook'),
(INCOLLECTION, 'Incollection'),
(INPROCEEDINGS, 'Inproceedings'),
(MANUAL, 'Manual'),
(MASTERSTHESIS, 'Master\'s Thesis'),
(MISC, 'Misc'),
(PHDTHESIS, 'Ph.D. Thesis'),
(PROCEEDINGS, 'Proceedings'),
(TECHREPORT, 'Tech Report'),
(UNPUBLISHED, 'Unpublished'),
)
# Bibtex required fields
bibtexkey = models.CharField('Bibtex key', max_length=255, null=True, blank=True)
entry_type = models.CharField('Document type', max_length=13, choices=ENTRY_TYPES, null=True, blank=True)
# Bibtex base fields
address = models.CharField(max_length=2000, null=True, blank=True)
author = models.TextField(max_length=1000, null=True, blank=True)
booktitle = models.CharField(max_length=1000, null=True, blank=True)
chapter = models.CharField(max_length=1000, null=True, blank=True)
crossref = models.CharField('Cross-referenced', max_length=1000, null=True, blank=True)
edition = models.CharField(max_length=1000, null=True, blank=True)
editor = models.CharField(max_length=1000, null=True, blank=True)
howpublished = models.CharField('How it was published', max_length=1000, null=True, blank=True)
institution = models.CharField(max_length=1000, null=True, blank=True)
journal = models.CharField(max_length=1000, null=True, blank=True)
month = models.CharField(max_length=50, null=True, blank=True)
note = models.CharField(max_length=2000, null=True, blank=True)
number = models.CharField(max_length=1000, null=True, blank=True)
organization = models.CharField(max_length=1000, null=True, blank=True)
pages = models.CharField(max_length=255, null=True, blank=True)
publisher = models.CharField(max_length=1000, null=True, blank=True)
school = models.CharField(max_length=1000, null=True, blank=True)
series = models.CharField(max_length=500, null=True, blank=True)
title = models.CharField(max_length=1000, null=True, blank=True)
publication_type = models.CharField(max_length=1000, null=True, blank=True) # Type
volume = models.CharField(max_length=1000, null=True, blank=True)
year = models.CharField(max_length=50, null=True, blank=True)
# Extra fields
abstract = models.TextField(max_length=4000, null=True, blank=True)
coden = models.CharField(max_length=1000, null=True, blank=True)
doi = models.CharField('DOI', max_length=255, null=True, blank=True)
isbn = models.CharField('ISBN', max_length=255, null=True, blank=True)
issn = models.CharField('ISSN', max_length=255, null=True, blank=True)
keywords = models.CharField(max_length=2000, null=True, blank=True)
language = models.CharField(max_length=1000, null=True, blank=True)
url = models.CharField('URL', max_length=1000, null=True, blank=True)
# Parsifal management field
user = models.ForeignKey(User, null=True, related_name='documents',on_delete=models.CASCADE)
review = models.ForeignKey('reviews.Review', null=True, related_name='documents',on_delete=models.CASCADE)
shared_folder = models.ForeignKey(SharedFolder, null=True, related_name='documents',on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document'
verbose_name_plural = 'Documents'
def __unicode__(self):
return self.title
def document_file_upload_to(instance, filename):
return u'library/{0}/'.format(instance.document.user.pk)
class DocumentFile(models.Model):
document = models.ForeignKey(Document, related_name='files',on_delete=models.CASCADE)
document_file = models.FileField(upload_to='library/')
filename = models.CharField(max_length=255)
size = models.IntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = 'Document File'
verbose_name_plural = 'Document Files'
def __unicode__(self):
return self.filename
class Folder(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User, related_name='library_folders',on_delete=models.CASCADE)
documents = models.ManyToManyField(Document)
class Meta:
verbose_name = 'Folder'
verbose_name_plural = 'Folders'
ordering = ('name',)
unique_together = (('name', 'user'),)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
base_slug = slugify(self.name)
if len(base_slug) > 0:
unique_slug = base_slug
else:
base_slug = unique_slug = 'untitled-folder'
i = 0
while Folder.objects.filter(slug=unique_slug).exists():
i += 1
unique_slug = u'{0}-{1}'.format(base_slug, i)
self.slug = unique_slug
super(Folder, self).save(*args, **kwargs)
| StarcoderdataPython |
174649 | from django.urls import path
from blog.views import (
BlogPostView,
CommentView,
CreatePostView,
DeletePostView,
DownVoteView,
HotPostsView,
IndexView,
UpdatePostView,
UpvoteView
)
urlpatterns = [
path('', IndexView.as_view(), name='index'),
path('hot_posts/', HotPostsView.as_view(), name='hot_posts'),
path('post/create/', CreatePostView.as_view(), name='create_post'),
path('post/<int:post_id>/', BlogPostView.as_view(), name='blog_post'),
path('post/<int:post_id>/update/', UpdatePostView.as_view(), name='update_post'),
path('post/<int:post_id>/delete/', DeletePostView.as_view(), name='delete_post'),
path('post/<int:post_id>/upvote/', UpvoteView.as_view(), name='upvote_post'),
path('post/<int:post_id>/downvote/', DownVoteView.as_view(), name='downvote_post'),
path('post/<int:post_id>/comment/', CommentView.as_view(), name='comment_post')
]
| StarcoderdataPython |
97497 | from smtp_project import EmailExecutor
# TODO: still to be implemented with Django
| StarcoderdataPython |
3380994 | import os
import pandas as pd
import numpy as np
import sys
import seaborn as sns
import matplotlib.pyplot as plt
sys.path.append('../')
from load_paths import load_box_paths
datapath, projectpath, wdir,exe_dir, git_dir = load_box_paths()
from processing_helpers import *
"""Define function methods"""
def load_data(column_list=None, remove_nas=False):
"""Read in only relevant columns """
if column_list == None:
column_list =['icu_length', 'hosp_length', 'age_group','res_county','res_state','hosp_yn', 'icu_yn', 'death_yn']
df_full = pd.read_csv(os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'il_cdc_thru_0811.csv'),
usecols=column_list)
df = df_full.copy()
"""Remove Missings and Unknowns """
if remove_nas:
df = df.dropna(subset=["hosp_length"])
df = df.dropna(subset=["age_group"])
df = df.dropna(subset=["death_yn"])
df = df[df['age_group'] != 'Unknown' ]
df = df[df['icu_yn'] != 'Unknown' ]
df = df[df['icu_yn'] != 'Missing' ]
#print(df)
return df
def LOS_descriptive_tables(groupList, channel='hosp_length', sortByList=None, fname=None):
df_summary = df.groupby(groupList)[channel].agg(
[np.mean, CI_2pt5, CI_25, CI_50, CI_75, CI_97pt5]).reset_index()
if sortByList != None:
df_summary = df_summary.sort_values(by=sortByList)
if fname is not None:
df_summary.to_csv(os.path.join(plot_path,f'summary_{"_".join(groupList)}_{channel}_{fname}.csv'))
return df_summary
### Simple histogram, not age structured\
def plot_hist(df, channel='hosp_length') :
plt.rcParams.update({'figure.figsize':(7,5), 'figure.dpi':100})
x = df[channel]
plt.hist(x, bins=50)
plt.gca().set(title=channel, ylabel='Frequency');
return plt
### Function for age structured plot
def plot_hist_by_grp(df, channel='hosp_length',groups = None, grp_name = None, truncate_at=20) :
## Get age groups
if groups == None:
groups = ['0 - 9 Years', '10 - 19 Years', '20 - 29 Years', '30 - 39 Years', '40 - 49 Years', '50 - 59 Years',
'60 - 69 Years', '70 - 79 Years', '80+ Years']
if grp_name == None:
grp_name = 'age_group'
palette = sns.color_palette('husl', len(groups))
fig = plt.figure(figsize=(10, 6))
fig.subplots_adjust(right=0.97, left=0.1, hspace=0.4, wspace=0.3, top=0.90, bottom=0.05)
fig.suptitle(x=0.5, y=0.999, t='Hospital LOS')
for c, grp in enumerate(groups):
if len(groups)==9:
ax = fig.add_subplot(3, 3, c + 1)
else:
ax = fig.add_subplot(4, 4, c + 1)
mdf = df[df[grp_name] == grp]
if truncate_at is not None:
mdf.loc[mdf[channel] >truncate_at, channel] = truncate_at
median = np.median(mdf[channel])
ax.hist(mdf[channel], bins=50, color=palette[0])
ax.set_title(groups[c])
ax.axvline(x=median, color='#737373', linestyle='--')
ax.set(xlabel='', ylabel='Frequency')
plt.savefig(os.path.join(plot_path, f'{channel}_by_{grp_name}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{channel}_by_{grp_name}.pdf'), format='PDF')
return plt
def plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn", groups = None, grp_name = None,truncate_at=None) :
## Get age groups
if groups == None:
groups = ['0 - 9 Years', '10 - 19 Years', '20 - 29 Years', '30 - 39 Years', '40 - 49 Years', '50 - 59 Years',
'60 - 69 Years', '70 - 79 Years', '80+ Years']
if grp_name == None:
grp_name = 'age_group'
palette = sns.color_palette('Set1', len(groups))
fig = plt.figure(figsize=(10, 6))
fig.subplots_adjust(right=0.97, left=0.1, hspace=0.4, wspace=0.3, top=0.90, bottom=0.05)
fig.suptitle(x=0.5, y=0.999, t='Hospital LoS by ICU admission status ')
for c, grp in enumerate(groups):
if len(groups)==9:
ax = fig.add_subplot(3, 3, c + 1)
else:
ax = fig.add_subplot(4, 4, c + 1)
mdf = df[df[grp_name] == grp]
if truncate_at is not None:
mdf.loc[mdf[channel] > truncate_at, channel] = truncate_at
ax.hist(mdf[mdf[color_channel]=='Yes'][channel], bins=50, color=palette[0], label="ICU yes", alpha=0.6)
ax.hist(mdf[mdf[color_channel]=='No'][channel], bins=50, color=palette[1], label="ICU no", alpha=0.6)
ax.axvline(x=np.median(mdf[mdf[color_channel]=='Yes'][channel]), color=palette[0], linestyle='--')
ax.axvline(x=np.median(mdf[mdf[color_channel]=='No'][channel]), color=palette[1], linestyle='--')
ax.set(xlabel='', ylabel='Frequency')
ax.set_title(groups[c] ) #,fontweight="bold"
ax.legend()
plotname = f'{channel}_colorby_{color_channel}_by_{grp_name}'
if truncate_at is not None:
plotname = plotname +'_truncated'
plt.savefig(os.path.join(plot_path, f'{plotname}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plotname}.pdf'), format='PDF')
return plt
if __name__ == '__main__':
"""Basic descriptive tables"""
plot_path = os.path.join(projectpath, 'Plots + Graphs','Age Model - MS')
df=load_data(remove_nas=True)
pd.crosstab(index=df['age_group'], columns='count')
LOS_descriptive_tables(channel='hosp_length',groupList=['age_group', 'death_yn'])
LOS_descriptive_tables(channel='hosp_length',groupList=['age_group', 'icu_yn'], sortByList=['icu_yn','age_group'])
df = df[df['hosp_length'] !=0 ]
LOS_descriptive_tables(groupList=['age_group', 'death_yn'])
LOS_descriptive_tables(groupList=['age_group', 'death_yn'], sortByList=['death_yn','age_group'],fname='_by_death_yn')
LOS_descriptive_tables(groupList=['age_group', 'icu_yn'], sortByList=['icu_yn','age_group'],fname='icu_yn')
## Same histogra, with colors by ICU_yn
plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn")
plot_hist_by_grp_2(df, channel='hosp_length',color_channel = "icu_yn", truncate_at=20)
"""Compare by region"""
df = load_data(remove_nas=True)
df = df.dropna(subset=["res_county"])
df = merge_county_covidregions(df_x=df, key_x='res_county', key_y='County')
pd.crosstab(index=df['covid_region'], columns='count')
LOS_descriptive_tables(channel='hosp_length',groupList=['covid_region', 'death_yn'])
LOS_descriptive_tables(channel='hosp_length',groupList=['covid_region', 'icu_yn'], sortByList=['icu_yn','covid_region'])
df = df[df['hosp_length'] !=0 ]
LOS_descriptive_tables(groupList=['covid_region', 'death_yn'])
LOS_descriptive_tables(groupList=['covid_region', 'death_yn'], sortByList=['death_yn','covid_region'],fname='_by_death_yn')
LOS_descriptive_tables(groupList=['covid_region', 'icu_yn'], sortByList=['icu_yn','covid_region'],fname='icu_yn')
plot_hist_by_grp(df=df, grp_name='covid_region', groups=list(range(1,12)))
plot_hist_by_grp_2(df=df, grp_name='covid_region', groups=list(range(1,12))) | StarcoderdataPython |
1776472 | <reponame>hofbi/driver-awareness<gh_stars>0
"""Plot SA evaluation"""
import argparse
import os
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import tikzplotlib
def plot_data(input_dir, output_dir, main_scenario, show_plot):
"""plot SA evaluation data"""
sa_df = pd.read_csv(input_dir.joinpath(main_scenario))
bag_frequency = 0.05
sa_df["time"] = sa_df.index * bag_frequency
actual = sa_df["actual_sa_mean"]
sigma = sa_df["actual_sa_sigma"]
act_high = actual + sigma
act_low = actual - sigma
plt.figure("Average SA for a turn with 3 users")
plt.plot(sa_df["time"], sa_df["optimal_sa"], label="$SA_{opt}$")
plt.plot(sa_df["time"], actual, label="$SA_{act}$")
plt.fill_between(sa_df["time"], act_high, act_low, facecolor="yellow", alpha=0.5)
plt.legend()
plt.xlabel("Time [s]", labelpad=-5)
plt.ylabel("SA [1]")
if not show_plot:
tikzplotlib.save(output_dir.joinpath("sa_measure.tex"))
clear_plot_data()
actual_headers = sa_df.filter(regex=r"actual_sa_\d").columns
plt.figure("Individual SAs for a turn with 3 users", figsize=[24, 3])
for index, name in enumerate(actual_headers):
plt.subplot(f"2{int(len(actual_headers)/2)}{index+1}")
plt.plot(sa_df["time"], sa_df["optimal_sa"], label="$SA_{opt}$")
plt.plot(sa_df["time"], sa_df[name], label="$SA_{act}$")
plt.ylim(bottom=0)
plt.legend()
if index >= len(actual_headers) / 2:
plt.xlabel("Time [s]")
if index == 0 or index == len(actual_headers) / 2:
plt.ylabel("SA [1]", labelpad=-2)
plt.title(f"User {index+1}", y=0.75, x=0.1)
if not show_plot:
tikzplotlib.save(output_dir.joinpath("sa_measure_all.tex"))
clear_plot_data()
plt.figure("SA ratio")
ratio = sa_df["sa_mean"]
ratio_sigma = sa_df["sa_sigma"]
ratio_high = ratio + ratio_sigma
ratio_low = ratio - ratio_sigma
plt.plot(sa_df["time"], ratio)
plt.fill_between(
sa_df["time"], ratio_high, ratio_low, facecolor="yellow", alpha=0.5
)
plt.xlabel("Time [s]")
plt.ylabel("SA [1]", labelpad=-5)
if not show_plot:
tikzplotlib.save(output_dir.joinpath("sa_ratio.tex"))
clear_plot_data()
print(
"Scenario & \
$\\overline{SA}_{opt}$ & $\\overline{SA}_{act}$ & $\\overline{SA}_{ratio}$ ($\\pm\\sigma$) \\\\"
)
for file_name in [
input_dir.joinpath(file_name)
for file_name in os.listdir(input_dir)
if file_name.endswith(".csv")
]:
sa_df = pd.read_csv(file_name)
print(
f"{file_name.stem} & {sa_df['optimal_sa'].mean():.2f} & \
{sa_df['actual_sa_mean'].mean():.2f} & {sa_df['sa_mean'].mean():.2f} \
$\\pm$({sa_df['sa_sigma'].mean():.2f}) \\\\" # noqa: W605
)
def clear_plot_data():
"""clear plot data"""
plt.clf()
plt.close()
def main():
"""main"""
parser = argparse.ArgumentParser(
description="Plot SA evaluation.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--eval_dir",
type=str,
default=Path(__file__).parent.joinpath("data"),
help="Path to the sa measurements",
)
parser.add_argument(
"--main_scenario",
type=str,
default="sa_turn_with_three_users.csv",
help="Name of the main scenario csv file",
)
parser.add_argument(
"-o",
"--out_dir",
type=str,
default=Path(__file__).parent,
help="Path to the output directory",
)
parser.add_argument("-s", "--show", action="store_true", help="Show the plot")
args = parser.parse_args()
plot_data(Path(args.eval_dir), Path(args.out_dir), args.main_scenario, args.show)
if args.show:
plt.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| StarcoderdataPython |
3225815 | import os
from os import listdir
from os.path import isfile, join
import glob
import argparse
from rasa.cli.test import run_nlu_test
from rasa.test import perform_nlu_cross_validation
from rasa.shared.data import get_nlu_directory
args = argparse.Namespace
# onlyfiles = [f for f in listdir("pipelines/") if isfile(join("pipelines/", f))]
# print (onlyfiles)
pipelines = glob.glob("pipelines\\*")
print(pipelines)
nlu_data = get_nlu_directory("data")
additional_args = {
"loglevel": None,
"model": "models",
"stories": "tests",
"max_stories": None,
"endpoints": None,
"fail_on_prediction_errors": False,
"url": None,
"evaluate_model_directory": False,
"nlu": "data",
"config": None,
"cross_validation": True,
"folds": 5,
"runs": 3,
"percentages": [0, 25, 50, 75],
"disable_plotting": False,
"successes": False,
"no_errors": False,
"out": "results",
"errors": True,
}
for i in range(len(pipelines)):
try:
os.system(
"rasa train nlu --config {} --out stack_models\\{}".format(
pipelines[i], pipelines[i]
)
)
perform_nlu_cross_validation(
"{}".format(pipelines[i]), nlu_data, "results\\{}".format(pipelines[i]), {}
)
except OSError:
print("{} cannot find the model".format(pipelines[i]))
continue
| StarcoderdataPython |
3367626 | import os
import pytest
import shutil
@pytest.fixture()
def sample_service(tmpdir):
shutil.copytree(os.path.dirname(__file__)+'/fixture/sample_service',
str(tmpdir / 'sample_service'))
target = str(tmpdir / 'sample_service')
os.chdir(target)
return target
| StarcoderdataPython |
80597 | <filename>backend/lola-backend/config.py
class Config(object):
DEBUG = True
DEVELOPMENT = True
class ProductionConfig(Config):
DEBUG = False
DEVELOPMENT = False
| StarcoderdataPython |
3355628 | from triton.dns.message.domains.domain import Domain
from .base import ResourceRecord
class CNAME(ResourceRecord):
class _Binary(ResourceRecord._Binary):
@property
def full(self):
return self.resource_record.cname.sub_encode(self.resource_record.cname.label)
id = 5
repr = ['cname']
@classmethod
def parse_bytes(cls, answer, read_len):
instance = cls(answer)
instance.cname = Domain.decode(answer.message)
return instance
@classmethod
def parse_dict(cls, answer, data):
instance = cls(answer)
instance.cname = Domain(data.get('cname'), None)
return instance
@property
def __dict__(self):
return {'cname': self.cname.label}
@classmethod
def from_json(cls, answer, data):
instance = cls(answer)
instance.address = Domain(data.get('address'), None)
return instance
| StarcoderdataPython |
59013 | <reponame>angelaaaateng/awesome-panel<gh_stars>0
"""This module provides the SOCIAL_LINKS list of social links"""
from package.awesome_panel.application.models import SocialLink
SOCIAL_LINKS = [SocialLink(name="Twitter")]
| StarcoderdataPython |
3381135 | <reponame>mbaragiola/drf-demo-app
from rest_framework.serializers import ModelSerializer
from apps.tables.models import Table
class TableSerializer(ModelSerializer):
class Meta:
model = Table
fields = ['table_name', 'fields', ]
# TODO: Validations could be added here.
| StarcoderdataPython |
1629954 | <reponame>MaximKuklin/3D_Object_Detection_Diploma
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mean_average_precision as map
import tqdm
import _init_paths
import os
import cv2
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
from opts import opts
from utils.image import get_affine_transform
from detectors.detector_factory import detector_factory
from datasets.dataset_factory import dataset_factory
from Objectron.objectron.dataset import iou
from Objectron.objectron.dataset import box
# from mean_average_precision.detection_map import DetectionMAP
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']
# class Detection3DMAP(DetectionMAP):
# def __init__(self, n_class, pr_samples=11, overlap_threshold=0.5):
# super().__init__(n_class, pr_samples=pr_samples, overlap_threshold=overlap_threshold)
#
# def evaluate(self, pred_bb, pred_classes, pred_conf, gt_bb, gt_classes):
# """
# Update the accumulator for the running mAP evaluation.
# For exemple, this can be called for each images
# :param pred_bb: (np.array) Predicted Bounding Boxes [x1, y1, x2, y2] : Shape [n_pred, 4]
# :param pred_classes: (np.array) Predicted Classes : Shape [n_pred]
# :param pred_conf: (np.array) Predicted Confidences [0.-1.] : Shape [n_pred]
# :param gt_bb: (np.array) Ground Truth Bounding Boxes [x1, y1, x2, y2] : Shape [n_gt, 4]
# :param gt_classes: (np.array) Ground Truth Classes : Shape [n_gt]
# :return:
# """
#
# IoUmask = None
# if len(pred_bb) > 0:
# IoUmask = self.compute_IoU_mask(pred_bb, gt_bb, self.overlap_threshold)
# for accumulators, r in zip(self.total_accumulators, self.pr_scale):
# self.evaluate_(IoUmask, accumulators, pred_classes, pred_conf, gt_classes, r)
#
# def compute_IoU_mask(self, prediction, gt, overlap_threshold):
# IoU = np.zeros((len(prediction), len(gt)))
# for i in range(len(prediction)):
# for j in range(len(gt)):
# IoU[i, j] = iou.IoU(prediction[i], gt[j]).iou()
#
# # for each prediction select gt with the largest IoU and ignore the others
# for i in range(len(prediction)):
# maxj = IoU[i, :].argmax()
# IoU[i, :maxj] = 0
# IoU[i, (maxj + 1):] = 0
# # make a mask of all "matched" predictions vs gt
# return IoU >= overlap_threshold
#
# def get_map(self, interpolated=True, class_names=None):
# """
# Plot all pr-curves for each classes
# :param interpolated: will compute the interpolated curve
# :return:
# """
#
# mean_average_precision = []
# # TODO: data structure not optimal for this operation...
# for cls in range(self.n_class):
# precisions, recalls = self.compute_precision_recall_(cls, interpolated)
# average_precision = self.compute_ap(precisions, recalls)
# mean_average_precision.append(average_precision)
#
# mean_average_precision = sum(mean_average_precision) / len(mean_average_precision)
# return mean_average_precision
class PrefetchDataset(torch.utils.data.Dataset):
    """Torch dataset that loads one annotated image per index and runs the
    detector's pre-processing step for each configured scale."""

    def __init__(self, opt, dataset, pre_process_func):
        """Wrap a project dataset exposing coco-style annotations."""
        self.opt = opt
        self.pre_process_func = pre_process_func
        self.images = dataset.images
        self.coco = dataset.coco
        self.load_image_func = dataset.coco.loadImgs
        self.img_dir = dataset.img_dir
        self.mean, self.std = dataset.mean, dataset.std

    def grab_frame(self, video_path, frame):
        """Return the frame numbered *frame* from the video at *video_path*."""
        capture = cv2.VideoCapture(video_path)
        capture.set(cv2.CAP_PROP_POS_FRAMES, frame)
        success, frame_img = capture.read()
        capture.release()
        return frame_img

    def __getitem__(self, index):
        """Return (image id, inputs dict, stacked ground-truth 3D boxes)."""
        img_id = self.images[index]
        info = self.load_image_func(ids=[img_id])[0]
        image_path = os.path.join(self.img_dir, info['file_name'])
        annotations = self.coco.loadAnns(ids=self.coco.getAnnIds(imgIds=[img_id]))
        # Each annotation carries flattened 3D keypoints; reshape to (-1, 3).
        gt_3d_box = np.stack(
            [np.array(ann['keypoints_3d']).reshape(-1, 3) for ann in annotations]
        )
        img = cv2.imread(image_path)
        images, meta = {}, {}
        for scale in [1.0]:
            images[scale], meta[scale] = self.pre_process_func(img, scale)
        return img_id, {'images': images, 'image': img, 'meta': meta}, gt_3d_box

    def __len__(self):
        """Number of images in the split."""
        return len(self.images)
def calc_metric(opt):
    """Run the detector over the test split and report 3D mAP at IoU 0.5.

    NOTE(review): ``Detection3DMAP`` is defined only inside the commented-out
    block at the top of this file, so this function raises ``NameError`` when
    it reaches the mAP computation — confirm which class was intended.
    """
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    torch.cuda.set_device(int(opt.gpus_str))
    split = 'test'
    Detector = detector_factory[opt.task]
    detector = Detector(opt)
    Dataset = dataset_factory[opt.dataset]
    dataset = Dataset(opt, split)
    data_loader = torch.utils.data.DataLoader(
        PrefetchDataset(opt, dataset, detector.pre_process),
        batch_size=1, shuffle=True, num_workers=4, pin_memory=True
    )
    frames = []
    for idx, (img_id, pre_processed_images, boxes_gt) in enumerate(tqdm.tqdm(data_loader)):
        ret = detector.run(pre_processed_images)
        # Per-detection layout appears to be: [..., 27:-2] = 3D box corner
        # coordinates, [-2] = confidence, [-1] = class id — TODO confirm
        # against the detector's output format.
        boxes_3d = [ret['results'][i][:, 27:-2] for i in ret['results']][0]
        probs = [ret['results'][i][:, -2] for i in ret['results']][0]
        pred_classes = [ret['results'][i][:, -1] for i in ret['results']][0]
        box_pred = [box.Box(vertices=box_pred.reshape(-1, 3)) for box_pred in boxes_3d]
        boxes_gt = [box.Box(vertices=box_gt) for box_gt in boxes_gt[0].numpy()]
        if len(boxes_gt) == 0 or len(box_pred) == 0:
            print()  # NOTE(review): placeholder branch — prints a blank line only
        frames.append([box_pred, pred_classes, probs, boxes_gt, np.zeros((len(boxes_gt)))])
    n_class = 1
    mAP = Detection3DMAP(n_class, overlap_threshold=0.5)  # NOTE(review): undefined — see docstring
    for frame in frames:
        mAP.evaluate(*frame)
    mAP_score = mAP.get_map()
    print(f"mAP_score: {mAP_score}")
if __name__ == '__main__':
    # Parse command-line options and run the mAP evaluation.
    opt = opts().init()
    calc_metric(opt)
| StarcoderdataPython |
39056 | <gh_stars>0
import zipfile
import requests
import os
import tempfile
# this script downloads additional pums data files
# legend
# https://www2.census.gov/programs-surveys/acs/tech_docs/pums/data_dict/PUMS_Data_Dictionary_2019.txt
def download_pums_data(year, record_type, state, **_kwargs):
    """Download and extract the 1-year ACS PUMS csv archive for one state.

    Idempotent: returns immediately if the target directory already exists.

    :param year: survey year (e.g. 2010)
    :param record_type: 'person' or 'housing'
    :param state: two-letter lowercase state code (e.g. 'al')
    """
    assert record_type in ('person', 'housing')
    output_dir = get_pums_data_dir(year, record_type, state)
    if os.path.exists(output_dir):
        return  # already downloaded and extracted
    download_url = (
        f"https://www2.census.gov/programs-surveys/acs/data/pums/{year}"
        f"/1-Year/csv_{record_type[0]}{state}.zip"
    )
    with tempfile.TemporaryDirectory() as temp_download_dir:
        archive_path = os.path.join(temp_download_dir, "temp.zip")
        response = requests.get(download_url)
        # Fail loudly on HTTP errors instead of writing an error page to disk
        # and then dying inside ZipFile with a confusing message.
        response.raise_for_status()
        with open(archive_path, 'wb') as resource_file:
            resource_file.write(response.content)
        os.makedirs(output_dir, exist_ok=True)
        with zipfile.ZipFile(archive_path, 'r') as zip_file:
            # extractall replaces the original side-effect list comprehension.
            zip_file.extractall(output_dir)
def get_pums_data_dir(year, record_type, state, **_kwargs):
    """Return the local directory path used for one (year, type, state) dataset."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, "data", f'PUMS_DL_{year}_{record_type}_{state}')
def get_pums_data_path(year, record_type, state, **_kwargs):
    """Return the path of the first .csv file in the dataset directory,
    or None if the directory contains no csv (same as the original's
    implicit fall-through)."""
    data_dir = get_pums_data_dir(year, record_type, state)
    return next(
        (os.path.join(data_dir, name)
         for name in os.listdir(data_dir)
         if name.endswith('.csv')),
        None,
    )
# NOTE(review): the 'public' flag is not read by the download helpers —
# presumably licensing/redistribution metadata; confirm before relying on it.
datasets = [
    {'public': True, 'year': 2010, 'record_type': 'person', 'state': 'al'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'ct'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'ma'},
    {'public': False, 'year': 2010, 'record_type': 'person', 'state': 'vt'},
]
if __name__ == '__main__':
    # Download every configured dataset (download is skipped if already on disk).
    for metadata in datasets:
        print("downloading", metadata)
        print(get_pums_data_dir(**metadata))
        download_pums_data(**metadata)
| StarcoderdataPython |
3201815 | <gh_stars>1-10
from typing import List
import requests
from python_plugin_example.hookimpl import hookimpl
DAD_JOKE_API_ENDPOINT = "https://icanhazdadjoke.com/"
class ICanHazDadJokePlugin:
    """Joke-provider plugin backed by the icanhazdadjoke.com JSON API."""

    @hookimpl
    def retrieve_joke(self, amount: int) -> List[str]:
        """Fetch *amount* dad jokes and return them as formatted strings.

        Each joke is requested individually; a missing ``joke`` field in the
        API response yields an empty string for that entry.
        """
        headers = {
            "Accept": "application/json",
        }
        values = []
        for _ in range(amount):  # loop index was unused
            # Timeout added so a stalled API call cannot hang the host app.
            response = requests.get(DAD_JOKE_API_ENDPOINT, headers=headers, timeout=10)
            values.append(response.json().get("joke", ""))
        jokes = [f"👨 Dad joke: {value}" for value in values]
        return jokes
| StarcoderdataPython |
121293 | <reponame>LucasBalbinoSS/Exercicios-Python
# print()
#
# print('NÚMEROS & MATEMÁTICA')
#
# print()
#
# n = str(input('Digite um número de 0 á 9999: '))
#
# print('Unidades:', n[3], '\nDezenas:', n[2], '\nCentenas:', n[1], '\nMilhar:', n[0])
# Read a number 0-9999 and print its units, tens, hundreds and thousands digits.
n = int(input('\033[35mDigite um número de zero a 9999: '))
u = n % 10          # units digit ("n // 1" was a no-op)
d = n // 10 % 10    # tens digit
c = n // 100 % 10   # hundreds digit
m = n // 1000 % 10  # thousands digit
print('Analisando o número %i...' % n, '\nUnidades: %i' % u, '\nDezenas: %i' % d,
      '\nCentenas: %i' % c, '\nMilhar: %i' % m)
1754808 | <filename>src/akappwid.py
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.lang import Builder
from appkit.src.akstatusbar import AKStatusBar
from appkit.src.akmenu import AKMenu
from appkit.src.akprojects import AKProjects
from appkit.src.projecttabs import AKTabStrip
from appkit.src.cursor import AppCursor
Builder.load_string('''
<AKAppWid>:
width: self.minimum_width
canvas.before:
Color:
rgba: self.app_background_colour
Rectangle:
pos: self.pos
size: self.size
''')
class DummyWidget(Widget):
    """Invisible placeholder widget that occupies no layout space."""

    def __init__(self) -> None:
        super().__init__()
        # size_hint (0,0) stops layouts from stretching it; size (0,0) hides it.
        self.size_hint = (0,0)
        self.size = (0,0)
class AKAppWid(BoxLayout):
    """Top-level application widget: stacks menu, tab strip, projects area and
    status bar vertically, and manages full-screen switching."""

    def __init__(self, main_app=None, contents=None, **kwargs) -> None:
        """Build the app layout.

        :param main_app: owning application object (provides prefs, projects).
        :param contents: optional list of element classes to instantiate in
            order; defaults to [AKMenu, AKTabStrip, AKProjects, AKStatusBar].
        """
        # Must be set before super().__init__ so the kv rule can read it.
        self.app_background_colour = main_app.prefs.theme.app_background_colour
        super().__init__(**kwargs)
        if contents is None:  # was `== None`
            self.contents = [AKMenu, AKTabStrip, AKProjects, AKStatusBar]
        else:
            self.contents = contents
        self.ma = main_app
        self.size_hint = (1,1)
        self.full_screen = self.ma.prefs.full_screen_on_start
        self.orientation = 'vertical'
        Window.minimum_width = self.ma.prefs.min_window_size[0]
        Window.minimum_height = self.ma.prefs.min_window_size[1]
        # Instantiate each content element and add its widget to the layout.
        # (Removed leftover debug print of each element class.)
        self.cont = []
        for i, el in enumerate(self.contents):
            self.cont.append(el(main_app=self.ma))
            self.add_widget(self.cont[i].wid)
        if self.full_screen:
            self.do_full_screen()
        self.app_cursor = AppCursor()

    def re_build_elements(self):
        """Re-populate children: only the projects widget in full screen,
        otherwise every configured content element in order."""
        self.clear_widgets()
        if self.full_screen:
            self.add_widget(self.ma.projects_wid)
        else:
            for index, el in enumerate(self.contents):
                self.add_widget(self.cont[index].wid)

    def update_projects(self):
        """Register any project screens missing from the screen manager and
        switch to each newly added one."""
        for proj in self.ma.projects:
            if proj.screen not in self.ma.sm.children:
                self.ma.sm.add_widget(proj.screen)
                self.ma.sm.current = proj.screen.name

    def do_full_screen(self):
        """Enter full-screen mode and maximise the window."""
        self.full_screen = True
        self.re_build_elements()
        Window.fullscreen = True
        Window.maximize()

    def end_full_screen(self):
        """Leave full-screen mode and restore the normal layout."""
        Window.fullscreen = False
        self.full_screen = False
        self.re_build_elements()

    def toggle_full_screen(self):
        """Switch between full-screen and windowed layouts."""
        if self.full_screen:
            self.end_full_screen()
        else:
            self.do_full_screen()
| StarcoderdataPython |
3314960 | <gh_stars>1-10
# import nltk
# from nltk.corpus import stopwords
# from nltk.stem import WordNetLemmatizer
import csv
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn import tree
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
# Preprocessing
def preprocessing(text):
    """Return *text* unchanged.

    NOTE(review): the original NLTK pipeline (sentence/word tokenisation,
    stop-word removal, lowercasing, WordNet lemmatisation) is commented out
    upstream; re-enable it here if real preprocessing is needed.
    """
    return text
# Read the dataset: one "<label>\t<message>" line per SMS.
file_path = r'C:\Users\Administrator\Desktop\SMSSpamCollectionjsn.txt'
sms = open(file_path, 'r', encoding='utf-8')
sms_data = []
sms_label = []
csv_reader = csv.reader(sms, delimiter='\t')
for line in csv_reader:
    sms_label.append(line[0])
    sms_data.append(preprocessing(line[1]))
sms.close()
# print(sms_data)
# Split 70%/30% into train and test sets, then vectorise with TF-IDF.
dataset_size = len(sms_data)
trainset_size = int(round(dataset_size * 0.7))
print('dataset_size:', dataset_size, ' trainset_size:', trainset_size)
x_train = np.array([''.join(el) for el in sms_data[0:trainset_size]])
y_train = np.array(sms_label[0:trainset_size])
# NOTE(review): slicing from trainset_size + 1 skips one sample entirely —
# confirm whether the test split should start at trainset_size instead.
x_test = np.array(sms_data[trainset_size + 1:dataset_size])
y_test = np.array(sms_label[trainset_size + 1:dataset_size])
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2')
X_train = vectorizer.fit_transform(x_train)
X_test = vectorizer.transform(x_test)
# Naive Bayes classifier
clf = MultinomialNB().fit(X_train, y_train)
y_nb_pred = clf.predict(X_test)
print(y_nb_pred)
print('nb_confusion_matrix:')
cm = confusion_matrix(y_test, y_nb_pred)
print(cm)
print('nb_classification_report:')
cr = classification_report(y_test, y_nb_pred)
print(cr)
# Show the n most indicative features at each end of the sorted coefficients.
feature_names = vectorizer.get_feature_names()
coefs = clf.coef_
intercept = clf.intercept_
coefs_with_fns = sorted(zip(coefs[0], feature_names))
n = 10
top = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
for (coef_1, fn_1), (coef_2, fn_2) in top:
    print('\t%.4f\t%-15s\t\t%.4f\t%-15s' % (coef_1, fn_1, coef_2, fn_2))
# Decision tree (requires dense arrays, hence .toarray())
clf = tree.DecisionTreeClassifier().fit(X_train.toarray(), y_train)
y_tree_pred = clf.predict(X_test.toarray())
print('tree_confusion_matrix:')
cm = confusion_matrix(y_test, y_tree_pred)
print(cm)
print('tree_classification_report:')
print(classification_report(y_test, y_tree_pred))
# Stochastic gradient descent (linear model)
clf = SGDClassifier(alpha=0.0001, n_iter=50).fit(X_train, y_train)
y_SGD_pred = clf.predict(X_test)
print('SGD_confusion_matrix:')
cm = confusion_matrix(y_test, y_SGD_pred)
print(cm)
print('SGD_classification_report:')
print(classification_report(y_test, y_SGD_pred))
# Linear support vector machine
clf = LinearSVC().fit(X_train, y_train)
y_svm_pred = clf.predict(X_test)
print('svm_confusion_matrix:')
cm = confusion_matrix(y_test, y_svm_pred)
print(cm)
print('svm_classification_report:')
print(classification_report(y_test, y_svm_pred))
# Random forest
clf = RandomForestClassifier(n_estimators=10)
clf.fit(X_train, y_train)
y_RF_pred = clf.predict(X_test)
print('RF_confusion_matrix:')
print(confusion_matrix(y_test, y_RF_pred))
print('RF_classification_report:')
print(classification_report(y_test, y_RF_pred))
| StarcoderdataPython |
3209319 | <filename>nvk_ds/__init__.py
"""
The ``Prefect`` small datasources data package.
"""
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.