id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
101568 | <reponame>BAMresearch/ctsimu-toolbox
import os # File and path handling
import numpy
import copy
from ..image import *
from ..helpers import *
from .pipeline import Pipeline
from .step import Step
class Step_Noise(Step):
    """ Add noise to image according to SNR characteristics.
    The noise characteristics must be specified by two lists:
    1. grey values
    2. assigned SNRs
    The characteristics must be sorted by grey values in ascending order.
    Linear interpolation will take place for missing grey values.
    """
    def __init__(self, sigma=None, greyValues=None, SNR=None):
        Step.__init__(self, "Noise")
        self.sigma = None
        self.greyValues = None
        self.SNR = None
        self.setSigma(sigma)
        self.setNoiseCharacteristics(greyValues, SNR)
    def setSigma(self, sigma=None):
        """Set a constant noise sigma; `None` means no noise (sigma = 0)."""
        self.sigma = 0 if sigma is None else sigma
    def setNoiseCharacteristics(self, greyValues=None, SNR=None):
        """Set the SNR curve: `SNR[i]` is the SNR at grey value `greyValues[i]`."""
        self.greyValues = greyValues
        self.SNR = SNR
    def prepare(self):
        """ Nothing to prepare for this module. """
        if not isinstance(self.pipe, Pipeline):
            self.prepared = False
            raise Exception("Step must be part of a processing pipeline before it can prepare.")
        self.prepared = True
    def run(self, image):
        """ Transform given image: add Gaussian noise per pixel. """
        # Build a per-pixel sigma map with the same geometry as the image.
        sigmaMap = copy.deepcopy(image)
        if self.greyValues is None or self.SNR is None:
            # Assign constant sigma to each pixel:
            sigmaMap.erase(self.sigma)
        else:
            # Map grey values to SNR (linear interpolation between list entries):
            sigmaMap.map(gv_from=self.greyValues, gv_to=self.SNR, bins=1000)
            # Calculate sigma from sigma = I / SNR where SNR>0:
            sigmaMap.px = numpy.where(sigmaMap.px > 0, image.px / sigmaMap.px, 0)
        image.noise(sigmaMap.px)
        return image
97594 | <filename>Basic/__main2.py
# import __main
# Greets the user and shows which name this module runs under
# ('__main__' when executed directly, the module name when imported).
print("Hello, Python welcomes you!!! " + __name__)
1769429 | from .biothings_transformer import BioThingsTransformer
class SemmedTransformer(BioThingsTransformer):
    """BioThings transformer for SEMMED: keeps only records whose @type
    matches the edge's output type and renames their id fields."""
    def wrap(self, res):
        """Filter `res` (predicate -> list of records) by output type,
        renaming 'umls' -> 'UMLS' and 'pmid' -> 'pubmed' on each keeper."""
        result = {}
        for predicate, records in res.items():
            if not (isinstance(records, list) and len(records) > 0):
                continue
            keepers = []
            for record in records:
                wanted = self.edge["association"]["output_type"]
                # 'DiseaseOrPhenotypicFeature' records also satisfy 'Disease'.
                matches = record["@type"] == wanted or (
                    record["@type"] == "DiseaseOrPhenotypicFeature"
                    and wanted == "Disease"
                )
                if not matches:
                    continue
                record["UMLS"] = record.pop("umls")
                record["pubmed"] = record.pop("pmid")
                keepers.append(record)
            if keepers:
                result[predicate] = keepers
        return result
    def jsonTransform(self, res):
        """Identity transform — SEMMED responses are already JSON-shaped."""
        return res
| StarcoderdataPython |
1656050 | import day14.src as src
def test_run_race():
    """run_race on the sample input yields the known distances."""
    distances = src.run_race(src.load_data(src.TEST_INPUT_FILE), 1000)
    assert distances['Comet'] == 1120
    assert distances['Dancer'] == 1056
def test_part1():
    """Part 1 on the sample input matches the documented answer."""
    answer = src.part1(src.TEST_INPUT_FILE, 1000)
    assert answer == 1120
def test_part1_full():
    """Part 1 on the full puzzle input (regression check)."""
    answer = src.part1(src.FULL_INPUT_FILE)
    assert answer == 2696
def test_score_race():
    """score_race on the sample input yields the known point totals."""
    points = src.score_race(src.load_data(src.TEST_INPUT_FILE), 1000)
    assert points['Comet'] == 312
    assert points['Dancer'] == 689
def test_part2():
    """Part 2 on the sample input matches the documented answer."""
    answer = src.part2(src.TEST_INPUT_FILE, 1000)
    assert answer == 689
def test_part2_full():
    """Part 2 on the full puzzle input (regression check)."""
    answer = src.part2(src.FULL_INPUT_FILE)
    assert answer == 1084
| StarcoderdataPython |
3268396 | """Utils for ViViT-based regression models."""
from typing import Any
from absl import logging
import flax
import jax.numpy as jnp
import ml_collections
import numpy as np
from scenic.common_lib import debug_utils
from scenic.projects.vivit import model_utils as vivit_model_utils
# Re-export the ViViT kernel initializers so callers can import them from
# this module instead of reaching into scenic.projects.vivit.model_utils.
average_frame_initializer = vivit_model_utils.average_frame_initializer
central_frame_initializer = vivit_model_utils.central_frame_initializer
def initialise_from_train_state(
    config,
    train_state: Any,
    restored_train_state: Any,
    restored_model_cfg: ml_collections.ConfigDict,
    restore_output_proj: bool,
    vivit_transformer_key: str = 'Transformer',
    log_initialised_param_shapes: bool = True) -> Any:
  """Updates the train_state with data from restored_train_state.

  We do not reuse this from vivit/model_utils in order to handle position
  embeddings and input embeddings differently in init_posemb and
  init_embedding, respectively.

  This function is written to be used for 'fine-tuning' experiments. Here, we
  do some surgery to support larger resolutions (longer sequence length) in
  the transformer block, with respect to the learned pos-embeddings.

  Args:
    config: Configurations for the model being updated.
    train_state: A raw TrainState for the model.
    restored_train_state: A TrainState that is loaded with parameters/state of a
      pretrained model.
    restored_model_cfg: Configuration of the model from which the
      restored_train_state come from. Usually used for some asserts.
    restore_output_proj: If true, load the final output projection. Set to False
      if finetuning to a new dataset.
    vivit_transformer_key: The key used for storing the subtree in the
      parameters that keeps Transformer weights, that are supposed to be
      initialized from the given pre-trained model.
    log_initialised_param_shapes: If true, print tabular summary of all the
      variables in the model once they have been initialised.

  Returns:
    Updated train_state.
  """
  # Inspect and compare the parameters of the model with the init-model.
  params = flax.core.unfreeze(train_state.optimizer.target)
  # big_vision checkpoints store the optimizer target under a dict key,
  # scenic checkpoints as an attribute.
  if config.init_from.get('checkpoint_format', 'scenic') == 'big_vision':
    restored_params = restored_train_state.optimizer['target']
  else:
    restored_params = restored_train_state.optimizer.target
  restored_params = flax.core.unfreeze(restored_params)
  # Start moving parameters, one-by-one and apply changes if needed.
  for m_key, m_params in restored_params.items():
    if m_key == 'output_projection':
      if restore_output_proj:
        params[m_key] = m_params
      else:
        logging.info('Not restoring output projection.')
        pass
    elif m_key == 'pre_logits':
      if config.model.representation_size is None:
        # We don't have representation_size in the new model, so let's ignore
        # it from the pretrained model, in case it has it.
        # Note, removing the key from the dictionary is necessary to prevent
        # obscure errors from the Flax optimizer.
        params.pop(m_key, None)
      else:
        assert restored_model_cfg.model.representation_size
        params[m_key] = m_params
    elif m_key in {'Transformer', 'SpatialTransformer', 'TemporalTransformer'}:
      # Factorised ViViT variants keep separate spatial/temporal towers;
      # the temporal tower restores into its own key, everything else into
      # `vivit_transformer_key`.
      key_to_load = vivit_transformer_key
      is_temporal = False
      if m_key == 'TemporalTransformer':
        key_to_load = m_key
        is_temporal = True
      for tm_key, tm_params in m_params.items():
        if tm_key == 'posembed_input':  # Might need resolution change.
          init_posemb(params[key_to_load], m_params, config, restored_model_cfg,
                      is_temporal=is_temporal)
        elif 'encoderblock' in tm_key:
          vivit_model_utils.init_encoderblock(
              params[key_to_load], m_params, tm_key, config)
        else:  # Other parameters of the Transformer encoder.
          params[key_to_load][tm_key] = tm_params
    elif m_key == 'embedding':
      # Input (patch) embedding may need a temporal-dimension change.
      init_embedding(params, m_params, config)
    else:
      if m_key in train_state.optimizer.target:
        params[m_key] = m_params
      else:
        logging.info('Skipping %s. In restored model but not in target', m_key)
  if log_initialised_param_shapes:
    logging.info('Parameter summary after initialising from train state')
    debug_utils.log_param_shapes(params)
  return train_state.replace(
      optimizer=train_state.optimizer.replace(target=flax.core.freeze(params)))
def init_posemb(to_params, from_params, config, restored_model_cfg,
                is_temporal):
  """Initialize the positional embeddings from a restored checkpoint.

  Copies `from_params['posembed_input']['pos_embedding']` into `to_params`,
  resizing or tiling the embedding grid when the pretrained sequence length
  differs from the target one.

  Args:
    to_params: Target parameter subtree (mutated in place).
    from_params: Parameter subtree of the restored (pretrained) model.
    config: Config of the model being initialized.
    restored_model_cfg: Config of the model the checkpoint came from.
    is_temporal: Whether these embeddings belong to the temporal transformer.
  """
  if config.init_from.restore_positional_embedding:
    posemb = to_params['posembed_input']['pos_embedding']
    restored_posemb = from_params['posembed_input']['pos_embedding']
    if restored_posemb.shape != posemb.shape:
      # Rescale the grid of pos, embeddings.
      # Default parameter shape is (1, N, 768)
      logging.info('Adapting positional embeddings from %s to %s',
                   restored_posemb.shape, posemb.shape)
      ntok = posemb.shape[1]
      if restored_model_cfg.model.classifier == 'token':
        # The first token is the CLS token.
        cls_tok = restored_posemb[:, :1]
        restored_posemb_grid = restored_posemb[0, 1:]
      else:
        cls_tok = restored_posemb[:, :0]
        restored_posemb_grid = restored_posemb[0]
      if config.model.classifier == 'token':
        ntok -= 1
      if ((config.model.classifier == 'token') !=
          (restored_model_cfg.model.classifier == 'token')):
        logging.warning('Only one of target and restored model uses'
                        'classification token')
      # BUG FIX: the original compared the embedding array itself to the
      # integer ntok (`restored_posemb_grid == ntok`), which raises
      # "truth value of an array is ambiguous" whenever this branch runs.
      # The intent (see comment below) is to test the sequence length.
      if len(restored_posemb_grid) == ntok:
        # In case the following `if` is not going to run, lets add batch dim:
        # NOTE(review): this value is overwritten by the classifier branch
        # further down — confirm whether the batch dim should survive there.
        restored_posemb = restored_posemb_grid[None, ...]
      if len(restored_posemb_grid) != ntok:  # We need a resolution change.
        if is_temporal:
          if config.init_from.restore_temporal_embedding_for_goal:
            restored_posemb_grid = (
                vivit_model_utils.interpolate_1d_positional_embeddings(
                    restored_posemb_grid, ntok))
          else:
            # Leave room for the goal token appended below.
            restored_posemb_grid = (
                vivit_model_utils.interpolate_1d_positional_embeddings(
                    restored_posemb_grid, ntok - 1))
        elif config.init_from.positional_embed_size_change == 'resize':
          restored_posemb_grid = (
              vivit_model_utils.interpolate_positional_embeddings(
                  restored_posemb_grid, ntok))
        elif config.init_from.positional_embed_size_change == 'tile':
          restored_posemb_grid = (
              vivit_model_utils.tile_positional_embeddings(
                  restored_posemb_grid, ntok))
        elif config.init_from.positional_embed_size_change == 'resize_tile':
          # Resize to one frame's worth of tokens, then tile over frames.
          temp_encoding = config.model.temporal_encoding_config
          if temp_encoding.method == 'temporal_sampling':
            tokens_per_frame = int(ntok / temp_encoding.n_sampled_frames)
          elif temp_encoding.method == '3d_conv':
            n_frames = (
                config.dataset_configs.num_frames //
                config.model.patches.size[2])
            tokens_per_frame = ntok // n_frames
          else:
            raise AssertionError(
                f'Unknown temporal encoding {temp_encoding.method}')
          restored_posemb_grid = (
              vivit_model_utils.interpolate_positional_embeddings(
                  restored_posemb_grid, tokens_per_frame))
          restored_posemb_grid = restored_posemb_grid[0]
          restored_posemb_grid = vivit_model_utils.tile_positional_embeddings(
              restored_posemb_grid, ntok)
        else:
          raise AssertionError(
              'Unknown positional embedding size changing method')
      # Attach the CLS token again.
      if config.model.classifier == 'token':
        restored_posemb = jnp.array(
            np.concatenate([cls_tok, restored_posemb_grid], axis=1))
      else:
        restored_posemb = restored_posemb_grid
    if is_temporal and not config.init_from.restore_temporal_embedding_for_goal:
      # Keep the target model's learned embedding for the final (goal) slot.
      logging.info('Not restoring temporal embedding for goal')
      restored_posemb = jnp.array(
          np.concatenate(
              [restored_posemb,
               to_params['posembed_input']['pos_embedding'][:, -1:]], axis=1))
    to_params['posembed_input']['pos_embedding'] = restored_posemb
  else:
    logging.info('Not restoring positional encodings from pretrained model')
def init_embedding(to_params, from_params, config):
  """Initialize the input (patch) embedding from a restored checkpoint.

  When the target kernel has a different temporal extent than the restored
  one, the restored [t, h, w, c_in, c_out] kernel is collapsed along t
  according to `config.model.temporal_encoding_config.kernel_init_method`.

  Args:
    to_params: Target parameter tree (mutated in place).
    from_params: Restored 'embedding' subtree with 'kernel' and 'bias'.
    config: Config of the model being initialized.
  """
  if config.init_from.get('restore_input_embedding', True):
    input_kernel = to_params['embedding']['kernel']
    restored_kernel = from_params['kernel']
    restored_bias = from_params['bias']
    if input_kernel.shape != restored_kernel.shape:
      # Kernel dimensions are [t, h, w, c_in, c_out].
      # assert config.model.temporal_encoding_config.method == '3d_conv', (
      #     'Input kernel dimensions should only differ if 3d_conv is the'
      #     'temporal encoding method')
      assert (input_kernel.shape[1:] == restored_kernel.shape
              or input_kernel.shape[1:] == restored_kernel.shape[1:]), (
                  'All filter dimensions besides the temporal dimension should '
                  'be equal. {} vs {}'.format(
                      input_kernel.shape, restored_kernel.shape))
      kernel_init_method = config.model.temporal_encoding_config.kernel_init_method
      if kernel_init_method == 'reduce_mean_initializer':
        logging.info('Initializing 2D input kernel with mean temporal frame.')
        restored_kernel = np.mean(restored_kernel, axis=0)
        restored_kernel = np.expand_dims(restored_kernel, axis=0)
      elif kernel_init_method == 'reduce_sum_initializer':
        logging.info(
            'Initializing 2D input kernel with sum of temporal frames.')
        restored_kernel = np.sum(restored_kernel, axis=0)
        restored_kernel = np.expand_dims(restored_kernel, axis=0)
      elif kernel_init_method == 'last_frame_initializer':
        logging.info('Initializing 2D input kernel with last temporal frame.')
        # BUG FIX: the original used `restored_kernel[:1]`, which selects the
        # FIRST temporal frame; the method name and log message both ask for
        # the LAST frame.
        restored_kernel = restored_kernel[-1:]
      else:
        raise AssertionError(
            'Unknown input kernel initialization {}'.format(kernel_init_method))
    to_params['embedding']['kernel'] = restored_kernel
    to_params['embedding']['bias'] = restored_bias
  else:
    logging.info('Not restoring input embedding parameters')
| StarcoderdataPython |
4821925 | from flask import Blueprint
# Blueprint for the public-facing web pages: mounted at the site root
# (empty url_prefix) with its own template and static folders.
web_module = Blueprint(
    "web",
    __name__,
    url_prefix = "",
    template_folder = "templates",
    static_folder = "web_static"
)
from . import views | StarcoderdataPython |
154840 | #!/usr/bin/env python3
# EasyGoPiGo3 documentation: https://gopigo3.readthedocs.io/en/latest
#
########################################################################
# This example demonstrates using the distance sensor with the GoPiGo
# In this examples, the GoPiGo keeps reading from the distance sensor
# When it close to the an obstacle, it stops.
#
# http://www.dexterindustries.com/GoPiGo/
# Copyright (c) 2017 Dexter Industries Released under the MIT license
# History
# ------------------------------------------------
# Author Date Comments
# Karan 21 Aug 14 Initial Authoring
# Loring 10/12/21 Convert to EasyGoPiGo3, test with Python 3.5
#
########################################################################
#
# ! Attach Distance sensor to I2C Port.
#
########################################################################
#--------------------------------- IMPORTS -------------------------------------#
import time # Import time library for sleep function
import easygopigo3 as easy # Import the GoPiGo3 library
gpg = easy.EasyGoPiGo3() # Create an EasyGoPiGo3 object
# Initialize a distance sensor object (distance sensor on the I2C port)
distance_sensor = gpg.init_distance_sensor()
# Initialize a servo object on Servo Port 1
servo = gpg.init_servo("SERVO1")
# Set servo pointing straight ahead at 90 degrees
# You may have to change the degrees to adapt to your servo
# All servos line up slightly differently
# Less than 90 moves the servo to the right
# Greater than 90 moves the servo to the left
servo.rotate_servo(90)
gpg.set_speed(200) # Set initial speed
AVOIDANCE_DISTANCE = 12 # Distance in inches from obstacle where the GoPiGo should stop
def main():
    """Drive forward until the distance sensor reports an obstacle closer
    than AVOIDANCE_DISTANCE inches, then stop the GoPiGo."""
    print("Press ENTER to start")
    input()  # Block until the user confirms
    gpg.forward()  # GoPiGo keeps driving until another movement command
    obstacle_found = False
    while not obstacle_found:
        reading = distance_sensor.read_inches()  # Distance to whatever is ahead
        print("Dist:", reading, 'inches')  # Console feedback
        # Stop as soon as the obstacle is inside the avoidance distance.
        if reading < AVOIDANCE_DISTANCE:
            print("Stopping")
            gpg.stop()
            obstacle_found = True  # Leave the loop after this pass
        # sleep is blocking code, nothing else can happen during sleep
        time.sleep(.1)  # 100 milliseconds between sensor polls
# Run main() only when executed as a standalone program;
# importing this file as a module performs the setup above but does not drive.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
85573 | <filename>portal/apps/core/utils.py
# -*- coding: utf-8 -*-
import re
from time import timezone
from datetime import datetime, tzinfo, timedelta
from dateutil import tz
from math import copysign
from os import path
import pytz
from pytz import country_timezones, country_names
import requests
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.utils.deconstruct import deconstructible
class TZ(tzinfo):
    # Fixed-offset tzinfo built from the server's configured time zone.
    def utcoffset(self, dt):
        # `timezone` is time.timezone (offset magnitude in seconds, see the
        # import above); copysign transfers onto it the sign of the current
        # local UTC offset obtained by formatting "now" in settings.TIME_ZONE.
        # NOTE(review): ignores the `dt` argument and DST transitions, and no
        # dst()/tzname() are defined — confirm this is intentional.
        return timedelta(
            seconds=int(copysign(timezone, int(datetime.now(pytz.timezone(settings.TIME_ZONE)).strftime('%z'))))
        )
tz_obj = TZ()  # Shared fixed-offset tzinfo instance for this module
def datetime_isoformat(dt):
    """Return `dt` as an ISO-8601 string with microseconds dropped and the
    site's fixed offset (`tz_obj`) attached."""
    return dt.replace(microsecond=0, tzinfo=tz_obj).isoformat()
def datetime_timezone():
    """Return a human-readable description of settings.TIME_ZONE, e.g.
    u'(-03, Montevideo, Uruguay)'.  (Python 2 code: uses dict.iteritems.)"""
    local_tz, dt = tz.gettz(settings.TIME_ZONE), datetime.now()
    dt = datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, 0, local_tz)
    # Invert pytz's country -> [timezones] mapping into timezone -> country.
    # NOTE(review): the loop variable `timezone` shadows the `time.timezone`
    # import used elsewhere in this module — harmless here, but confusing.
    timezone_countries = {
        timezone: country for country, timezones in country_timezones.iteritems() for timezone in timezones
    }
    # Use the abbreviated zone name if alphabetic, otherwise show it as GMT+-hh.
    tz_name = dt.strftime(u'%Z')
    result = [tz_name if tz_name[0].isalpha() else u'GMT' + tz_name]
    # Append the city part of e.g. 'America/Montevideo'.
    tz_parts = settings.TIME_ZONE.split(u'/')
    if len(tz_parts) > 1:
        result.append(tz_parts[-1])
    # Append the country name when the zone is known to pytz.
    try:
        result.append(country_names[timezone_countries[settings.TIME_ZONE]])
    except KeyError:
        pass
    return u'(%s)' % u', '.join(result)
def get_pdf_pdf_upload_to(instance, filename):
    """Storage path for an edition PDF: editions/<slug>/<YYYYMMDD>/<pdf name>.

    `instance` may carry its publication directly or via its edition.
    """
    try:
        slug = instance.publication.slug
    except AttributeError:
        # No direct publication: go through the edition relation.
        slug = instance.edition.publication.slug
    issue_date = instance.date_published.strftime('%Y%m%d')
    return path.join('editions', slug, issue_date, instance.get_pdf_filename())
def get_pdf_cover_upload_to(instance, filename):
    """Storage path for an edition cover: editions/<slug>/<YYYYMMDD>/<cover name>."""
    issue_date = instance.date_published.strftime('%Y%m%d')
    parts = ('editions', instance.publication.slug, issue_date,
             instance.get_cover_filename())
    return path.join(*parts)
def get_supplement_directory(instance):
    """Directory for a supplement: under its edition's date when attached to
    one, otherwise under a standalone supplements/<creation date> folder."""
    if instance.edition:
        day = instance.edition.date_published.strftime('%Y%m%d')
        return path.join('editions', day, 'supplement')
    day = instance.date_created.strftime('%Y%m%d')
    return path.join('supplements', day)
def get_supplement_pdf_upload_to(instance, filename):
    """Full path of a supplement PDF; dashes in the slug become underscores."""
    basename = '%s.pdf' % instance.slug.replace('-', '_')
    return path.join(get_supplement_directory(instance), basename)
def get_pdfpage_pdf_upload_to(instance, filename):
    # TODO: unimplemented upload-path callback stub (returns None).
    pass
def get_pdfpage_snapshot_upload_to(instance, filename):
    # TODO: unimplemented upload-path callback stub (returns None).
    pass
def get_pdfpageimage_file_upload_to(instance, filename):
    # TODO: unimplemented upload-path callback stub (returns None).
    pass
def add_punctuation(text):
    """Append a final period when `text` ends in a letter, digit or '"'."""
    valid_chars = u'AÁBCDEÉFGHIÍJKLMNÑOÓPQRSTUÚVWXYZaábcdeéfghiíjklmnñoópqrstuúvwxyz0123456789"'
    if text != '' and text[-1] in valid_chars:
        return u'%s.' % text
    return text
def update_article_url_in_coral_talk(article_id, new_url_path):
    """Push an article's new canonical URL to the Coral Talk GraphQL API.

    Sends an `updateStory` mutation built by string interpolation; the
    trailing subscript makes a failed mutation raise (KeyError/TypeError)
    instead of passing silently.
    """
    requests.post(
        settings.TALK_URL + 'api/graphql',
        headers={'Content-Type': 'application/json', 'Authorization': 'Bearer ' + settings.TALK_API_TOKEN},
        data='{"operationName":"updateStory","variables":{"input":{"id":%d,"story":{"url":"%s://%s%s"}'
        ',"clientMutationId":"url updated"}},"query":"mutation updateStory($input: UpdateStoryInput!)'
        '{updateStory(input:$input){story{id}}}"}' % (
            article_id, settings.URL_SCHEME, settings.SITE_DOMAIN, new_url_path),
    ).json()['data']['updateStory']['story']
@deconstructible
class CT(object):
    """Mixin that caches the Django ContentType id of the concrete model."""
    # Per-class cache of the ContentType primary key (resolved lazily).
    __content_type_id__ = None
    def contenttype_id(self):
        # Resolve once per concrete class, then reuse the cached pk.
        if not self.__class__.__content_type_id__:
            self.__class__.__content_type_id__ = \
                ContentType.objects.get_for_model(self).pk
        return self.__class__.__content_type_id__
    def __eq__(self, other):
        # Instances compare equal when they belong to the same ContentType.
        # NOTE(review): before contenttype_id() has run on both sides this
        # compares None == None (True) — confirm that is acceptable.
        return self.__content_type_id__ == other.__content_type_id__
def smart_quotes(value):
    # Convert straight double quotes into typographic ones, skipping quotes
    # that appear inside HTML tags (the (?![^<>]*>) lookaheads).
    # NOTE: the ur"" literals below are Python-2-only syntax.
    value = re.sub(r"(?![^<>]*>)(\")\b", u"“", value)
    value = re.sub(r"\b(?![^<>]*>)(\")", u"”", value)
    # Handle quotes adjacent to Spanish punctuation/accented letters.
    value = re.sub(ur"\"(?=[¿¡\‘\'\(\[ÑÁÉÍÓÚñáéíóú])", u"“", value)
    value = re.sub(ur"(?<=[?!\’\'\)ÑÁÉÍÓÚñáéíóú\.\%\]])\"", u"”", value)
    return value
| StarcoderdataPython |
class Config(object):
    """Base configuration; environment-specific subclasses override these."""
    ENVIRONMENT = None  # Name of the deployment environment
    DEBUG = False       # Enable framework debug mode
    TESTING = False     # Enable test mode
| StarcoderdataPython |
1634946 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from torch.nn import init
import math
from net import MLP, StateTransition
class GNN(nn.Module):
    """Graph Neural Network (Scarselli-style): repeatedly applies a state
    transition function until node states (approximately) converge, then
    maps the converged states through an output network.

    `state_net`/`out_net` default to a `StateTransition` and an `MLP` built
    from `config` when not supplied.
    """
    def __init__(self, config, state_net=None, out_net=None):
        super(GNN, self).__init__()
        self.config = config
        # hyperparameters and general properties
        self.convergence_threshold = config.convergence_threshold
        self.max_iterations = config.max_iterations
        self.n_nodes = config.n_nodes
        self.state_dim = config.state_dim
        self.label_dim = config.label_dim
        self.output_dim = config.output_dim
        self.state_transition_hidden_dims = config.state_transition_hidden_dims
        self.output_function_hidden_dims = config.output_function_hidden_dims
        # node state initialization (random; the zero init is kept for reference)
        # self.node_state = torch.zeros(*[self.n_nodes, self.state_dim]).to(self.config.device) # (n,d_n)
        self.node_state = torch.rand(*[self.n_nodes, self.state_dim]).to(self.config.device) # (n,d_n)
        self.converged_states = torch.zeros(*[self.n_nodes, self.state_dim]).to(self.config.device)
        # state and output transition functions
        if state_net is None:
            self.state_transition_function = StateTransition(self.state_dim, self.label_dim,
                                                             mlp_hidden_dim=self.state_transition_hidden_dims,
                                                             activation_function=config.activation)
        else:
            self.state_transition_function = state_net
        if out_net is None:
            self.output_function = MLP(self.state_dim, self.output_function_hidden_dims, self.output_dim)
        else:
            self.output_function = out_net
        # When True, node states are pooled into per-graph states before output.
        self.graph_based = self.config.graph_based
    def reset_parameters(self):
        # Re-initialize both sub-networks in place.
        self.state_transition_function.mlp.init()
        self.output_function.init()
    def forward(self,
                edges,
                agg_matrix,
                node_labels,
                node_states=None,
                graph_agg=None
                ):
        """Run the convergence loop and return (output, n_iterations).

        Args:
            edges: graph connectivity, as consumed by the state transition net.
            agg_matrix: neighbor-aggregation matrix.
            node_labels: per-node label/feature tensor.
            node_states: optional initial states; defaults to self.node_state.
            graph_agg: node-to-graph pooling matrix (used when graph_based).
        """
        n_iterations = 0
        # convergence loop
        # state initialization
        node_states = self.node_state if node_states is None else node_states
        # NOTE: an earlier (commented-out) variant ran the loop under
        # torch.no_grad() and back-propagated only through one extra final
        # transition step; the loop below keeps gradients for every step.
        while n_iterations < self.max_iterations:
            new_state = self.state_transition_function(node_states, node_labels, edges, agg_matrix)
            n_iterations += 1
            # convergence condition
            with torch.no_grad():
                # distance = torch.sqrt(torch.sum((new_state - node_states) ** 2, 1) + 1e-20)
                distance = torch.norm(input=new_state - node_states,
                                      dim=1)  # checked, they are the same (in cuda, some bug)
                check_min = distance < self.convergence_threshold
            node_states = new_state
            # Stop once every node moved less than the threshold.
            if check_min.all():
                break
        states = node_states
        self.converged_states = states
        if self.graph_based:
            # Pool node states into per-graph states before the output net.
            states = torch.matmul(graph_agg, node_states)
        output = self.output_function(states)
        return output, n_iterations
| StarcoderdataPython |
1752013 | from __future__ import print_function
############################################################################################
#
# The MIT License (MIT)
#
# Intel AI DevJam IDC Demo Classification Server
# Copyright (C) 2018 <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Title: IDC Classification DevCloud Trainer
# Description: Trains a custom Inception V3 model for classification of invasive ductal carcinoma (IDC).
# Acknowledgements: Uses code from chesterkuo imageclassify-movidius (https://github.com/chesterkuo/imageclassify-movidius)
# Uses data from paultimothymooney Predict IDC in Breast Cancer Histology Images (Kaggle)
# Config: Configuration can be found in required/confs.json
# Last Modified: 2018-08-07
#
# Usage:
#
# $ python3.5 Trainer.py DataSort
# $ python3.5 Trainer.py Train
#
############################################################################################
print("")
print("")
print("!! Welcome to the IDC Classification DevCloud Trainer, please wait while the program initiates !!")
print("")
import os, sys
print("-- Running on Python "+sys.version)
print("")
import time, math, random, json, glob
import tools.inception_preprocessing
from sys import argv
from datetime import datetime
import tensorflow as tf
import numpy as np
from builtins import range
from tools.inception_v3 import inception_v3, inception_v3_arg_scope
from tools.DataSort import DataSort
from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.framework import graph_util
slim = tf.contrib.slim
print("-- Imported Required Modules")
print("")
# TF session config tuned for a 12-core CPU node.
# NOTE(review): `config` is never passed to a session in the visible code —
# confirm it is consumed elsewhere.
config = tf.ConfigProto(intra_op_parallelism_threads=12, inter_op_parallelism_threads=2, allow_soft_placement=True, device_count = {'CPU': 12})
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Silence TF C++ logging
os.environ["OMP_NUM_THREADS"] = "12"      # OpenMP / MKL thread settings
os.environ["KMP_BLOCKTIME"] = "30"
os.environ["KMP_SETTINGS"] = "1"
os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0"
print("-- Setup Environment Settings")
print("")
class Trainer():
    def __init__(self):
        # Classifier configuration loaded from disk; label id -> name map
        # is filled in later (see run()).
        self._confs = {}
        self.labelsToName = {}
        with open('model/confs.json') as confs:
            self._confs = json.loads(confs.read())
#Creates a Dataset class providing TFRecord files to feed in the examples into a queue in parallel.
    #Creates a Dataset class providing TFRecord files to feed in the examples into a queue in parallel.
    def getSplit(self, split_name):
        '''
        Obtains the split - training or validation - to create a Dataset class for feeding the examples into a queue later on. This function will
        set up the decoder and dataset information all into one Dataset class so that you can avoid the brute work later on.
        Your file_pattern is very important in locating the files later.
        INPUTS:
        - split_name(str): 'train' or 'validation'. Used to get the correct data split of tfrecord files
        OUTPUTS:
        - dataset (Dataset): A Dataset class object where we can read its various components for easier batch creation later.
        '''
        #Check whether the split_name is train or validation
        if split_name not in ['train', 'validation']:
            raise ValueError('The split_name %s is not recognized. Please input either train or validation as the split_name' % (split_name))
        #Create the full path for a general file_pattern to locate the tfrecord_files
        file_pattern_path = os.path.join(self._confs["ClassifierSettings"]["dataset_dir"], self._confs["ClassifierSettings"]["file_pattern"] % (split_name))
        #Count the total number of examples in all of these shards
        num_samples = 0
        file_pattern_for_counting = '200label_' + split_name
        tfrecords_to_count = [os.path.join(self._confs["ClassifierSettings"]["dataset_dir"], file) for file in os.listdir(self._confs["ClassifierSettings"]["dataset_dir"]) if file.startswith(file_pattern_for_counting)]
        #print(tfrecords_to_count)
        for tfrecord_file in tfrecords_to_count:
            for record in tf.python_io.tf_record_iterator(tfrecord_file):
                num_samples += 1
        #Create a reader, which must be a TFRecord reader in this case
        reader = tf.TFRecordReader
        #Create the keys_to_features dictionary for the decoder
        keys_to_features = {
            'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
            'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),
            'image/class/label': tf.FixedLenFeature(
                [], tf.int64, default_value=tf.zeros([], dtype=tf.int64)),
        }
        #Create the items_to_handlers dictionary for the decoder.
        items_to_handlers = {
            'image': slim.tfexample_decoder.Image(),
            'label': slim.tfexample_decoder.Tensor('image/class/label'),
        }
        #Start to create the decoder
        decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
        #Create the labels_to_name file
        # NOTE(review): labelsToName is populated by run() before this is
        # called — confirm getSplit is never invoked earlier.
        labels_to_name_dict = self.labelsToName
        #Actually create the dataset
        dataset = slim.dataset.Dataset(
            data_sources = file_pattern_path,
            decoder = decoder,
            reader = reader,
            num_readers = 4,
            num_samples = num_samples,
            num_classes = self._confs["ClassifierSettings"]["num_classes"],
            labels_to_name = labels_to_name_dict,
            items_to_descriptions = self.items_to_descriptions)
        return dataset
    def loadBatch(self, dataset, is_training=True):
        '''
        Loads a batch for training.
        INPUTS:
        - dataset(Dataset): a Dataset class object that is created from the get_split function
        - is_training(bool): to determine whether to perform a training or evaluation preprocessing
        (batch size and image size come from the ClassifierSettings configuration)
        OUTPUTS:
        - images(Tensor): a Tensor of the shape (batch_size, height, width, channels) that contain one batch of images
        - raw_images(Tensor): the un-preprocessed images, center-cropped/padded to the model input size
        - labels(Tensor): the batch's labels with the shape (batch_size,) (requires one_hot_encoding).
        '''
        #First create the data_provider object
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            common_queue_capacity = 24 + 3 * self._confs["ClassifierSettings"]["batch_size"],
            common_queue_min = 24)
        #Obtain the raw image using the get method
        raw_image, label = data_provider.get(['image', 'label'])
        #Perform the correct preprocessing for this image depending if it is training or evaluating
        image = tools.inception_preprocessing.preprocess_image(raw_image, self._confs["ClassifierSettings"]["image_size"], self._confs["ClassifierSettings"]["image_size"], is_training)
        #As for the raw images, we just do a simple reshape to batch it up
        raw_image = tf.image.resize_image_with_crop_or_pad(raw_image, self._confs["ClassifierSettings"]["image_size"], self._confs["ClassifierSettings"]["image_size"])
        #Batch up the image by enqueing the tensors internally in a FIFO queue and dequeueing many elements with tf.train.batch.
        images, raw_images, labels = tf.train.batch(
            [image, raw_image, label],
            batch_size = self._confs["ClassifierSettings"]["batch_size"],
            num_threads = 4,
            capacity = 4 * self._confs["ClassifierSettings"]["batch_size"],
            allow_smaller_final_batch = True)
        return images, raw_images, labels
    def sort(self):
        '''
        Prepares the raw image data: shuffles it with a fixed seed, splits it
        into training/validation sets, converts both to TFRecord shards and
        writes the labels file. Uses the module-level DataSort instance.
        '''
        humanStart = datetime.now()
        clockStart = time.time()
        print("-- Loading & Preparing Training Data ")
        print("-- STARTED: ", humanStart)
        print("")
        photoPaths, classes = DataSort.processFilesAndClasses()
        class_id = [int(i) for i in classes]
        class_names_to_ids = dict(zip(classes, class_id))
        # Size of the validation split, as a fraction of all photos.
        num_validation = int(self._confs["ClassifierSettings"]["validation_size"] * len(photoPaths))
        print('\n Checking ')
        print((class_names_to_ids))
        print(len(class_names_to_ids))
        sys.stdout.write('\n num_validation: %d, num class number %d \n' % ( num_validation, len(classes) ))
        sys.stdout.flush()
        # Divide the training datasets into train and test:
        # Fixed seed keeps the shuffle (and therefore the split) reproducible.
        random.seed(self._confs["ClassifierSettings"]["random_seed"])
        random.shuffle(photoPaths)
        training_filenames = photoPaths[num_validation:]
        validation_filenames = photoPaths[:num_validation]
        #print(training_filenames)
        #print(validation_filenames)
        #print(class_names_to_ids)
        # First, convert the training and validation sets.
        DataSort.convertToTFRecord('train', training_filenames, class_names_to_ids)
        DataSort.convertToTFRecord('validation', validation_filenames, class_names_to_ids)
        # Finally, write the labels file:
        labels_to_class_names = dict(zip(class_id, classes))
        #print(labels_to_class_names)
        #labels_to_class_names = dict(zip(range(len(class_names)), class_names))
        DataSort.writeLabels(labels_to_class_names)
        humanEnd = datetime.now()
        clockEnd = time.time()
        print('\nFinished converting the %s dataset!' % (self._confs["ClassifierSettings"]["tfrecord_filename"]))
        print("")
        print("-- Loaded & Prepared Training Data ")
        print("-- ENDED: ", humanEnd)
        print("-- TIME: {0}".format(clockEnd - clockStart))
        print("")
# Module-level singletons.
# NOTE(review): these rebind the class names to instances, shadowing the
# classes themselves — intentional here, but no further instances can be made.
Trainer = Trainer()
DataSort = DataSort()
def run():
    """Train an Inception V3 classifier with TF-Slim, then freeze the graph.

    Phase 1 builds the training graph (dataset batch, inception_v3 forward
    pass, softmax cross-entropy loss, exponentially decaying Adam) and runs
    it under a tf.train.Supervisor for the configured number of epochs.
    Phase 2 rebuilds an inference-only graph, restores the latest
    checkpoint and exports a frozen GraphDef to model/DevCloudIDC.pb.

    Relies on module-level names defined earlier in this file: the Trainer
    singleton, tf, slim, logging, graph_util, inception_v3(_arg_scope),
    get_or_create_global_step and config.
    """
    humanStart = datetime.now()
    clockStart = time.time()
    print("-- Training Starting ")
    print("-- STARTED: ", humanStart)
    print("")
    # Open the labels file.
    # NOTE(review): the handle is stored on Trainer.labels and never
    # closed — consider a `with` block.
    Trainer.labels = open(Trainer._confs["ClassifierSettings"]["labels_file"], 'r')
    # Build the int label -> string name map, one "label:name" pair per line.
    for line in Trainer.labels:
        label, string_name = line.split(':')
        string_name = string_name[:-1]  # Remove trailing newline
        Trainer.labelsToName[int(label)] = string_name
    # Human-readable dataset description, required by the slim Dataset class.
    Trainer.items_to_descriptions = {
        'image': 'A 3-channel RGB coloured image that is ex: office, people',
        'label': 'A label that ,start from zero'
    }
    # Create the log directory up front; the Supervisor writes into it.
    if not os.path.exists(Trainer._confs["ClassifierSettings"]["log_dir"]):
        os.mkdir(Trainer._confs["ClassifierSettings"]["log_dir"])
    # ======================= TRAINING PROCESS =========================
    with tf.Graph().as_default() as graph:
        tf.logging.set_verbosity(tf.logging.INFO)  # INFO-level TF logging
        # Create the dataset and load one batch.
        dataset = Trainer.getSplit('train')
        images, _, labels = Trainer.loadBatch(dataset)
        # Batches per epoch and the step count before decaying the LR.
        num_batches_per_epoch = dataset.num_samples // Trainer._confs["ClassifierSettings"]["batch_size"]
        num_steps_per_epoch = num_batches_per_epoch  # one step == one batch
        decay_steps = int(Trainer._confs["ClassifierSettings"]["num_epochs_before_decay"] * num_steps_per_epoch)
        # Model inference (training mode enables dropout/batch-norm updates).
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(images, num_classes = dataset.num_classes, is_training = True)
        # One-hot encode labels for the softmax cross-entropy loss.
        one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
        loss = tf.losses.softmax_cross_entropy(onehot_labels = one_hot_labels, logits = logits)
        total_loss = tf.losses.get_total_loss()  # includes regularization losses
        # Global step for monitoring the learning rate and training progress.
        global_step = get_or_create_global_step()
        # Exponentially decaying learning rate, stepped once per decay_steps.
        lr = tf.train.exponential_decay(
            learning_rate = Trainer._confs["ClassifierSettings"]["initial_learning_rate"],
            global_step = global_step,
            decay_steps = decay_steps,
            decay_rate = Trainer._confs["ClassifierSettings"]["learning_rate_decay_factor"],
            staircase = True)
        optimizer = tf.train.AdamOptimizer(learning_rate = lr)
        train_op = slim.learning.create_train_op(total_loss, optimizer)
        # Metrics: argmax predictions and streaming accuracy.
        predictions = tf.argmax(end_points['Predictions'], 1)
        probabilities = end_points['Predictions']
        accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, labels)
        metrics_op = tf.group(accuracy_update, probabilities)
        # Summaries grouped into one op for the Supervisor.
        tf.summary.scalar('losses/Total_Loss', total_loss)
        tf.summary.scalar('accuracy', accuracy)
        tf.summary.scalar('learning_rate', lr)
        my_summary_op = tf.summary.merge_all()

        def train_step(sess, train_op, global_step, epochCount):
            '''
            Simply runs a session for the three arguments provided and gives a logging on the time elapsed for each global step
            '''
            # Time each sess.run so we can log seconds per step.
            start_time = time.time()
            total_loss, global_step_count, _ = sess.run([train_op, global_step, metrics_op])
            time_elapsed = time.time() - start_time
            logging.info(' Epch %.2f Glb Stp %s: Loss: %.4f (%.2f sec/step)', epochCount, global_step_count, total_loss, time_elapsed)
            return total_loss, global_step_count

        # Supervisor for a managed session; summary_op=None so summaries are
        # computed manually below instead of automatically (memory control).
        sv = tf.train.Supervisor(logdir = Trainer._confs["ClassifierSettings"]["log_dir"], summary_op = None)
        with sv.managed_session() as sess:
            for step in range(num_steps_per_epoch * Trainer._confs["ClassifierSettings"]["dev_cloud_epochs"]):
                # At the start of every epoch, show the vital information.
                if step % num_batches_per_epoch == 0:
                    logging.info('Epoch %s/%s', step/num_batches_per_epoch + 1, Trainer._confs["ClassifierSettings"]["dev_cloud_epochs"])
                    learning_rate_value, accuracy_value = sess.run([lr, accuracy])
                    logging.info('Current Learning Rate: %s', learning_rate_value)
                    logging.info('Current Streaming Accuracy: %s', accuracy_value)
                    # Sanity-check dump of logits / predictions / labels.
                    logits_value, probabilities_value, predictions_value, labels_value = sess.run([logits, probabilities, predictions, labels])
                    print('logits: \n', logits_value[:5])
                    print('Probabilities: \n', probabilities_value[:5])
                    print('predictions: \n', predictions_value[:100])
                    print('Labels:\n:', labels_value[:100])
                # Log summaries every 10 steps; otherwise just train.
                if step % 10 == 0:
                    loss, _ = train_step(sess, train_op, sv.global_step, step/num_batches_per_epoch + 1)
                    summaries = sess.run(my_summary_op)
                    sv.summary_computed(sess, summaries)
                else:
                    loss, _ = train_step(sess, train_op, sv.global_step, step/num_batches_per_epoch + 1)
            # Final training loss/accuracy, then locate the saved checkpoint.
            logging.info('Final Loss: %s', loss)
            logging.info('Final Accuracy: %s', sess.run(accuracy))
            logging.info('Finished training! Saving model to disk now.')
            checkpoint_file = tf.train.latest_checkpoint(Trainer._confs["ClassifierSettings"]["log_dir"])
    # Export phase: rebuild an inference graph (batch of 1) and freeze it.
    with tf.Graph().as_default() as graph:
        images = tf.placeholder("float", [1, Trainer._confs["ClassifierSettings"]["image_size"], Trainer._confs["ClassifierSettings"]["image_size"], 3], name="input")
        with slim.arg_scope(inception_v3_arg_scope()):
            logits, end_points = inception_v3(images, num_classes = Trainer._confs["ClassifierSettings"]["num_classes"], is_training = False)
        probabilities = tf.nn.softmax(logits)
        saver = tf.train.Saver(slim.get_variables_to_restore())
        # Graph def plus the node/file names used for the frozen export.
        input_graph_def = graph.as_graph_def()
        output_node_names = "InceptionV3/Predictions/Softmax"
        output_graph_name = "model/DevCloudIDC.pb"
        # NOTE(review): `config` is assumed to be a session ConfigProto
        # defined earlier in this file — confirm.
        with tf.Session(config=config) as sess:
            saver.restore(sess, checkpoint_file)
            # Exporting the graph: fold variables into constants and write.
            print ("Exporting graph...")
            output_graph_def = graph_util.convert_variables_to_constants(
                sess,
                input_graph_def,
                output_node_names.split(","))
            with tf.gfile.GFile(output_graph_name, "wb") as f:
                f.write(output_graph_def.SerializeToString())
    humanEnd = datetime.now()
    clockEnd = time.time()
    print("")
    print("-- Training Ending ")
    print("-- ENDED: ", humanEnd)
    print("-- TIME: {0}".format(clockEnd - clockStart))
    print("")
def main(argv):
    """Dispatch on the first CLI argument.

    "Train" runs the training/export pipeline, "DataSort" prepares the
    TFRecord dataset; anything else (including no argument at all)
    prints a parameter error.

    :param argv: argument list with the program name already stripped.
    """
    # Guard: argv may be empty, in which case argv[0] would raise
    # IndexError. Treat it the same as an unrecognised parameter.
    if not argv:
        print("**ERROR** Check Your Parameters")
        return
    if argv[0] == "Train":
        run()
    elif argv[0] == "DataSort":
        Trainer.sort()
    else:
        print("**ERROR** Check Your Parameters")
# Script entry point: strip the program name and hand the remaining CLI
# arguments to the dispatcher. (Fixes the dataset-artifact junk text that
# had been appended to this line, which made it invalid Python.)
if __name__ == "__main__":
    main(sys.argv[1:])
#!/usr/bin/python
# Copyright (C) 2019 EASYSOFT-IN
# All rights exclusively reserved for EASYSOFT-IN,
# unless otherwise expressly agreed.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This file contains class for easin Args Manager
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import argparse
import os
import pprint
import shutil
import sys
import uuid
import bcrypt
import jinja2
import pkg_resources
from jinja2 import BaseLoader
from jinja2 import Environment
from sbr.easinAnalyser import Analyser
from sbr.easinAnalyser import ConfigLoader
from sbr.easinModels import Helper
from sbr.easinModels import Project
from sbr.easinTemplates import templates
# Sentinel default for the --outputDir option. Generator.__init__ compares
# the received value *by identity* against this constant and, when it is
# still the sentinel, derives "./<project-name>-<version>" instead.
DefaultOutput_Dir = "./tmp-out"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Generator Class
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Generator:
    """Renders the bundled Jinja2 templates into a complete Spring Boot
    REST project tree: entities, repositories, services, controllers,
    JWT security layer, build/runtime configuration and unit tests.
    """

    # Template keys into sbr.easinTemplates.templates. Each value is both
    # the lookup key and (in most cases) the generated output file name.
    # -- per-entity code generation
    Entity_Template = "entity.java"
    EntityRepo_Template = "Repository.java"
    Controller_Template = "controller.java"
    AuthenticationController_Template = "AuthenticationController.java"
    # -- build & runtime configuration
    log4j2_Template = "log4j2.xml"
    pom_Template = "pom.xml"
    Data_Template = "data.sql"
    properties_Template = "application.yaml"
    # -- application skeleton
    application_Template = "Application.java"
    CommandInitializer_Template = "CommandInitializer.java"
    LoggingFilter_Template = "RequestLoggingFilterConfig.java"
    SwaggerConfig_Template = "SwaggerConfiguration.java"
    webInitializer_Template = "WebInitializer.java"
    BaseEntity_Template = "BaseEntity.java"
    IController_Template = "IController.java"
    IService_Template = "IService.java"
    Service_Template = "Service.java"
    ExceptionBad_Template = "ResourceBadParameterException.java"
    ExceptionNot_Template = "ResourceNotFoundException.java"
    ErrorControl_Template = "MyErrorController.java"
    StatusControl_Template = "StatusController.java"
    READMEFILE_Template = "README.md"
    # -- tests
    CrudTest_Template = "CrudUnitTest.java"
    HelperTests_Template = "HelperTests.java"
    # -- constants & security layer
    Constants_Template = "Constants.java"
    Authorities_Template = "AuthoritiesConstants.java"
    WebSecurityConfig_Template = "WebSecurityConfig.java"
    TokenProvider_Template = "TokenProvider.java"
    RestConfig_Template = "RestConfig.java"
    JwtAuthenticationEntryPoint_Template = "JwtAuthenticationEntryPoint.java"
    JwtAuthenticationFilter_Template = "JwtAuthenticationFilter.java"
    UserService_Template = "UserService.java"
    UserEntity_Template = "User.java"
    AuthToken_Template = "AuthToken.java"
    LoginUser_Template = "LoginUser.java"
    # Directory (relative to the CWD) searched by the jinja2 FileSystemLoader.
    Template_Dir = ".templates"
def __init__(
    self,
    outputDir=None,
    verbose=False,
    tests=False,
    security=True,
    project=None,
    entities=None,
    conf=None,
):
    """Validate inputs, (re)create the output tree and prepare Jinja2.

    :param outputDir: destination directory; when it is the module
        sentinel DefaultOutput_Dir, "./<name>-<version>" is used instead.
    :param verbose: enable verbose behaviour in callers.
    :param tests: generate unit tests when True.
    :param security: generate the JWT security layer when True.
    :param project: a Project instance describing the target application.
    :param entities: parsed entity models (defaults to an empty list).
    :param conf: logger configuration rendered into log4j2.xml.
    :raises TypeError: if project is not a Project or outputDir is falsy.
    """
    if not isinstance(project, Project):
        Helper.logger.critical("project must be of Project type !.")
        raise TypeError("project must be of Project type !.")
    if not outputDir:
        Helper.logger.critical("The output Dir must be non null")
        raise TypeError("The output Dir must be non null")
    self.__project = project
    self.outputDir = outputDir
    self.verbose = verbose
    self.tests = tests
    self.security = security
    # BUGFIX: the default was the mutable `entities=list()`, shared across
    # calls; use None and create a fresh list per instance instead.
    self.entities = entities if entities is not None else []
    self.controllers = list()
    self.configuration = conf
    # Identity check against the module sentinel: an untouched default
    # means the caller did not choose a directory, so derive one.
    if self.outputDir is DefaultOutput_Dir:
        self.outputDir = (
            "./" + self.__project.name + "-" + self.__project.version
        )
    # Wipe any previous generation output, then rebuild the tree.
    if os.path.exists(self.outputDir) and os.path.isdir(self.outputDir):
        shutil.rmtree(self.outputDir)
    os.makedirs(self.outputDir + Project.JAVA_Dir)
    self.__srcdir = (
        self.outputDir
        + Project.JAVA_Dir
        + self.__project.package.replace(".", "/")
    )
    os.makedirs(self.outputDir + Project.Resources_Dir)
    # Jinja2 loader rooted at ./.templates (unused by the Environment-
    # from-string renders below, but kept for template-dir overrides).
    self.__pathTemplate = os.getcwd() + "/" + Generator.Template_Dir
    loader = jinja2.FileSystemLoader(searchpath=self.__pathTemplate)
    self.templateEnv = jinja2.Environment(loader=loader)
    # Randomised seed-account e-mail addresses for the generated data.sql
    # and tests. NOTE(review): the published source was redacted here
    # (`<EMAIL>` placeholders); reconstructed using the reserved
    # example.com domain — adjust if the original domain is known.
    self.mail_prefix = Helper.randomString(5)
    self.umail_prefix = Helper.randomString(5)
    self.amail = self.mail_prefix + "@example.com"
    self.umail = self.umail_prefix + "@example.com"
def generate(self):
    """Run every generation stage in dependency order.

    __GenerateBase must run first: it creates the bcrypt salt, compiles
    the per-entity templates and creates the directories that all later
    stages write into. Security (when enabled) runs before tests because
    __GenerateTests reads self.apassword/self.upassword, which are only
    set during security generation.
    """
    Helper.logger.info("Generate Base ==> ")
    self.__GenerateBase()
    Helper.logger.info("Generate Entities ==> ")
    self.__GenerateEntity()
    Helper.logger.info("Generate configurations ==> ")
    self.__GenerateConfiguration()
    if self.security:
        Helper.logger.info("Generate Security ==> ")
        self.__GenerateSecurity()
    if self.tests:
        Helper.logger.info("Generate tests ==> ")
        self.__GenerateTests()
def __GenerateEntity(self):
    """Render entity class, repository, controller and service for every
    parsed entity, writing each under the matching source sub-folder.
    A "User" entity is special-cased: its generic entity (and, with
    security enabled, its service) is overwritten by dedicated templates.
    """
    for ent in self.entities:
        # Entity class -> <src>/entities/<Name>.java
        Helper.logger.debug(
            f"> Generating Classes for Entity {ent.name} ."
        )
        output = self.templateEntity.render(
            package=self.__project.package + "." + Project.Entities_folder,
            security=self.security,
            entity=ent,
        ).encode("utf-8")
        f = open(self.entityDirs + "/" + ent.name + ".java", "wb")
        f.write(output)
        f.close()
        # Spring Data repository -> <Name><Repository_prepend>.java
        Helper.logger.debug(
            f"> Generating Repository for Entity {ent.name} ."
        )
        output = self.templateRepo.render(
            package=self.__project.package + "." + Project.Repositories_folder,
            Entitypackage=self.__project.package + "." + Project.Entities_folder + "." + ent.name,
            entityName=ent.name,
        ).encode("utf-8")
        f = open(
            self.entityRepoDirs + "/" + ent.name + Project.Repository_prepend + ".java",
            "wb",
        )
        f.write(output)
        f.close()
        # REST controller -> <Name><Controller_prepend>.java, mapped under
        # the API prefix with the lower-cased entity name.
        Helper.logger.debug(
            f"> Generating Controller for Entity {ent.name} ."
        )
        output = self.templateController.render(
            projectPackage=self.__project.package,
            security=self.security,
            roles=self.__project.securityRoles,
            package=self.__project.package + "." + Project.Controllers_folder,
            Entitypackage=self.__project.package + "." + Project.Entities_folder + "." + ent.name,
            Servicepackage=self.__project.package + "." + Project.Services_folder + "." + ent.name + Project.Service_prepend,
            entityName=ent.name,
            mapping=Project.ApiPrefix + ent.name.lower(),
        ).encode("utf-8")
        f = open(
            self.controllersDirs + "/" + ent.name + Project.Controller_prepend + ".java",
            "wb",
        )
        f.write(output)
        f.close()
        # Service layer -> <Name><Service_prepend>.java
        Helper.logger.debug(
            f"> Generating Service for Entity {ent.name} ."
        )
        output = self.templateService.render(
            projectPackage=self.__project.package,
            package=self.__project.package + "." + Project.Services_folder,
            Entitypackage=self.__project.package + "." + Project.Entities_folder + "." + ent.name,
            Repositorypackage=self.__project.package + "." + Project.Repositories_folder + "." + ent.name + Project.Repository_prepend,
            Servicepackage=self.__project.package + "." + Project.Services_folder + "." + ent.name + Project.Service_prepend,
            entityName=ent.name,
            entity=ent,
        ).encode("utf-8")
        f = open(
            self.servicesDirs + "/" + ent.name + Project.Service_prepend + ".java",
            "wb",
        )
        f.write(output)
        f.close()
        if ent.name == "User":
            # Overwrite the generic entity file with the dedicated User
            # template (adds the account fields the security layer needs).
            Helper.logger.debug(f"> Generating User Class {ent.name} .")
            templateUserEntity = Environment(
                loader=BaseLoader()
            ).from_string(templates[Generator.UserEntity_Template])
            output = templateUserEntity.render(
                package=self.__project.package + "." + Project.Entities_folder,
                security=self.security,
                configConstants=self.__project.package + "." + Project.Conf_folder,
                entity=ent,
            ).encode("utf-8")
            f = open(self.entityDirs + "/" + ent.name + ".java", "wb")
            f.write(output)
            f.close()
            if self.security:
                # Also overwrite the generic service with the
                # security-aware UserService implementation.
                Helper.logger.debug(
                    "> Generating User Service file for security profile."
                )
                template = Environment(loader=BaseLoader()).from_string(
                    templates[Generator.UserService_Template]
                )
                output = template.render(
                    projectPackage=self.__project.package,
                    package=self.__project.package + "." + Project.Services_folder,
                    Repositorypackage=self.__project.package + "." + Project.Repositories_folder + "." + ent.name + Project.Repository_prepend,
                    Entitypackage=self.__project.package + "." + Project.Entities_folder + ".User",
                    entity=ent,
                    packageConstants=self.__project.package + "." + Project.Conf_folder,
                    Repopackage=self.__project.package + "." + Project.Repositories_folder + ".User" + Project.Repository_prepend,
                ).encode("utf-8")
                f = open(
                    self.servicesDirs + "/" + Generator.UserService_Template,
                    "wb",
                )
                f.write(output)
                f.close()
def __GenerateBase(self):
    """Prepare shared state, then emit the framework-level classes.

    Creates the bcrypt salt (reused for the seed passwords in data.sql
    and the Constants template), compiles the per-entity templates used
    by __GenerateEntity, creates the per-layer output directories and
    writes BaseEntity, IController, IService, the error/status
    controllers and the Constants class.
    """
    # Salt reused for every bcrypt password hash of this generation run.
    self.key = bcrypt.gensalt()
    # Pre-compile the per-entity templates (used by __GenerateEntity).
    self.templateEntity = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Entity_Template]
    )
    self.templateRepo = Environment(loader=BaseLoader()).from_string(
        templates[Generator.EntityRepo_Template]
    )
    self.templateController = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Controller_Template]
    )
    self.templateService = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Service_Template]
    )
    # Per-layer output directories under the package source root.
    self.entityDirs = self.__srcdir + "/" + Project.Entities_folder
    os.makedirs(self.entityDirs)
    self.entityRepoDirs = self.__srcdir + "/" + Project.Repositories_folder
    os.makedirs(self.entityRepoDirs)
    self.controllersDirs = self.__srcdir + "/" + Project.Controllers_folder
    os.makedirs(self.controllersDirs)
    self.servicesDirs = self.__srcdir + "/" + Project.Services_folder
    os.makedirs(self.servicesDirs)
    self.appDirs = self.__srcdir + "/" + Project.Conf_folder
    os.makedirs(self.appDirs)
    # BaseEntity.java — common superclass for all generated entities.
    Helper.logger.debug("> Generating Base entity file ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.BaseEntity_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Entities_folder
    ).encode("utf-8")
    f = open(self.entityDirs + "/" + Generator.BaseEntity_Template, "wb")
    f.write(output)
    f.close()
    # IController.java — controller interface.
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.IController_Template]
    )
    Helper.logger.debug("> Generating IController .")
    output = template.render(
        package=self.__project.package + "." + Project.Controllers_folder
    ).encode("utf-8")
    f = open(
        self.controllersDirs + "/" + Generator.IController_Template, "wb"
    )
    f.write(output)
    f.close()
    # IService.java — service interface.
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.IService_Template]
    )
    Helper.logger.debug("> Generating IService .")
    # NOTE(review): packageService is assigned but never used.
    packageService = self.__project.package + "." + Project.Services_folder
    output = template.render(
        package=self.__project.package + "." + Project.Services_folder
    ).encode("utf-8")
    f = open(self.servicesDirs + "/IService.java", "wb")
    f.write(output)
    f.close()
    # MyErrorController.java — global error controller.
    Helper.logger.debug("> Generating error Controller file ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.ErrorControl_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Controllers_folder,
        project=self.__project,
    ).encode("utf-8")
    f = open(
        self.controllersDirs + "/" + Generator.ErrorControl_Template, "wb"
    )
    f.write(output)
    f.close()
    # StatusController.java — health/status endpoint.
    Helper.logger.debug("> Generating Status Controller file ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.StatusControl_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Controllers_folder
    ).encode("utf-8")
    f = open(
        self.controllersDirs + "/" + Generator.StatusControl_Template, "wb"
    )
    f.write(output)
    f.close()
    # Constants.java — embeds the bcrypt salt for the application.
    Helper.logger.debug("> Generating Constants config file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Constants_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Conf_folder,
        key=self.key.decode(),
    ).encode("utf-8")
    f = open(self.appDirs + "/" + Generator.Constants_Template, "wb")
    f.write(output)
    f.close()
def __GenerateConfiguration(self):
    """Emit build/runtime configuration and application plumbing:
    log4j2.xml, application.yaml, pom.xml, the Spring Boot Application
    class, initializers, Swagger/logging config, the two resource
    exceptions and the project README.
    """
    exceptionDirs = self.__srcdir + "/" + Project.Exceptions_folder
    os.makedirs(exceptionDirs)
    # log4j2.xml — logging configuration from self.configuration.
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.log4j2_Template]
    )
    Helper.logger.debug("> Generating Configuration logger ..")
    output = template.render(logger=self.configuration).encode("utf-8")
    f = open(
        self.outputDir + Project.Resources_Dir + Generator.log4j2_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # application.yaml — Spring application properties.
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.properties_Template]
    )
    Helper.logger.debug("> Generating Configurations file ..")
    output = template.render(project=self.__project).encode("utf-8")
    f = open(
        self.outputDir + Project.Resources_Dir + Generator.properties_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # pom.xml — Maven build; start class derived from the Application
    # template name with its ".java" suffix stripped ([:-5]).
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.pom_Template]
    )
    Helper.logger.debug("> Generating Configuration pom ..")
    output = template.render(
        pom=self.__project,
        startClass=self.__project.package + "." + Generator.application_Template[:-5],
        security=self.security,
    ).encode("utf-8")
    f = open(self.outputDir + "/" + Generator.pom_Template, "wb")
    f.write(output)
    f.close()
    # Application.java — Spring Boot entry point.
    Helper.logger.debug("> Generating application files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.application_Template]
    )
    output = template.render(package=self.__project.package).encode(
        "utf-8"
    )
    f = open(self.__srcdir + "/" + Generator.application_Template, "wb")
    f.write(output)
    f.close()
    # CommandInitializer.java — startup command-line runner.
    Helper.logger.debug("> Generating Command Initializer files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.CommandInitializer_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Conf_folder
    ).encode("utf-8")
    f = open(
        self.appDirs + "/" + Generator.CommandInitializer_Template, "wb"
    )
    f.write(output)
    f.close()
    # SwaggerConfiguration.java — API docs config.
    Helper.logger.debug("> Generating Swagger Config files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.SwaggerConfig_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Conf_folder,
        ApiPrefix=Project.ApiPrefix,
        project=self.__project,
    ).encode("utf-8")
    f = open(self.appDirs + "/" + Generator.SwaggerConfig_Template, "wb")
    f.write(output)
    f.close()
    # RequestLoggingFilterConfig.java — request logging filter.
    Helper.logger.debug("> Generating Logging Filter files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.LoggingFilter_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Conf_folder
    ).encode("utf-8")
    f = open(self.appDirs + "/" + Generator.LoggingFilter_Template, "wb")
    f.write(output)
    f.close()
    # WebInitializer.java — servlet initializer pointing at Application.
    Helper.logger.debug("> Generating Web Initializer files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.webInitializer_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Conf_folder,
        Apppackage=self.__project.package + "." + Generator.application_Template[:-5],
    ).encode("utf-8")
    f = open(self.appDirs + "/" + Generator.webInitializer_Template, "wb")
    f.write(output)
    f.close()
    # Resource exceptions (400 and 404).
    Helper.logger.debug("> Generating Web Exceptions files ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.ExceptionBad_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Exceptions_folder
    ).encode("utf-8")
    f = open(exceptionDirs + "/" + Generator.ExceptionBad_Template, "wb")
    f.write(output)
    f.close()
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.ExceptionNot_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Exceptions_folder
    ).encode("utf-8")
    f = open(exceptionDirs + "/" + Generator.ExceptionNot_Template, "wb")
    f.write(output)
    f.close()
    # README.md — project documentation stub.
    Helper.logger.debug("> Generating Read ME File .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.READMEFILE_Template]
    )
    output = template.render(project=self.__project).encode("utf-8")
    f = open(self.outputDir + "/" + Generator.READMEFILE_Template, "wb")
    f.write(output)
    f.close()
def __GenerateTests(self):
    """Emit one CRUD unit-test class per entity plus a shared helper.

    With security enabled, tests additionally receive the seed account
    credentials (self.apassword/self.upassword) produced earlier by
    __GenerateSecurity — generate() guarantees that ordering.
    """
    self.__testdir = (
        self.outputDir + Project.Test_Dir + self.__project.package.replace(".", "/")
    )
    os.makedirs(self.__testdir)
    for ent in self.entities:
        # <Name>CrudUnitTest.java — CRUD tests for this entity.
        Helper.logger.debug(
            f"> Generating Crud tests file for Entity {ent.name} ."
        )
        template = Environment(loader=BaseLoader()).from_string(
            templates[Generator.CrudTest_Template]
        )
        # The two branches differ only in whether the seed credentials
        # are passed to the template.
        if self.security:
            output = template.render(
                package=self.__project.package,
                packageSecurity=self.__project.package + "." + Project.Security_folder,
                security=self.security,
                aemail=self.amail,
                uemail=self.umail,
                apassword=self.apassword,
                upassword=self.upassword,
                Entitypackage=self.__project.package + "." + Project.Entities_folder + "." + ent.name,
                Servicepackage=self.__project.package + "." + Project.Services_folder + "." + ent.name + Project.Service_prepend,
                ServiceBasepackage=self.__project.package + "." + Project.Services_folder,
                EntityBasepackage=self.__project.package + "." + Project.Entities_folder,
                entityName=ent.name,
                packageConstants=self.__project.package + "." + Project.Conf_folder,
                mapping=Project.ApiPrefix + ent.name.lower(),
                entity=ent,
            ).encode("utf-8")
        else:
            output = template.render(
                package=self.__project.package,
                packageSecurity=self.__project.package + "." + Project.Security_folder,
                security=self.security,
                aemail=self.amail,
                uemail=self.umail,
                Entitypackage=self.__project.package + "." + Project.Entities_folder + "." + ent.name,
                Servicepackage=self.__project.package + "." + Project.Services_folder + "." + ent.name + Project.Service_prepend,
                ServiceBasepackage=self.__project.package + "." + Project.Services_folder,
                EntityBasepackage=self.__project.package + "." + Project.Entities_folder,
                entityName=ent.name,
                packageConstants=self.__project.package + "." + Project.Conf_folder,
                mapping=Project.ApiPrefix + ent.name.lower(),
                entity=ent,
            ).encode("utf-8")
        f = open(
            self.__testdir + "/" + ent.name + Generator.CrudTest_Template,
            "wb",
        )
        f.write(output)
        f.close()
    # HelperTests.java — shared test utilities.
    Helper.logger.debug("> Generating Helper test file ..")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.HelperTests_Template]
    )
    output = template.render(package=self.__project.package).encode(
        "utf-8"
    )
    f = open(self.__testdir + "/" + Generator.HelperTests_Template, "wb")
    f.write(output)
    f.close()
def __GenerateSecurity(self):
    """Emit the JWT security layer and the seeded data.sql.

    Writes AuthoritiesConstants, WebSecurityConfig, TokenProvider,
    RestConfig, the JWT entry point/filter, the authentication
    controller and the security API payload classes. Finally generates
    data.sql with two seed accounts whose passwords are bcrypt-hashed
    with the salt created in __GenerateBase; the clear-text passwords
    are kept on self.apassword/self.upassword for __GenerateTests.
    """
    self.securityDirs = self.__srcdir + "/" + Project.Security_folder
    os.makedirs(self.securityDirs)
    self.securityApiDirs = (
        self.__srcdir + "/" + Project.Security_folder + "/" + Project.Security_api_folder
    )
    os.makedirs(self.securityApiDirs)
    # AuthoritiesConstants.java — role name constants.
    Helper.logger.debug("> Generating Authorities file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Authorities_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder,
        roles=self.__project.securityRoles,
    ).encode("utf-8")
    f = open(
        self.securityDirs + "/" + Generator.Authorities_Template, "wb"
    )
    f.write(output)
    f.close()
    # WebSecurityConfig.java — Spring Security configuration.
    Helper.logger.debug("> Generating WebSecurityConfig file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.WebSecurityConfig_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder,
        project=self.__project,
    ).encode("utf-8")
    f = open(
        self.securityDirs + "/" + Generator.WebSecurityConfig_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # TokenProvider.java — JWT creation/validation.
    Helper.logger.debug("> Generating TokenProvider file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.TokenProvider_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder,
        packageConstants=self.__project.package + "." + Project.Conf_folder,
    ).encode("utf-8")
    f = open(
        self.securityDirs + "/" + Generator.TokenProvider_Template, "wb"
    )
    f.write(output)
    f.close()
    # RestConfig.java — REST-specific security config.
    Helper.logger.debug("> Generating RestConfig file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.RestConfig_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder
    ).encode("utf-8")
    f = open(self.securityDirs + "/" + Generator.RestConfig_Template, "wb")
    f.write(output)
    f.close()
    # JwtAuthenticationEntryPoint.java — 401 handling.
    Helper.logger.debug("> Generating JwtAuthenticationEntryPoint file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.JwtAuthenticationEntryPoint_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder
    ).encode("utf-8")
    f = open(
        self.securityDirs + "/" + Generator.JwtAuthenticationEntryPoint_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # JwtAuthenticationFilter.java — per-request token filter.
    Helper.logger.debug("> Generating JwtAuthenticationFilter file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.JwtAuthenticationFilter_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder,
        packageConstants=self.__project.package + "." + Project.Conf_folder,
    ).encode("utf-8")
    f = open(
        self.securityDirs + "/" + Generator.JwtAuthenticationFilter_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # AuthenticationController.java — login endpoint under <ApiPrefix>auth.
    Helper.logger.debug("> Generating Authentication Controller .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.AuthenticationController_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Controllers_folder,
        EntitypackageUser=self.__project.package + "." + Project.Entities_folder + ".User",
        Securitypackage=self.__project.package + "." + Project.Security_folder,
        ServicepackageUser=self.__project.package + "." + Project.Services_folder + ".User" + Project.Service_prepend,
        mapping=Project.ApiPrefix + "auth",
    ).encode("utf-8")
    f = open(
        self.controllersDirs + "/" + Generator.AuthenticationController_Template,
        "wb",
    )
    f.write(output)
    f.close()
    # AuthToken.java — token response payload.
    Helper.logger.debug("> Generating Security Api classe Auth token .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.AuthToken_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder + "." + Project.Security_api_folder,
        EntitypackageUser=self.__project.package + "." + Project.Entities_folder + ".User",
    ).encode("utf-8")
    f = open(
        self.securityApiDirs + "/" + Generator.AuthToken_Template, "wb"
    )
    f.write(output)
    f.close()
    # LoginUser.java — login request payload.
    Helper.logger.debug("> Generating Security Api classe Login User .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.LoginUser_Template]
    )
    output = template.render(
        package=self.__project.package + "." + Project.Security_folder + "." + Project.Security_api_folder
    ).encode("utf-8")
    f = open(
        self.securityApiDirs + "/" + Generator.LoginUser_Template, "wb"
    )
    f.write(output)
    f.close()
    # data.sql — seed the database with an admin and a plain user account.
    Helper.logger.debug("> Generating data.sql file .")
    template = Environment(loader=BaseLoader()).from_string(
        templates[Generator.Data_Template]
    )
    passwd = Helper.randomString(5)
    upasswd = Helper.randomString(5)
    # Using bcrypt for passwords: hash with the salt from __GenerateBase.
    pwdhash = bcrypt.hashpw(passwd.encode(), self.key)
    upwdhash = bcrypt.hashpw(upasswd.encode(), self.key)
    # BUGFIX: the published source was redacted here (`<PASSWORD>`
    # placeholders, invalid Python). Reconstructed: keep the clear-text
    # passwords for the generated unit tests and render the bcrypt
    # hashes (decoded to str) plus the clear values into data.sql.
    self.apassword = passwd
    self.upassword = upasswd
    output = template.render(
        uuid=uuid.uuid1(),
        uuuid=uuid.uuid1(),
        password=pwdhash.decode(),
        upassword=upwdhash.decode(),
        passwordclear=passwd,
        upasswordclear=upasswd,
        login=self.mail_prefix + "Admin",
        ulogin=self.umail_prefix + "User",
        mail=self.amail,
        umail=self.umail,
    ).encode("utf-8")
    f = open(
        self.outputDir + Project.Resources_Dir + "/" + Generator.Data_Template,
        "wb",
    )
    f.write(output)
    f.close()
""" To string type method """
def __str__(self):
return str(self.__class__) + ": " + str(self.__dict__)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The Main function
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def main(argv):
    """Entry point: parse CLI options, load the YAML configuration, analyse
    it and generate the Spring Boot Rest sources.

    :param argv: command-line argument list (without the program name).
    """
    # Check Parameter
    parser = argparse.ArgumentParser(
        prog="SBR Generator",
        description="SBR generator: Generate Spring Boot Rest source code.",
    )
    version = pkg_resources.require("sbr")[0].version
    parser.add_argument("--version", action="version", version=version)
    parser.add_argument(
        "-v",
        "--mode-verbose",
        dest="verbose",
        action="store_true",
        help="Enable verbose traces",
    )
    parser.add_argument(
        "-t",
        "--enable-tests",
        dest="tests",
        action="store_true",
        help="Enable tests",
    )
    parser.add_argument(
        "-s",
        "--disable-security",
        dest="security",
        action="store_false",
        help="Disable security",
    )
    parser.add_argument(
        "-c",
        "--config-file",
        dest="configfile",
        action="store",
        help="The Yaml config file ",
        required=True,
    )
    parser.add_argument(
        "-o",
        "--outputDir",
        dest="outputDir",
        action="store",
        default=DefaultOutput_Dir,
        help="The Output folder where to store generated source code",
    )
    # BUG FIX: the ``argv`` parameter was previously ignored (``parse_args()``
    # read ``sys.argv`` directly); honour the caller-supplied argument vector
    # so ``run()`` / programmatic callers behave as documented.
    args = parser.parse_args(argv)
    Helper.logger.info(f"[ ok ] verbose    : '{args.verbose}' ")
    Helper.logger.info(f"[ ok ] tests      : '{args.tests}' ")
    Helper.logger.info(f"[ ok ] security   : '{args.security}' ")
    Helper.logger.info(f"[ ok ] configfile : '{args.configfile}' ")
    Helper.logger.info(f"[ ok ] outputDir  : '{args.outputDir}' ")
    # Load configuration
    econ = ConfigLoader(args.configfile, args.verbose)
    if args.verbose:
        pp = pprint.PrettyPrinter(indent=4)
        Helper.logger.info("The config Project is  ==> ")
        pp.pprint(econ.configuration["project"])
    # Analyse Configuration
    Helper.logger.info("Analysing Configuration .. ")
    analyser = Analyser(econ.project, econ.configuration)
    # Generate ...
    Helper.logger.info("Generate .. ")
    gen = Generator(
        args.outputDir,
        args.verbose,
        args.tests,
        args.security,
        econ.project,
        analyser.AllEntities,
        analyser.Configuration,
    )
    gen.generate()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The Default function
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def run():
    """Console-script entry point (usable from setuptools ``console_scripts``).

    Forwards the CLI arguments from :obj:`sys.argv` (minus the program name)
    to :func:`main`.
    """
    argv = sys.argv[1:]
    main(argv)
if __name__ == "__main__":
# ^ This is a guard statement that will prevent the following code from
# being executed in the case someone imports this file instead of
# executing it as a script.
# https://docs.python.org/3/library/__main__.html
# After installing your project with pip, users can also run your Python
# modules as scripts via the ``-m`` flag, as defined in PEP 338::
#
# sbr-gen -v ....
#
run()
| StarcoderdataPython |
3375982 | <reponame>DXYyang/shenNeng_gasAnalysis
def density_plot(data, k):
    """Plot kernel-density estimates for the first *k* columns of *data*
    (a pandas DataFrame) as stacked subplots and return the pyplot module.
    """
    import matplotlib.pyplot as plt
    plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK labels correctly
    plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
    p = data.plot(kind='kde', linewidth=2, subplots=True, sharex=False)
    # Label every subplot's y axis ("density"). A plain loop replaces the
    # original list comprehension that was used only for its side effects.
    for i in range(k):
        p[i].set_ylabel(u'密度')
    plt.legend()
    return plt
| StarcoderdataPython |
3231319 | <filename>ask_api_examples/list_five_models_offset.py
"""List five models written in C, starting at item 20 from the full
list.
"""
from ask_api_examples import make_query
query = '[[Programming language::Python]]|limit=5|offset=20'
def main():
    """Run the prepared Semantic MediaWiki ask query and return the result."""
    return make_query(query, __file__)
if __name__ == '__main__':
    # BUG FIX: ``print main()`` is Python-2-only syntax (SyntaxError on
    # Python 3); the call form works on both interpreters.
    print(main())
| StarcoderdataPython |
126416 | #Init
records_storage = {}  # in-memory key/value store for this session
line_break = "-----------------------------------------------\n"
run_program = True  # main-loop flag (the loop actually exits via ``break``)

# Define Functions
def add_item(key, value):
    """Insert or overwrite *key* with *value* in the store."""
    records_storage[key] = value
def delete_item(key):
    """Remove *key* from the store, reporting (not raising) failures."""
    try:
        del records_storage[key]
    except KeyError:
        print(f"Key '{key}' could not be found")
    except Exception:
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; catch only ordinary errors here.
        print("Something else went wrong")
def display_data():
    """Print the current store contents followed by a separator line."""
    print("Values: ", records_storage)
    print(line_break)
# Run Process
while run_program:
    # Prompt for the next action; comparison is case-insensitive.
    action = input("What would you like to do? \n[A] - Add Data\n[B] - Delete Data\n[C] - End]\n").lower()
    if action == 'a':
        print("\nAdd Data Selected")
        key = input("Enter Key: ")
        value = input("Enter Value:")
        add_item(key, value)
        display_data()
    elif action == 'b':
        print("\nDelete Data Selected")
        key = input("Enter Key: ")
        delete_item(key)
        display_data()
    elif action == 'c':
        print("\nTHANK YOU")
        break
    else:
        print(f"\nThe command '{action}' was not found. Please try again")
        print(line_break)
3371940 | """Files for data exploration."""
| StarcoderdataPython |
103198 |
def test_add_two_params():
    # Two-argument form: the optional third operand defaults to None.
    assert add(2, 3) == 5
def test_add_three_params():
    # Three-argument form: all operands are summed.
    assert add(2, 3, 4) == 9
def add(a, b, c=None):
    """Return ``a + b``, or ``a + b + c`` when the optional *c* is given."""
    if c is None:
        return a + b
    return a + b + c
1763732 | ## install tensorflow to anaconda
print("Are you running anacocnda? Y/N")
print("Scanning system for anaconda...")
import os
import sys
import subprocess
import time
import shutil
import requests
import json
import re
import random
import string
import numpy as np
## Design A UX to check if the user wants to install miniforge, anaconda,transformers, gradio and huggingface
def install_miniforge():
    """Interactively offer to install Miniforge (arm64 macOS installer).

    On "Y": download the installer with curl, run it with bash, then install
    the brew formula. Any other answer exits the process.
    """
    print("Do you want to install miniforge? Y/N")
    miniforge = input()
    if miniforge == "Y":
        print("installing miniforge")
        url = "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh"
        # BUG FIX: curl's ``-o`` takes the *output file name*; the URL is a
        # separate positional argument. The original passed the URL to -o
        # (and an empty output), so nothing usable was downloaded. The
        # duplicated second curl/bash pair (output file "") is dropped.
        subprocess.call(["curl", "-L", "-o", "Miniforge3-MacOSX-arm64.sh", url])
        subprocess.call(["bash", "Miniforge3-MacOSX-arm64.sh"])
        # BUG FIX: ``os.call = ("brew", ...)`` merely assigned a tuple to an
        # attribute of the os module; actually invoke brew via subprocess.
        subprocess.call(["brew", "install", "miniforge"])
        print("Done.")
    else:
        print("You chose not to install miniforge")
        print("Continuing...")
        sys.exit()
# install_miniforge() returns None, so this prints "None" once it finishes.
print(install_miniforge())
1711851 | <filename>src/migrations/versions/adbff74fc1de_.py
"""empty message
Revision ID: adbff74fc1de
Revises:
Create Date: 2021-01-30 10:30:33.372825
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "adb<PASSWORD>"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``saved_games`` table (id PK, timestamp, two credential
    columns, serialized board).

    NOTE(review): "xPASS"/"oPASS" look like dataset-redacted column names
    (presumably per-player X/O passwords) -- confirm against the models
    before relying on these names.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "saved_games",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("timestamp", sa.String(), nullable=True),
        sa.Column("xPASS", sa.String(), nullable=True),
        sa.Column("oPASS", sa.String(), nullable=True),
        sa.Column("board", sa.String(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Reverse :func:`upgrade` by dropping the ``saved_games`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("saved_games")
    # ### end Alembic commands ###
| StarcoderdataPython |
180399 |
import dash_core_components as dcc
import dash_html_components as html
# Layout for "App 1": a heading, a city dropdown (labels prefixed with the
# app name), a div the registered callback fills with the selection, and a
# navigation link to the sibling app.
layout = html.Div([
    html.H3('App 1'),
    dcc.Dropdown(
        id='app-1-dropdown',
        options=[
            {'label': 'App 1 - {}'.format(i), 'value': i} for i in [
                'NYC', 'MTL', 'LA'
            ]
        ]
    ),
    html.Div(id='app-1-display-value'),
    dcc.Link('Go to App 2', href='/apps/app2')
])
108107 |
class dtype(object):
    """Minimal stand-in for a framework dtype: a named type tag.

    Instances compare by identity (no __eq__/__hash__ overrides), which the
    module-level singletons below rely on.
    """

    def __init__(self, name):
        # Canonical type name, e.g. "float" or "int32".
        self.type_name = name

    def __repr__(self):
        # Backward-compatible addition for debuggability; identity-based
        # equality/hashing is unchanged.
        return "dtype({!r})".format(self.type_name)
# Module-level singletons, one per supported element type (identity
# comparison, e.g. ``x is float``). NOTE(review): ``float`` deliberately
# shadows the builtin at module scope -- callers presumably import these
# qualified (``module.float``); confirm intended usage before renaming.
float = dtype('float')
double = dtype('double')
half = dtype('half')
uint8 = dtype('uint8')
int16 = dtype('int16')
int32 = dtype('int32')
int64 = dtype('int64')
# Draw five random integers in [1, 10] into a tuple, then report the values
# drawn together with the largest and smallest of them.
from random import randint

# Idiomatic: build the tuple with a generator instead of five repeated calls.
n = tuple(randint(1, 10) for _ in range(5))
print(f'Eu sortiei os valores: {n}')
print(f'Os valores de máximo e mínimo são {max(n)} e {min(n)}.')
| StarcoderdataPython |
1708590 | from logging.config import dictConfig
import psycopg2
import logging
import os
def migrate(conn):
    """One-way migration: split Google-Calendar state out of ``res_hall``
    into a dedicated ``google_calendar_info`` table.

    Commit is the caller's responsibility (see the ``__main__`` block).

    NOTE(review): the ``token <PASSWORD>,`` column definition below is
    mangled (dataset redaction removed its SQL type); restore the original
    type before running this migration.
    """
    cur = conn.cursor()
    # Check to see if the google_calendar_info table exists
    cur.execute("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE tablename = 'google_calendar_info');")
    exists = cur.fetchone()
    logging.info("  'google_calendar_info' Table Exists: {}".format(exists))
    if not exists[0]:
        # If the table does not exist, create the table
        logging.info("  Creating 'google_calendar_info' Table.")
        # Create the google_calendar_info table
        cur.execute("""
        CREATE TABLE google_calendar_info(
            id				serial UNIQUE,
            res_hall_id		int,
            auth_state		varchar(30),
            token			<PASSWORD>,
            calendar_id		varchar(60),

            PRIMARY KEY (res_hall_id),
            FOREIGN KEY (res_hall_id) REFERENCES res_hall(id)
        );""")
        # Drop all Google related columns in the res_hall table
        logging.info("  Removing Google Related Columns From 'res_hall' Table.")
        cur.execute("""
        ALTER TABLE res_hall
        DROP COLUMN IF EXISTS calendar_id,
        DROP COLUMN IF EXISTS g_cal_token,
        DROP COLUMN IF EXISTS g_cal_auth_state
        """)
if __name__ == "__main__":
conn = psycopg2.connect(os.environ["DATABASE_URL"])
dictConfig({
'version': 1, # logging module specific-- DO NOT CHANGE
'formatters': {'default': {
'format': '[%(asctime)s.%(msecs)d] %(levelname)s in %(module)s: %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default'
}},
'root': {
'level': os.environ["LOG_LEVEL"],
'handlers': ['wsgi']
}
})
logging.info("Beginning Migration")
migrate(conn)
logging.info("Committing Changes")
conn.commit()
logging.info(" Finished Committing Changes")
logging.info("Finished Migration")
| StarcoderdataPython |
3221776 | import json
import os
# Name of the configuration file; if it changes, update it here as well.
confFile = "./components.json"

# Prompt for the data needed to create a new component, create its source
# folder with js/css/html stubs, and register it in the configuration file.
print("Add new component\n")
print("*"*30)

comp = {
    "name": None,
    "tag": None
}

comp["name"] = input("\nName: ")
comp["tag"] = input("Tag for component (enter to get from name): ")

# Idiomatic emptiness test: covers both None and the empty string.
if not comp["tag"]:
    comp["tag"] = comp["name"]

# Context managers guarantee the file handles are closed even on errors.
with open(confFile) as f:
    data = json.load(f)
data["components"].append(comp)
print("New component:")
print(comp)

os.mkdir(".{}{}".format(data["config"]["baseUrl"], comp["name"]))
print("="*30)
print("Creando archivos...")
path = ".{}{}/{}".format(data["config"]["baseUrl"], comp["name"], comp["name"])
for ext in ("js", "css", "html"):
    with open("{}.{}".format(path, ext), "w") as stub:
        stub.write("// Archivo %s" % ext)

with open(confFile, "w") as f:
    f.write(json.dumps(data, indent=4, sort_keys=True))
print("--| Done!")
115467 | from validaciones import *
class Punto():
    """A point on the 2-D plane with basic vector arithmetic."""

    def __init__(self, x=0, y=0):
        # Coordinates are validated by the project helper es_numero().
        if es_numero(x) and es_numero(y):
            self.x = x
            self.y = y
        else:
            raise TypeError("X e Y Deben ser valores numericos ")

    def distancia(self, otro):
        """Euclidean distance between this point and *otro*."""
        # BUG FIX: the original referenced undefined ``dx``/``dy`` and always
        # raised NameError; compute the difference vector explicitly.
        d = self.restar(otro)
        return (d.x * d.x + d.y * d.y) ** 0.5

    def restar(self, otro):
        """Component-wise difference ``self - otro`` as a new Punto."""
        return Punto(self.x - otro.x, self.y - otro.y)

    def norma(self):
        """Euclidean norm (distance from the origin)."""
        return (self.x * self.x + self.y * self.y) ** 0.5

    def __str__(self):
        # BUG FIX: __str__ must return a str; the original returned a tuple,
        # which raised TypeError whenever the point was printed.
        return "(" + str(self.x) + ", " + str(self.y) + ")"

    def __add__(self, otro):
        return Punto(self.x + otro.x, self.y + otro.y)

    def __sub__(self, otro):
        return Punto(self.x - otro.x, self.y - otro.y)
1686115 | <reponame>statgen/locuszoom-hosted
from django import template
import furl
register = template.Library()
@register.filter(name='add_token')
def add_token(value, token):
"""Generate an absolute URL that includes a private link token as a query param"""
if not token:
return value
return furl.furl(value).add({'token': token}).url
| StarcoderdataPython |
1627237 | # Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import os
import importlib.util
import py_compile
import tempfile
import hy.importer
def test_pyc():
    """Byte-compile a Hy source file and import the resulting .pyc."""
    with tempfile.NamedTemporaryFile(suffix='.hy') as source:
        source.write(b'(defn pyctest [s] (+ "X" s "Y"))')
        source.flush()

        compiled = py_compile.compile(source.name)
        assert os.path.exists(compiled)
        try:
            mod = hy.importer._import_from_path('pyc', compiled)
        finally:
            # Always clean up the .pyc, even if the import blows up.
            os.remove(compiled)

    assert mod.pyctest('Foo') == 'XFooY'
| StarcoderdataPython |
1670261 | <reponame>Art-Ev/aequilibrae
from .reference_files import *
| StarcoderdataPython |
3227967 | # Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test module module"""
from unittest import mock
from openstackclient.common import module as osc_module
from openstackclient.tests.unit import fakes
from openstackclient.tests.unit import utils
# NOTE(dtroyer): module_1 must match the version list filter (not --all)
# currently == '*client*'
module_name_1 = 'fakeclient'
module_version_1 = '0.1.2'
module_name_2 = 'zlib'
module_version_2 = '1.1'
# module_3 match openstacksdk
module_name_3 = 'openstack'
module_version_3 = '0.9.13'
# module_4 match sub module of fakeclient
module_name_4 = 'fakeclient.submodule'
module_version_4 = '0.2.2'
# module_5 match private module
module_name_5 = '_private_module.lib'
module_version_5 = '0.0.1'

# Fake sys.modules content (name -> stub built from name/version); patched
# over openstackclient.common.module.sys.modules in TestModuleList below.
MODULES = {
    module_name_1: fakes.FakeModule(module_name_1, module_version_1),
    module_name_2: fakes.FakeModule(module_name_2, module_version_2),
    module_name_3: fakes.FakeModule(module_name_3, module_version_3),
    module_name_4: fakes.FakeModule(module_name_4, module_version_4),
    module_name_5: fakes.FakeModule(module_name_5, module_version_5),
}
class TestCommandList(utils.TestCommand):
    """Tests for ``openstack command list`` (osc_module.ListCommand)."""

    def setUp(self):
        super(TestCommandList, self).setUp()

        # Stub the command manager with one group containing two commands.
        self.app.command_manager = mock.Mock()
        self.app.command_manager.get_command_groups.return_value = [
            'openstack.common'
        ]
        self.app.command_manager.get_command_names.return_value = [
            'limits show\nextension list'
        ]

        # Get the command object to test
        self.cmd = osc_module.ListCommand(self.app, None)

    def test_command_list_no_options(self):
        """Without options, the stubbed group and its commands are listed."""
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)

        # TODO(bapalm): Adjust this when cliff properly supports
        # handling the detection rather than using the hard-code below.
        collist = ('Command Group', 'Commands')
        self.assertEqual(collist, columns)
        datalist = ((
            'openstack.common',
            'limits show\nextension list'
        ),)
        self.assertEqual(datalist, tuple(data))

    def test_command_list_with_group_not_found(self):
        """An unknown --group filter yields the header but no rows."""
        arglist = [
            '--group', 'not_exist',
        ]
        verifylist = [
            ('group', 'not_exist'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        collist = ('Command Group', 'Commands')
        self.assertEqual(collist, columns)
        self.assertEqual([], data)

    def test_command_list_with_group(self):
        """--group matches by substring ('common' -> 'openstack.common')."""
        arglist = [
            '--group', 'common',
        ]
        verifylist = [
            ('group', 'common'),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        collist = ('Command Group', 'Commands')
        self.assertEqual(collist, columns)
        datalist = ((
            'openstack.common',
            'limits show\nextension list'
        ),)
        self.assertEqual(datalist, tuple(data))
@mock.patch.dict(
    'openstackclient.common.module.sys.modules',
    values=MODULES,
    clear=True,
)
class TestModuleList(utils.TestCommand):
    """Tests for ``openstack module list`` against the faked sys.modules."""

    def setUp(self):
        super(TestModuleList, self).setUp()
        # Get the command object to test
        self.cmd = osc_module.ListModule(self.app, None)

    def test_module_list_no_options(self):
        """Default listing: only *client*-style and openstacksdk modules."""
        arglist = []
        verifylist = [
            ('all', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)

        # Output xxxclient and openstacksdk, but not regular module, like: zlib
        self.assertIn(module_name_1, columns)
        self.assertIn(module_version_1, data)
        self.assertNotIn(module_name_2, columns)
        self.assertNotIn(module_version_2, data)
        self.assertIn(module_name_3, columns)
        self.assertIn(module_version_3, data)
        # Filter sub and private modules
        self.assertNotIn(module_name_4, columns)
        self.assertNotIn(module_version_4, data)
        self.assertNotIn(module_name_5, columns)
        self.assertNotIn(module_version_5, data)

    def test_module_list_all(self):
        """--all adds regular modules; sub/private modules stay filtered."""
        arglist = [
            '--all',
        ]
        verifylist = [
            ('all', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # In base command class Lister in cliff, abstract method take_action()
        # returns a tuple containing the column names and an iterable
        # containing the data to be listed.
        columns, data = self.cmd.take_action(parsed_args)

        # Output xxxclient, openstacksdk and regular module, like: zlib
        self.assertIn(module_name_1, columns)
        self.assertIn(module_version_1, data)
        self.assertIn(module_name_2, columns)
        self.assertIn(module_version_2, data)
        self.assertIn(module_name_3, columns)
        self.assertIn(module_version_3, data)
        # Filter sub and private modules
        self.assertNotIn(module_name_4, columns)
        self.assertNotIn(module_version_4, data)
        self.assertNotIn(module_name_5, columns)
        self.assertNotIn(module_version_5, data)
124337 | # -*- coding: utf-8 -*-
import unittest
def binary_search(sorted_list: list, first: int, last: int, target: int) -> int:
    """Return 1 if *target* occurs in ``sorted_list[first:last+1]``, else 0.

    Classic recursive bisection; *first*/*last* are inclusive indices into
    an ascending-sorted list.
    """
    if first > last:
        return 0
    # Integer midpoint: // avoids the needless float round-trip of int(x/2),
    # and the midpoint is only computed once the range is known non-empty.
    mid = (first + last) // 2
    if target == sorted_list[mid]:
        return 1
    if target > sorted_list[mid]:
        return binary_search(sorted_list, mid + 1, last, target)
    return binary_search(sorted_list, first, mid - 1, target)
def find_number(n: list, m: list) -> list:
    """For each value in *m*, record 1 if it occurs in *n*, else 0.

    Sorts *n* in place, then binary-searches it for every query value.
    """
    n.sort()
    last = len(n) - 1
    return [binary_search(n, 0, last, target) for target in m]
class TestFindNumber(unittest.TestCase):
    """Membership queries over (haystack, needles, expected-flags) cases."""

    def test_find_number(self):
        for n, m, expected in [
            [
                [4, 1, 5, 2, 3],
                [1, 3, 7, 9, 3],
                [1, 1, 0, 0, 1]
            ],
            [
                [1, 4, 99, 3, 2, 6, 7],
                [5, 4, 3, 2, 1, 6, 7, 10, 20, 60, 70],
                [0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
            ]
        ]:
            self.assertEqual(expected, find_number(n, m))
if __name__ == '__main__':
    # Run the unittest suite when executed directly.
    unittest.main()
| StarcoderdataPython |
3259804 | <reponame>Valalala/mc2unity<filename>learning/testBlender.py
# testBlender.py
# <NAME>
# Testing code for a project that converts Minecraft worlds to Unity.
import bpy
import bmesh
import time
###### Alpha ######
# Only useful when viewing the file in blender.
def fixAlpha():
    """Switch every known transparent-block material to alpha blending and
    wire the image texture's alpha output into the Principled BSDF alpha
    input. Only useful when viewing the file in Blender itself.

    Note: mineways has a blender script, details texture and material import.
    """
    for name in [s.lower() for s in transparentBlocks]:
        # Skip transparent block types that this particular world never uses.
        if name not in bpy.data.materials.keys():
            continue
        bpy.data.materials[name].blend_method = 'BLEND'
        bpy.data.materials[name].shadow_method = 'HASHED'
        socket_in = bpy.data.materials[name].node_tree.nodes['Principled BSDF'].inputs['Alpha']
        socket_out = bpy.data.materials[name].node_tree.nodes['Image Texture'].outputs['Alpha']
        bpy.data.materials[name].node_tree.links.new(socket_in, socket_out)
###### UV edit ######
def fixUVs():
    """Square up the active object's UVs for Minecraft-style full blocks.

    Dissolves coplanar faces, cube-projects the UVs, then snaps each face's
    UV island to the unit square and reorients side/top faces so the
    texture reads correctly after export.
    """
    # Get the active mesh
    me = bpy.context.object.data
    # Cube projection
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.dissolve_limited()
    bpy.ops.uv.cube_project(cube_size=2, correct_aspect=False)
    bpy.ops.object.mode_set(mode = 'OBJECT')
    # Get a BMesh representation
    bm = bmesh.new()   # create an empty BMesh
    bm.from_mesh(me)   # fill it in from a Mesh
    uv_layer = bm.loops.layers.uv.verify()
    bm.faces.ensure_lookup_table()
    uv_offset = bm.faces[0].loops[0][uv_layer].uv.xy
    for f in bm.faces:
        # Offset must be per face because cube projection can give non whole number values
        uv_offset.x = f.loops[0][uv_layer].uv.x - round(f.loops[0][uv_layer].uv.x)
        uv_offset.y = f.loops[0][uv_layer].uv.y - round(f.loops[0][uv_layer].uv.y)
        for loop in f.loops:
            loop_uv = loop[uv_layer]
            # align to zero
            loop_uv.uv = loop_uv.uv + uv_offset
            if(f.normal.x == 1 or f.normal.x == -1):
                # 90 deg turn then mirror
                loop_uv.uv = (loop_uv.uv.y, loop_uv.uv.x)
            if(f.normal.y == 1 or f.normal.y == -1):
                # vertical flip
                loop_uv.uv = (-loop_uv.uv.x, loop_uv.uv.y)
    # Finish up, write the bmesh back to the mesh
    bm.to_mesh(me)
    bm.free()  # free and prevent further access
###### Setup ######
# disolveMaterials = {'oak_leaves', 'birch_leaves', 'water_still'}
# transparentMaterials = {'water_still'}
# cutoutMaterials = {'grass', 'oak_leaves', 'birch_leaves', 'dandelion', 'poppy', 'sugar_cane'}
# Use Mineway's [block test world] to check/complete the list
#List of transparent blocks
transparentBlocks = ["Acacia_Leaves","Dark_Oak_Leaves","Acacia_Door","Activator_Rail","Bed","Beetroot_Seeds","Birch_Door","Brewing_Stand","Cactus","Carrot","Carrots","Cauldron","Chorus_Flower","Chorus_Flower_Dead","Chorus_Plant","Cobweb",
"Cocoa","Crops","Dandelion","Dark_Oak_Door","Dead_Bush","Detector_Rail","Enchantment_Table","Glass","Glass_Pane","Grass","Iron_Bars","Iron_Door","Iron_Trapdoor","Jack_o'Lantern","Jungle_Door","Large_Flowers",
"Leaves","Melon_Stem","Monster_Spawner","Nether_Portal","Nether_Wart","Oak_Leaves","Oak_Sapling","Poppy","Potato","Potatoes","Powered_Rail","Pumpkin_Stem","Rail","Red_Mushroom",
"Redstone_Comparator_(inactive)","Redstone_Torch_(inactive)","Repeater_(inactive)","Sapling","Spruce_Door","Stained_Glass_Pane","Sugar_Cane","Sunflower","Tall_Grass","Trapdoor","Vines","Wheat","Wooden_Door"]
#List of light emitting blocks
lightBlocks = ["Daylight_Sensor","End_Gateway","End_Portal","Ender_Chest","Flowing_Lava","Glowstone","Inverted_Daylight_Sensor","Lava","Magma_Block","Redstone_Lamp_(active)","Stationary_Lava","Sea_Lantern"]
#List of light emitting and transparent block
lightTransparentBlocks = ["Beacon","Brown_Mushroom","Dragon_Egg","Endframe","End_Rod","Fire","Powered_Rail_(active)","Redstone_Comparator_(active)","Redstone_Torch_(active)","Repeater_(active)","Torch"]
# Destination folder for the exported FBX.
outputFolder = r"D:\Technic\Repositories\mc2unity\results\\"
view_layer = bpy.context.view_layer
###### Import ######
# bpy.ops.import_scene.obj('D:\Technic\Repositories\mc2unity\models\Suzanne.obj')
bpy.ops.import_scene.obj(filepath=r"D:\Technic\Repositories\mc2unity\models\skylands.obj", filter_glob="*.obj;*.mtl", use_edges=True, use_smooth_groups=True, use_split_objects=True, use_split_groups=False, use_groups_as_vgroups=False, use_image_search=True, split_mode='ON', global_clight_size=0.0, axis_forward='-Z', axis_up='Y')
lod_0 = bpy.data.objects[0]
# lod_0 = bpy.data.objects['Suzanne']
# lod_0.name = 'Suzanne_LOD_0'
lod_0.select_set(True)
view_layer.objects.active = lod_0
###### MAIN ######
# fixAlpha()
# Merge duplicate vertices, then split per material so each piece's UVs can
# be squared up independently by fixUVs().
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.remove_doubles()
bpy.ops.mesh.separate(type='MATERIAL')
bpy.ops.object.mode_set(mode = 'OBJECT')
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
    obj.select_set(True)
    view_layer.objects.active = obj
    # Should only occur for full blocks
    fixUVs()
    obj.select_set(False)
# Re-join everything, merge the seams the split created, and export.
bpy.ops.object.select_all(action='SELECT')
bpy.ops.object.join()
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.remove_doubles()
bpy.ops.object.mode_set(mode = 'OBJECT')
bpy.ops.export_scene.fbx(filepath=outputFolder + "test" + ".fbx", use_selection=True)
# bpy.ops.wm.save_mainfile()
# for i in range(1,4):
# bpy.ops.object.duplicate()
# bpy.context.object.name = 'Suzanne_LOD_{0}'.format(i)
# # print(bpy.context.object)
# # print(lod_0.name)
# bpy.ops.object.modifier_add(type='DECIMATE')
# bpy.context.object.modifiers['Decimate'].ratio = 0.5
# bpy.ops.object.modifier_apply(apply_as='DATA', modifier='Decimate')
# bpy.ops.object.select_all(action='DESELECT')
# for obj in bpy.data.objects:
# obj.select_set(True)
# # some exporters only use the active object
# view_layer.objects.active = obj
# bpy.ops.export_scene.fbx(filepath=outputFolder + obj.name + ".fbx", use_selection=True)
# print("Exporting: " + obj.name)
# obj.select_set(False)
# mcWorld = input('Enter Minecraft world path: ')
# terrainTex = input('Enter Mineways terrain file path: ')
# print(mcWorld)
# print(terrainTex)
# mesh = bpy.data.meshes.new(name="MyMesh")
# print(bpy.data.objects[1].name)
# print(bpy.data.objects[1].modifier_add(type='DECIMATE'))
# print(mesh.name)
# NOTE(review): the purpose of this pause is not evident from the script --
# presumably to keep output visible before Blender exits; confirm before removing.
time.sleep(2)
# mineways script template:
# Minecraft world: C:\Users\erich\AppData\Roaming\.minecraft\saves\Round World
# Terrain file name: C:\Users\erich\Desktop\Mineways\terrainExt_Sphax.png
# Selection location: 60, 0, 60 to 80, 255, 100
# Set render type: Wavefront OBJ absolute indices
# Export for Rendering: C:\Users\erich\Desktop\tile00.obj
# os.system('D:\Technic\mineways\mineways.exe -m D:\Technic\Repositories\mc2unity\learning\file.mwscript')
# subprocess.run(["D:\Technic\mineways\mineways.exe", "-m", "D:\Technic\Repositories\mc2unity\learning\file.mwscript"])
# remember blender script templates.
# http://www.realtimerendering.com/erich/minecraft/public/mineways/scripting.html
# https://docs.blender.org/api/current/bpy.ops.import_scene.html?highlight=import#module-bpy.ops.import_scene
# https://docs.blender.org/api/current/bpy.ops.export_scene.html?highlight=export#module-bpy.ops.export_scene
# https://docs.blender.org/api/2.82/bmesh.html
# https://medium.com/@behreajj/creative-coding-in-blender-a-primer-53e79ff71e
| StarcoderdataPython |
183456 | """Export a checkpoint as an ONNX model.
Applies onnx utilities to improve the exported model and
also tries to simplify the model with onnx-simplifier.
https://github.com/onnx/onnx/blob/master/docs/PythonAPIOverview.md
https://github.com/daquexian/onnx-simplifier
"""
import argparse
import logging
import shutil
import torch
import openpifpaf
try:
import onnx
except ImportError:
onnx = None
try:
import onnxsim
except ImportError:
onnxsim = None
LOG = logging.getLogger(__name__)
def image_size_warning(basenet_stride, input_w, input_h):
    """Warn when an input dimension is not of the form k*stride + 1.

    OpenPifPaf expects width and height to be a multiple of the backbone
    stride plus one; suggest the two closest valid sizes otherwise.
    """
    _warn_dimension('width', input_w, basenet_stride)
    _warn_dimension('height', input_h, basenet_stride)


def _warn_dimension(label, value, stride):
    # Emit one warning per offending dimension, naming the nearest valid values.
    if value % stride == 1:
        return
    LOG.warning(
        'input %s (%d) should be a multiple of basenet '
        'stride (%d) + 1: closest are %d and %d',
        label, value, stride,
        (value - 1) // stride * stride + 1,
        ((value - 1) // stride + 1) * stride + 1,
    )
def apply(model, outfile, verbose=True, input_w=129, input_h=97):
    """Export *model* to ONNX at *outfile* using a dummy input of the given size."""
    image_size_warning(model.base_net.stride, input_w, input_h)

    # configure: in-place head ops cannot be traced by the ONNX exporter
    openpifpaf.network.heads.CompositeField3.inplace_ops = False

    dummy_input = torch.randn(1, 3, input_h, input_w)
    torch.onnx.export(
        model, dummy_input, outfile, verbose=verbose,
        input_names=['input_batch'], output_names=['cif', 'caf'],
        # keep_initializers_as_inputs=True,
        opset_version=11,
        do_constant_folding=True,
        # BUG FIX: dynamic_axes keys must match the declared input/output
        # names; the original used "input", which does not match
        # 'input_batch' and therefore left the batch dimension static.
        dynamic_axes={
            "input_batch": {0: 'dynamic'},
            "cif": {0: 'dynamic'},
            "caf": {0: 'dynamic'}}
    )
def check(modelfile):
    """Load the ONNX file and run the official model checker on it."""
    onnx.checker.check_model(onnx.load(modelfile))
def simplify(infile, outfile=None):
    """Simplify an ONNX model with onnx-simplifier.

    When *outfile* is omitted, the model is simplified in place and the
    original is preserved next to it under a ``.unsimplified.onnx`` name.
    """
    if outfile is None:
        assert infile.endswith('.onnx')
        outfile = infile
        infile = infile.replace('.onnx', '.unsimplified.onnx')
        # BUG FIX: back up only when simplifying in place. Previously this
        # copy ran unconditionally, clobbering *infile* with a
        # not-yet-written *outfile* whenever an explicit outfile was given.
        shutil.copyfile(outfile, infile)
    simplified_model, check_ok = onnxsim.simplify(infile, check_n=3, perform_optimization=False)
    assert check_ok
    onnx.save(simplified_model, outfile)
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    # Combine "show defaults in help" with "keep the raw module docstring
    # formatting" for the --help output.
    pass
def main():
    """CLI entry point: build the parser, instantiate the network via the
    openpifpaf factory, export it to ONNX, then optionally simplify/check."""
    parser = argparse.ArgumentParser(
        prog='python3 -m openpifpaf.export_onnx',
        description=__doc__,
        formatter_class=CustomFormatter,
    )
    parser.add_argument('--version', action='version',
                        version='OpenPifPaf {version}'.format(version=openpifpaf.__version__))
    openpifpaf.network.Factory.cli(parser)
    parser.add_argument('--outfile', default='openpifpaf-resnet50.onnx')
    parser.add_argument('--simplify', dest='simplify', default=False, action='store_true')
    parser.add_argument('--check', dest='check', default=False, action='store_true')
    parser.add_argument('--input-width', type=int, default=129)
    parser.add_argument('--input-height', type=int, default=97)
    args = parser.parse_args()

    openpifpaf.network.Factory.configure(args)

    model, _ = openpifpaf.network.Factory().factory()

    apply(model, args.outfile, input_w=args.input_width, input_h=args.input_height)
    if args.simplify:
        simplify(args.outfile)
    if args.check:
        check(args.outfile)
if __name__ == '__main__':
    # Run the exporter when executed directly or via ``-m``.
    main()
| StarcoderdataPython |
153736 | <gh_stars>0
import argparse
from collections import defaultdict
from operator import itemgetter
import sys
import tqdm
import warnings
import faiss
import numpy as np
from numpy.linalg import norm
# TODO refactor some of the functionality into a library, so that it can
# also be called from Python code.
def ngrams(string, n):
    """Lazily yield every length-*n* substring of *string*, left to right."""
    last_start = len(string) - n + 1
    return (string[start:start + n] for start in range(last_start))
def determine_top_ngrams(verses, n, dim):
    """Map the *dim* most frequent character n-grams across all verse texts
    to dense ids 0..dim-1 (most frequent first; ties in first-seen order,
    matching the original stable sort).
    """
    from collections import Counter  # local import: module imports untouched
    ngram_freq = Counter()
    for text in map(itemgetter(1), verses):
        ngram_freq.update(ngrams(text, n))
    # Counter.most_common(dim) == sorted-by-count-desc[:dim] with documented
    # first-encountered tie ordering.
    return {ngr: i for i, (ngr, _freq) in enumerate(ngram_freq.most_common(dim))}
def vectorize(verses, ngram_ids, n=2, dim=200, min_ngrams=10):
    """Build a row-normalized (verse x ngram) count matrix.

    Verses with fewer than *min_ngrams* known n-grams are dropped. Returns
    the kept verse ids, their texts, and a float32 matrix whose rows are
    L2-normalized (so inner product equals cosine similarity).
    """
    # FIXME memory is being wasted here by storing v_ids and verses again
    # TODO make the progress printer optional
    v_ids, v_texts, rows = [], [], []
    for (v_id, text) in tqdm.tqdm(verses):
        v_ngr_ids = [ngram_ids[ngr] for ngr in ngrams(text, n) \
                        if ngr in ngram_ids]
        if len(v_ngr_ids) >= min_ngrams:
            row = np.zeros(dim, dtype=np.float32)
            for ngr_id in v_ngr_ids:
                row[ngr_id] += 1
            rows.append(row)
            v_ids.append(v_id)
            v_texts.append(text)
    m = np.vstack(rows)
    # Normalize each row to unit length (rows are non-zero thanks to the
    # min_ngrams filter above).
    m = np.divide(m, norm(m, axis=1).reshape((m.shape[0], 1)))
    return v_ids, v_texts, m
def find_similarities(index, m, k, threshold, query_size, print_progress):
    """Yield (query_row, index_row, similarity) triples for near neighbors.

    Queries *m* against the faiss *index* in batches of *query_size*,
    keeping per row the top-*k* hits that meet *threshold* and are not the
    row itself. *query_size* affects only performance, not results.
    """
    if print_progress:
        progressbar = tqdm.tqdm(total=m.shape[0])
    for i in range(0, m.shape[0], query_size):
        query = range(i, min(m.shape[0], i+query_size))
        D, I = index.search(m[query,], k)
        # NOTE(review): the inner loop reuses/shadows ``i`` from the batch
        # loop; it works (the outer ``i`` is recomputed each iteration) but
        # deserves a rename on the next touch.
        for i, q in enumerate(query):
            for j in range(k):
                if q != I[i,j] and D[i,j] >= threshold:
                    yield (q, I[i,j], D[i,j])
        if print_progress:
            progressbar.update(D.shape[0])
def read_verses(fp):
    """Parse tab-separated lines from *fp* into (verse_id, text) pairs,
    silently skipping lines with fewer than two fields."""
    pairs = []
    for line in fp:
        fields = line.rstrip().split('\t')
        if len(fields) >= 2:
            pairs.append((fields[0], fields[1]))
    return pairs
def parse_arguments():
    """Define and parse the CLI options for the n-gram similarity tool."""
    parser = argparse.ArgumentParser(
        description='Compute the n-gram similarities on short strings.')
    parser.add_argument(
        '-d', '--dim', type=int, default=200,
        help='The number of dimensions of n-gram vectors')
    parser.add_argument('-g', '--use-gpu', action='store_true')
    parser.add_argument(
        '-k', type=int, default=10,
        help='The number of nearest neighbors to find for each verse.')
    parser.add_argument(
        '-i', '--index-file', metavar='FILE',
        help='Read the verses to index from a separate file.')
    parser.add_argument(
        '-m', '--min-ngrams', type=int, default=10,
        help='Minimum number of known n-grams to consider a verse.')
    parser.add_argument(
        '-n', type=int, default=2,
        # NOTE(review): "i.e. ngrams" in the help text probably meant
        # "bigrams" -- confirm before changing user-facing text.
        help='The size (`n`) of the n-grams (default: 2, i.e. ngrams).')
    parser.add_argument(
        '-q', '--query-size', type=int, default=100,
        help='The number of verses to pass in a single query '
        '(doesn\'t affect the results, only performance)')
    parser.add_argument(
        '-t', '--threshold', type=float, default=0.7,
        help='Minimum similarity to output.')
    parser.add_argument(
        '-T', '--text', action='store_true',
        help='Print the strings additionally to IDs.')
    parser.add_argument(
        '-p', '--print-progress', action='store_true',
        help='Print a progress bar.')
    return parser.parse_args()
def main():
    """Read query verses from stdin, optionally index verses from a
    separate file, and print nearest-neighbor similarity triples to
    stdout (tab-separated).
    """
    args = parse_arguments()
    res = None
    if args.use_gpu:
        try:
            res = faiss.StandardGpuResources()
        except Exception:
            # Fall back to CPU search when GPU resources are unavailable.
            warnings.warn('GPU not available!')
    query_verses = read_verses(sys.stdin)
    index_verses = []
    if args.index_file is not None:
        with open(args.index_file) as fp:
            index_verses = read_verses(fp)
    sys.stderr.write('Counting n-gram frequencies\n')
    # Build the n-gram vocabulary over both collections so query and
    # index vectors live in the same space.
    ngram_ids = determine_top_ngrams(index_verses+query_verses, args.n, args.dim)
    sys.stderr.write(' '.join(ngram_ids.keys()) + '\n')
    sys.stderr.write('Creating a dense matrix\n')
    query_v_ids, query_v_texts, query_m = \
        vectorize(query_verses, ngram_ids,
                  n=args.n, dim=args.dim, min_ngrams=args.min_ngrams)
    # Without a separate index file, verses are matched against themselves.
    index_v_ids, index_v_texts, index_m = query_v_ids, query_v_texts, query_m
    if index_verses:
        index_v_ids, index_v_texts, index_m = \
            vectorize(index_verses, ngram_ids,
                      n=args.n, dim=args.dim, min_ngrams=args.min_ngrams)
    sys.stderr.write('Creating a FAISS index\n')
    # Inner-product index; vectors are unit-normalized in vectorize(),
    # so the reported similarity is the cosine similarity.
    index = faiss.IndexFlatIP(args.dim)
    if res is not None:
        index = faiss.index_cpu_to_gpu(res, 0, index)
    index.add(index_m)
    sys.stderr.write('Searching for nearest neighbors\n')
    progressbar = None  # NOTE(review): unused; progress is handled inside find_similarities.
    sims = find_similarities(index, query_m, args.k, args.threshold,
                             args.query_size, args.print_progress)
    for i, j, sim in sims:
        v1_id = query_v_ids[i]
        v2_id = index_v_ids[j]
        if args.text:
            v1_text = query_v_texts[i]
            v2_text = index_v_texts[j]
            print(v1_id, v1_text, v2_id, v2_text, sim, sep='\t')
        else:
            print(v1_id, v2_id, sim, sep='\t')
| StarcoderdataPython |
133218 | from unittest import TestCase
from mock import Mock, patch
from samcli.commands.local.lib.sam_base_provider import SamBaseProvider
from samcli.lib.intrinsic_resolver.intrinsic_property_resolver import IntrinsicResolver
class TestSamBaseProvider_get_template(TestCase):
    """Tests for SamBaseProvider.get_template."""
    @patch("samcli.commands.local.lib.sam_base_provider.ResourceMetadataNormalizer")
    @patch("samcli.commands.local.lib.sam_base_provider.SamTranslatorWrapper")
    @patch.object(IntrinsicResolver, "resolve_template")
    def test_must_run_translator_plugins(
        self, resolve_template_mock, SamTranslatorWrapperMock, resource_metadata_normalizer_patch
    ):
        """get_template must wrap the template in SamTranslatorWrapper
        exactly once and run that wrapper's plugins exactly once.
        """
        # Stub out normalization and intrinsic resolution so only the
        # wrapper/plugin interaction is exercised.
        resource_metadata_normalizer_patch.normalize.return_value = True
        resolve_template_mock.return_value = {}
        translator_instance = SamTranslatorWrapperMock.return_value = Mock()
        template = {"Key": "Value"}
        overrides = {"some": "value"}
        SamBaseProvider.get_template(template, overrides)
        SamTranslatorWrapperMock.assert_called_once_with(template)
        translator_instance.run_plugins.assert_called_once()
| StarcoderdataPython |
180495 | <gh_stars>100-1000
from __future__ import unicode_literals, print_function
import ideone
import time
import praw
import re
import urllib
import traceback
import config
from socket import error as SocketError
from sys import exit
from functools import wraps
def handle_api_exceptions(max_attempts=1):
    """Return a function decorator that wraps a given function in a
    try-except block that will handle various exceptions that may
    occur during an API request to reddit. A maximum number of retry
    attempts may be specified.

    If every attempt fails, the wrapper returns None after logging each
    failure.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            retries = 0
            while retries < max_attempts:
                error_msg = ""
                try:
                    return func(*args, **kwargs)
                # Handle and log miscellaneous API exceptions
                except praw.exceptions.PRAWException as e:
                    error_msg = "PRAW Exception \"{error}\" occurred: ".format(
                        error=e)
                except praw.exceptions.ClientException as e:
                    error_msg = "Client Exception \"{error}\" occurred: ".format(
                        error=e)
                except praw.exceptions.APIException as e:
                    error_msg = "API Exception \"{error}\" occurred: ".format(
                        error=e)
                except SocketError as e:
                    error_msg = "SocketError \"{error}\" occurred: ".format(
                        error=e)
                # Linear backoff.  (Bug fix: the original computed
                # ``retries * 150``, which slept 0 seconds on the first
                # failure, and logged ``error_msg`` twice.)
                sleep_time = (retries + 1) * 150
                log("{0} in {f}. Sleeping for {t} seconds. "
                    "Attempt {rt} of {at}.".format(error_msg, f=func.__name__,
                        t=sleep_time, rt=retries + 1, at=max_attempts))
                time.sleep(sleep_time)
                retries += 1
        return wrapper
    return decorator
class Reply(object):
    """Base class for a potential response to a comment.

    A reply is created without a recipient; once the reply has been
    sent, the recipient should be recorded on the instance.
    """
    def __init__(self, text):
        # Reddit rejects comments of 10000 or more characters, so clip
        # the body and mark the cut with an ellipsis.
        clipped = text if len(text) < 10000 else text[:9995] + '\n...'
        self.text = clipped
        self.recipient = None
    def send(self, *args, **kwargs):
        """Deliver the reply; subclasses must implement this."""
        raise NotImplementedError
class CompiledReply(Reply):
    """Replies that contain details about evaluated code. These can be
    sent as replies to comments.
    """
    def __init__(self, text, compile_details):
        Reply.__init__(self, text)
        # Raw ideone submission details (source, output, stderr, ...).
        self.compile_details = compile_details
        # The comment this reply answers; set when the reply is sent.
        self.parent_comment = None
    @handle_api_exceptions(max_attempts=3)
    def send(self, comment, reddit_session):
        """Send a reply to a specific reddit comment or message."""
        self.parent_comment = comment
        self.recipient = comment.author
        comment.reply(self.text)
        log("Replied to {id}".format(id=comment.id))
    @handle_api_exceptions(max_attempts=3)
    def make_edit(self, comment, parent):
        """Edit one of the bot's existing comments."""
        self.parent_comment = parent
        self.recipient = parent.author
        comment.edit(self.text)
        log("Edited comment {}".format(comment.id))
    def detect_spam(self):
        """Scan a reply and return a list of potentially spammy attributes
        found in the comment's output.
        """
        output = self.compile_details['output']
        source = self.compile_details['source']
        errors = self.compile_details['stderr']
        spam_behaviors = {
            "Excessive line breaks": output.count('\n') > config.LINE_LIMIT,
            "Excessive character count": len(output) > config.CHAR_LIMIT,
            "Spam phrase detected": any([word.encode('utf-8').lower() in
                                         (source + output).encode('utf-8').lower()
                                         for word in config.SPAM_PHRASES]),
            "Illegal system call detected": "Permission denied" in errors
        }
        if any(spam_behaviors.values()):
            # .items() instead of the Python-2-only .iteritems(), keeping
            # this working under both Python 2 and 3.
            spam_triggers = [k for k, v in spam_behaviors.items() if v]
            return spam_triggers
        return []
class MessageReply(Reply):
    """Replies that contain information that may be sent to a reddit user
    via private message.
    """
    def __init__(self, text, subject=''):
        Reply.__init__(self, text)
        # Optional custom subject line; a default is derived in send().
        self.subject = subject
    @handle_api_exceptions(max_attempts=3)
    def send(self, comment, reddit):
        """Reply the author of a reddit comment by sending them a reply
        via private message.
        """
        self.recipient = comment.author
        # If no custom subject line is given, the default will be a label
        # that identifies the comment.
        if not self.subject:
            self.subject = "Comment {id}".format(id=comment.id)
        # Prepend message subject with username
        self.subject = "{} - {}".format(config.R_USERNAME, self.subject)
        reddit.redditor(self.recipient.name).message(self.subject, self.text)
        log("Message reply for comment {id} sent to {to}".format(
            id=comment.id, to=self.recipient))
@handle_api_exceptions(max_attempts=3)
def log(message, alert=False):
    """Log messages along with a timestamp in a log file. If the alert
    option is set to true, send a message to the admin's reddit inbox.

    Falls back to stdout when no log file is configured.
    """
    t = time.strftime('%y-%m-%d %H:%M:%S', time.localtime())
    message = "{}: {}\n".format(t, message)
    message = message.encode('utf8', 'replace')
    if config.LOG_FILE:
        with open(config.LOG_FILE, 'a') as f:
            f.write(message)
    else:
        print(message, end='')
    if alert and config.ADMIN:
        # Authenticate the same way main() does.  (Bug fix: the previous
        # ``praw.Reddit(config.USER_AGENT)`` + ``r.login(...)`` flow is
        # the removed praw 3 API and raises under praw 4+, which the rest
        # of this module uses.)
        r = praw.Reddit(
            user_agent=config.USER_AGENT,
            client_id=config.R_CLIENT_ID,
            client_secret=config.R_CLIENT_SECRET,
            username=config.R_USERNAME,
            password=config.R_PASSWORD,
        )
        admin_alert = message
        subject = "CompileBot Alert"
        r.redditor(config.ADMIN).message(subject, admin_alert)
@handle_api_exceptions(max_attempts=3)
def compile(source, lang, stdin=''):
    """Compile and evaluate source code using the ideone API and return
    a dict containing the output details.

    NOTE: shadows the ``compile`` builtin; within this module the name
    always refers to this function.

    Keyword arguments:
    source -- a string containing source code to be compiled and evaluated
    lang -- the programming language pertaining to the source code
    stdin -- optional "standard input" for the program

    >>> d = compile('print("Hello World")', 'python')
    >>> d['output']
    Hello World
    """
    # Resolve configured language aliases to ideone language names.
    lang = config.LANG_ALIASES.get(lang.lower(), lang)
    # Login to ideone and create a submission
    i = ideone.Ideone(config.I_USERNAME, config.I_PASSWORD)
    sub = i.create_submission(source, language_name=lang, std_input=stdin)
    sub_link = sub['link']
    details = i.submission_details(sub_link)
    # The status of the submission indicates whether or not the source has
    # finished executing. A status of 0 indicates the submission is finished.
    # Poll every 3 seconds until then (blocks for the program's runtime).
    while details['status'] != 0:
        details = i.submission_details(sub_link)
        time.sleep(3)
    details['link'] = sub_link
    return details
def code_block(text):
    """Create a markdown formatted code block containing the given text."""
    # Prefix with a newline so the first line gets indented too, then
    # turn every line break (LF, then CR) into a newline plus 4 spaces.
    indented = '\n' + text
    indented = indented.replace('\n', '\n    ')
    indented = indented.replace('\r', '\n    ')
    return indented
@handle_api_exceptions()
def get_banned(reddit):
    """Return the set of user names banned from the moderator subreddit,
    lower-cased for case-insensitive comparison.
    """
    banned = {user.name.lower() for user in
              reddit.subreddit(config.SUBREDDIT).banned()}
    return banned
@handle_api_exceptions()
def send_modmail(subject, body, reddit):
    """Send a message to the bot moderators"""
    if config.SUBREDDIT:
        sub = reddit.subreddit(config.SUBREDDIT)
        reddit.subreddit(sub.display_name).message(subject, body)
    else:
        # No moderator subreddit configured; modmail is best-effort only.
        log("Mod message not sent. No subreddit found in settings.")
def format_reply(details, opts):
    """Return reply text containing the output from an ideone submission's
    details along with optional additional information.

    ``details`` is the dict returned by ``compile``; ``opts`` is the list
    of ``--flag`` strings supplied by the user.
    """
    head, body, extra = '', '', ''
    # Combine information that will go before the output.
    if '--source' in opts:
        head += 'Source:\n{}\n\n'.format(code_block(details['source']))
    if '--input' in opts:
        head += 'Input:\n{}\n\n'.format(code_block(details['input']))
    # Combine program output and runtime error output.
    output = details['output'] + details['stderr']
    # Truncate the output if it contains an excessive
    # amount of line breaks or if it is too long.
    if output.count('\n') > config.LINE_LIMIT:
        lines = output.split('\n')
        # If message contains an excessive amount of duplicate lines,
        # truncate to a small amount of lines to discourage spamming
        if len(set(lines)) < 5:
            lines_allowed = 2
        else:
            lines_allowed = 51
        output = '\n'.join(lines[:lines_allowed])
        output += "\n..."
    # Truncate the output if it is too long.
    if len(output) > 8000:
        output = output[:8000] + '\n    ...\n'
    body += 'Output:\n{}\n\n'.format(code_block(output))
    if details['cmpinfo']:
        body += 'Compiler Info:\n{}\n\n'.format(code_block(details['cmpinfo']))
    # Combine extra runtime information.
    if '--date' in opts:
        extra += "Date: {}\n\n".format(details['date'])
    if '--memory' in opts:
        extra += "Memory Usage: {} bytes\n\n".format(details['memory'])
    if '--time' in opts:
        extra += "Execution Time: {} seconds\n\n".format(details['time'])
    if '--version' in opts:
        extra += "Version: {}\n\n".format(details['langVersion'])
    # To ensure the reply is less than 10000 characters long, shorten
    # sections of the reply until they are of adequate length.  The
    # footer (appended later by the caller) is budgeted first, then
    # body, head, and extra, so lower-priority sections are shortened
    # first.  (Bug fix: the original loop only rebound its loop
    # variable, so nothing was ever actually truncated.)
    total_len = len(config.FOOTER)
    sections = {'head': head, 'body': body, 'extra': extra}
    for name in ('body', 'head', 'extra'):
        section = sections[name]
        if len(section) + total_len > 9800:
            section = section[:9800 - total_len] + '\n...\n'
            sections[name] = section
        total_len += len(section)
    reply_text = sections['head'] + sections['body'] + sections['extra']
    return reply_text
def parse_comment(body):
    """Parse a string that contains a username mention and code block
    and return the supplied arguments, source code and input.

    Raises AttributeError (via ``m.group`` on a failed match) when the
    comment does not have the expected format; callers rely on this.

    c_pattern is a regular expression that searches for the following:
    1. "+/u/" + the reddit username that is using the program
        (case insensitive).
    2. A string representing the programming language and arguments
        + a "\n".
    3. A markdown code block (one or more lines indented by 4 spaces or
        a tab) that represents the source code + a "\n".
    4. (Optional) "Input:" OR "Stdin:" + "\n".
    5. (Optional) A markdown code block that represents the
        program's input.
    """
    # NOTE(review): the '(?i)' inline flags below appear mid-pattern;
    # Python 3.11+ rejects global inline flags that are not at the start
    # of the pattern -- confirm before porting this Python 2 code.
    c_pattern = (
        r'\+/u/(?i)%s\s*(?P<args>.*)\n\s*'
        r'((?<=\n( {4}))|(?<=\n\t))'
        r'(?P<src>.*(\n((( {4}|\t).*\n)|\n)*(( {4}|\t).*))?)'
        r'(\n\s*((?i)Input|Stdin):?\s*\n\s*'
        r'((?<=\n( {4}))|(?<=\n\t))'
        r'(?P<in>.*(\n((( {4}|\t).*\n)|\n)*(( {4}|\t).*\n?))?))?'
    ) % config.R_USERNAME
    m = re.search(c_pattern, body)
    args, src, stdin = m.group('args'), m.group('src'), m.group('in') or ''
    # Remove the leading four spaces from every line.
    # NOTE(review): tab-indented blocks (allowed by the regex) keep their
    # tabs here -- confirm that is intended.
    src = src.replace('\n    ', '\n')
    stdin = stdin.replace('\n    ', '\n')
    return args, src, stdin
def create_reply(comment):
    """Search comments for username mentions followed by code blocks
    and return a formatted reply containing the output of the executed
    block or a message with additional information.

    Returns a CompiledReply on success or a MessageReply describing the
    problem otherwise.
    """
    try:
        args, src, stdin = parse_comment(comment.body)
    except AttributeError:
        # parse_comment's regex did not match (re.search returned None),
        # i.e. the comment is not formatted correctly.
        preamble = config.ERROR_PREAMBLE.format(link=comment_link(comment))
        postamble = config.ERROR_POSTAMBLE.format(link=comment_link(comment))
        error_text = preamble + config.FORMAT_ERROR_TEXT + postamble
        log("Formatting error on comment {}".format(comment_link(comment)))
        return MessageReply(error_text)
    # Separate the language name from the rest of the supplied options.
    try:
        lang, opts = args.split(' -', 1)
        opts = ('-' + opts).split()
    except ValueError:
        # No additional opts found
        lang, opts = args, []
    lang = lang.strip()
    try:
        details = compile(src, lang, stdin=stdin)
        log("Compiled ideone submission {link} for comment {id}".format(
            link=details['link'], id=comment.id))
    except ideone.LanguageNotFoundError as e:
        preamble = config.ERROR_PREAMBLE.format(link=comment_link(comment))
        postamble = config.ERROR_POSTAMBLE.format(link=comment_link(comment))
        # Suggest near-miss language names from the ideone error.
        choices = ', '.join(e.similar_languages)
        error_text = config.LANG_ERROR_TEXT.format(lang=lang, choices=choices)
        error_text = preamble + error_text + postamble
        # TODO Add link to accepted languages to msg
        log("Language error on comment {id}".format(id=comment.id))
        return MessageReply(error_text)
    # The ideone submission result value indicates the final state of
    # the program. If the program compiled and ran successfully the
    # result is 15. Other codes indicate various errors.
    result_code = details['result']
    # The user is alerted of any errors via message reply unless they
    # include an option to include errors in the reply.
    if result_code == 15 or ('--include-errors' in opts and result_code in [11, 12]):
        text = format_reply(details, opts)
        ideone_link = "http://ideone.com/{}".format(details['link'])
        # NOTE(review): urllib.quote is Python 2 only (urllib.parse.quote
        # on Python 3) -- confirm before porting.
        url_pl = urllib.quote(comment_link(comment))
        text += config.FOOTER.format(ide_link=ideone_link, perm_link=url_pl)
    else:
        log("Result error {code} detected in comment {id}".format(
            code=result_code, id=comment.id))
        preamble = config.ERROR_PREAMBLE.format(link=comment_link(comment))
        postamble = config.ERROR_POSTAMBLE.format(link=comment_link(comment))
        # Map ideone result codes to canned error messages; unknown codes
        # fall back to an empty message body.
        error_text = {
            11: config.COMPILE_ERROR_TEXT,
            12: config.RUNTIME_ERROR_TEXT,
            13: config.TIMEOUT_ERROR_TEXT,
            17: config.MEMORY_ERROR_TEXT,
            19: config.ILLEGAL_ERROR_TEXT,
            20: config.INTERNAL_ERROR_TEXT
        }.get(result_code, '')
        # Include any output from the submission in the reply.
        if details['cmpinfo']:
            error_text += "Compiler Output:\n\n{}\n\n".format(
                code_block(details['cmpinfo']))
        if details['output']:
            error_text += "Output:\n\n{}\n\n".format(
                code_block(details['output']))
        if details['stderr']:
            error_text += "Error Output:\n\n{}\n\n".format(
                code_block(details['stderr']))
        error_text = preamble + error_text + postamble
        return MessageReply(error_text)
    return CompiledReply(text, details)
@handle_api_exceptions()
def process_unread(new, r):
    """Parse a new comment or message for various options and ignore or
    reply to it as appropriate.

    ``new`` is an unread inbox item (comment mention or private message);
    ``r`` is the authenticated praw Reddit instance.

    Bug fix: the command regexes below used ``(i?)`` -- an *optional
    literal "i"* -- where the case-insensitive flag ``(?i)`` was clearly
    intended; all four occurrences are corrected.
    """
    reply = None
    sender = new.author
    log("New {type} {id} from {sender}".format(
        type="mention" if new.was_comment else "message",
        id=new.id, sender=sender))
    if sender.name.lower() in config.BANNED_USERS:
        log("Ignoring banned user {user}".format(user=sender))
        return
    # Search for a user mention preceded by a '+' which is the signal
    # for CompileBot to create a reply for that comment.
    if (new.was_comment and
            re.search(r'(?i)\+/u/{}'.format(config.R_USERNAME), new.body)):
        reply = create_reply(new)
        if reply:
            reply.send(new, r)
    elif ((not new.was_comment) and
            re.match(r'(?i)\s*--help', new.body)):
        # Message a user the help text if comment is a message
        # containing "--help".
        reply = MessageReply(config.HELP_TEXT, subject='CompileBot Help')
        reply.send(new, r)
    elif ((not new.was_comment) and
            re.match(r'(?i)\s*--report', new.body) and config.SUBREDDIT):
        # Forward a reported message to the moderators.
        send_modmail("Report from {author}".format(author=new.author),
                     new.body, r)
        reply = MessageReply("Your message has been forwarded to the "
                             "moderators. Thank you.",
                             subject="CompileBot Report")
        reply.send(new, r)
    elif ((not new.was_comment) and
            re.match(r'(?i)\s*--recompile', new.body)):
        # Search for the recompile command followed by a comment id.
        # Example: 1tt4jt/post_title/ceb7czt
        # The comment id can optionally be prefixed by a url.
        # Example: reddit.com/r/sub/comments/1tt4jt/post_title/ceb7czt
        p = (r'(?i)--recompile\s*(?P<url>[^\s*]+)?'
             r'(?P<id>\b\w+/\w+/\w+\b)')
        m = re.search(p, new.body)
        try:
            id = m.group('id')
        except AttributeError:
            # No comment id found (m is None): tell the user how to ask.
            new.reply(config.RECOMPILE_ERROR_TEXT)
            return
        # Fetch the comment that will be recompiled.
        original = r.comment(id.split('/')[-1])
        log("Processing request to recompile {id} from {user}"
            "".format(id=original.id, user=new.author))
        # Ensure the author of the original comment matches the author
        # requesting the recompile to prevent one user sending a recompile
        # request on the behalf of another.
        if original.author.name == new.author.name:
            reply = create_reply(original)
        else:
            new.reply(config.RECOMPILE_AUTHOR_ERROR_TEXT)
            log("Attempt to recompile on behalf of another author "
                "detected. Request denied.")
            return
        # Ensure the recompiled reply resulted in a valid comment
        # reply and not an error message reply.
        if isinstance(reply, CompiledReply):
            # Search for an existing comment reply from the bot.
            # If one is found, edit the existing comment instead
            # of creating a new one.
            #
            # Note: the .replies property only returns a limited
            # number of comments. If the reply is buried, it will
            # not be retrieved and a new one will be created
            for rp in original.replies:
                if rp.author.name.lower() == config.R_USERNAME.lower():
                    footnote = ("\n\n**EDIT:** Recompile request "
                                "by {}".format(new.author))
                    reply.text += footnote
                    reply.make_edit(rp, original)
                    break
            else:
                # Reply to the original comment.
                reply.send(original, r)
        else:
            # Send a message reply.
            reply.send(new, r)
    if reply and isinstance(reply, CompiledReply):
        # Report any potential spam to the moderators.
        spam = reply.detect_spam()
        if spam and reply.parent_comment.subreddit.display_name not in config.IGNORE_SPAM:
            text = ("Potential spam detected on comment {link} "
                    "by {c.author}: ".format(c=reply.parent_comment, link=comment_link(reply.parent_comment)))
            text += ', '.join(spam)
            send_modmail("Potential spam detected", text, r)
            log(text)
def comment_link(comment):
    """Return the permalink of *comment* within its submission."""
    return "{0}{1}".format(comment.submission.permalink, comment.id)
@handle_api_exceptions()
def main():
    """Poll the bot's reddit inbox once and process every unread item,
    marking each item read whether or not processing succeeded.
    """
    r = praw.Reddit(
        user_agent=config.USER_AGENT,
        client_id=config.R_CLIENT_ID,
        client_secret=config.R_CLIENT_SECRET,
        username=config.R_USERNAME,
        password=config.R_PASSWORD,
    )
    if config.SUBREDDIT:
        # Refresh the banned-user set from the moderator subreddit.
        # (Removed a bare ``config.BANNED_USERS`` expression statement
        # here that did nothing.)
        config.BANNED_USERS = get_banned(r)
    # Iterate though each new comment/message in the inbox and
    # process it appropriately.
    inbox = r.inbox.unread()
    for new in inbox:
        try:
            process_unread(new, r)
        except Exception:
            # ``except Exception`` rather than a bare ``except:`` so that
            # KeyboardInterrupt/SystemExit can still stop the bot.
            tb = traceback.format_exc()
            # Notify admin of any errors
            log("Error processing comment {c.id}\n"
                "{traceback}".format(c=new, traceback=code_block(tb)), alert=True)
        finally:
            new.mark_read()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3364102 | import unittest
from pycpfcnpj import cpf
class CPFTests(unittest.TestCase):
    """Unit tests for pycpfcnpj.cpf.validate."""
    def setUp(self):
        # A known-good CPF and the same digits with a wrong check digit.
        self.valid_cpf = '11144477735'
        self.invalid_cpf = '11144477736'
    def test_validate_cpf_true(self):
        """A CPF with correct check digits must validate."""
        self.assertTrue(cpf.validate(self.valid_cpf))
    def test_validate_cpf_false(self):
        """Wrong check digits and repeated-digit CPFs must not validate."""
        self.assertFalse(cpf.validate(self.invalid_cpf))
        # CPFs made of a single repeated digit ('00000000000' ...
        # '99999999999') are never valid.
        for i in range(10):
            self.assertFalse(cpf.validate(
                '{0}'.format(i) * 11
            ))
if __name__ == '__main__':
    unittest.main(verbosity=2)
| StarcoderdataPython |
175603 | import os
import unittest
import settings as settings
import urllib.request
from helper import InputFiles
class FindOrbitReferenceHelperTest(unittest.TestCase):
    """Placeholder for InputFiles._find_orbit_reference tests.

    The original cases (orbit type G/R selection, 2/4/5-digit year
    validation, rinex-name/station/doy validation) were entirely
    commented out; the dead commented code was removed.  Restore the
    cases from version-control history once the helper API they
    exercised is available again.
    """
    pass
class ChannelsURLHelperTest(unittest.TestCase):
    """Connectivity check for the GLONASS channels download URL."""
    def test_download_channels_url_response(self):
        """The GLONASS channels URL must answer with HTTP status 200."""
        # NOTE(review): performs a live network request, so this test is
        # environment-dependent and will fail offline -- consider mocking.
        response = urllib.request.urlopen(settings.URL_GLONASS_CHANNELS).getcode()
        self.assertEqual(response, 200)
class CheckURLExist(unittest.TestCase):
    """Placeholder for orbit-download URL availability tests.

    The original case (fetching settings.URL_ORBIT + '1847/igr18471.sp3.Z')
    was commented out; the dead commented code was removed.  Restore it
    from version-control history when re-enabling.
    """
    pass
# class SetupFileAndDownloadHelperTest(unittest.TestCase):
# def _setup_file_and_download(self, **kwargs):
# input_files = InputFiles()
# default = dict(
# year=2015,
# month=6,
# file='chpi1521.15o',
# file_type='DCB',
# rinex_interval=30
# )
# data = dict(default, **kwargs)
# find = input_files._setup_file_and_download(data['year'], data['month'], data['file'], data['file_type'],
# data['rinex_interval'])
# return find
class DownloadFileHelperTest(unittest.TestCase):
    """Placeholder for InputFiles._download_file / _download_orbit tests.

    The original cases (default download path, empty/invalid orbit names,
    uppercase names, 1/2/3/5-digit and non-numeric years) were entirely
    commented out; the dead commented code was removed.  Restore the
    cases from version-control history once the download helper API they
    exercised is available again.
    """
    pass
class CheckFilesExistHelperTest(unittest.TestCase):
    """Placeholder for InputFiles._check_files_already_exist tests.

    The original cases (DCB file present compressed, uncompressed, both,
    or missing, plus the temp-file create/delete helpers) were entirely
    commented out; the dead commented code was removed.  Restore the
    cases from version-control history once the helper API they
    exercised is available again.
    """
    pass
| StarcoderdataPython |
3303839 | <reponame>amymariaparker2401/luci-py
#!/usr/bin/env vpython
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import sys
import unittest
import isolate_test_env as test_env
test_env.setup_test_env()
from components import auth
from test_support import test_case
import config
import metrics
from proto import config_pb2
class TestMetrics(test_case.TestCase):
    """Test cases for metrics.py."""
    def test_file_size(self):
        """file_size records bytes only for clients in a monitored IP
        whitelist.
        """
        fields = {
            'client_email': 'anonymous:anonymous',
            'client_name': 'bots',
            'download_source': 'GCS',
        }
        # Monitoring config that watches the 'bots' IP whitelist.
        cfg = config_pb2.SettingsCfg(client_monitoring_config=[
            config_pb2.ClientMonitoringConfig(
                ip_whitelist='bots',
                label='bots',
            ),
        ])
        self.mock(config, '_get_settings_with_defaults', lambda: ('revision', cfg))
        # Client is NOT in the monitored IP whitelist: nothing is
        # recorded.  (Original comments had the two cases swapped.)
        self.mock(auth, 'is_in_ip_whitelist', lambda *args, **kwargs: False)
        metrics.file_size(123)
        self.assertIsNone(metrics._bytes_requested.get(fields=fields))
        # Client IS in the monitored IP whitelist: the metric is recorded.
        self.mock(auth, 'is_in_ip_whitelist', lambda *args, **kwargs: True)
        metrics.file_size(123)
        self.assertEqual(metrics._bytes_requested.get(fields=fields), 123)
if __name__ == '__main__':
    # When run with unittest's -v flag, show full assertion diffs.
    if '-v' in sys.argv:
        unittest.TestCase.maxDiff = None
    unittest.main()
| StarcoderdataPython |
3302821 | #
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import pytest
import mock
import stat
import helpers as h
import opencas
@pytest.mark.parametrize(
"line",
[
"",
" ",
"#",
" # ",
(
"<KEY>"
"<KEY>"
"<KEY>"
),
" # ? } { ! ",
"1 1 /dev/not_a_real_device /dev/sdb",
"1 2 1 /dev/not_a_real_device ",
"1 2 1 /dev/not_a_real_device dinosaur=velociraptor",
],
)
@mock.patch("opencas.cas_config.core_config.validate_config")
def test_core_config_from_line_parsing_checks_01(mock_validate, line):
with pytest.raises(ValueError):
opencas.cas_config.core_config.from_line(line)
@pytest.mark.parametrize(
"line",
[
"1 1 /dev/not_a_real_device",
"1 1 /dev/not_a_real_device ",
"1 1 /dev/not_a_real_device lazy_startup=true",
"1 1 /dev/not_a_real_device lazy_startup=false",
"1 1 /dev/not_a_real_device lazy_startup=False",
"1 1 /dev/not_a_real_device lazy_startup=True",
],
)
def test_core_config_from_line_parsing_checks_02(line):
opencas.cas_config.core_config.from_line(line, allow_incomplete=True)
@pytest.mark.parametrize(
    "line",
    [
        "1 1 /dev/not_a_real_device dinosaur=velociraptor",
        "1 1 /dev/not_a_real_device lazy_startup=maybe",
        "1 1 /dev/not_a_real_device lazy_saturday=definitely",
        "1 1 /dev/not_a_real_device 00000=345",
        "1 1 /dev/not_a_real_device eval(38+4)",
    ],
)
def test_core_config_from_line_parsing_checks_params_01(line):
    """Unknown extra-parameter names or invalid parameter values are rejected,
    even when allow_incomplete=True skips device validation."""
    with pytest.raises(ValueError):
        opencas.cas_config.core_config.from_line(line, allow_incomplete=True)
@mock.patch("os.path.exists")
@mock.patch("os.stat")
def test_core_config_from_line_device_is_directory(mock_stat, mock_path_exists):
    """A path that exists but is a directory is not a valid core device."""
    mock_path_exists.side_effect = h.get_mock_os_exists(["/home/user/stuff"])
    mock_stat.return_value = mock.Mock(st_mode=stat.S_IFDIR)
    with pytest.raises(ValueError):
        opencas.cas_config.core_config.from_line("1 1 /home/user/stuff")
@mock.patch("os.path.exists")
@mock.patch("os.stat")
def test_core_config_from_line_device_not_present(mock_stat, mock_path_exists):
    """A core device that does not exist at all must be rejected."""
    mock_path_exists.side_effect = h.get_mock_os_exists([])
    # os.stat raising mirrors a nonexistent path.
    mock_stat.side_effect = ValueError()
    with pytest.raises(ValueError):
        opencas.cas_config.core_config.from_line("1 1 /dev/not_a_real_device")
def test_core_config_from_line_recursive_multilevel():
    """/dev/cas1-1 is an exported device of cache 1 itself — presumably using
    it as a core of cache 1 would be recursive, so it must be rejected."""
    with pytest.raises(ValueError):
        opencas.cas_config.core_config.from_line("1 1 /dev/cas1-1")
def test_core_config_from_line_multilevel():
    """Multilevel caching: a core backed by a *different* cache's exported
    device (/dev/cas2-1 under cache 1) parses without error."""
    opencas.cas_config.core_config.from_line("1 1 /dev/cas2-1")
@mock.patch("opencas.cas_config.check_block_device")
def test_core_config_from_line_allow_incomplete(mock_check_block,):
    """allow_incomplete=True must skip the block-device check entirely."""
    opencas.cas_config.core_config.from_line(
        "1 1 /dev/not_a_real_device", allow_incomplete=True
    )
    assert not mock_check_block.called
@pytest.mark.parametrize(
    "cache_id,core_id",
    [
        ("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", "bbbbbbb"),
        ("lizard", "chicken"),
        ("0", "0"),
        ("0", "100"),
        ("0", "-1"),
        ("-1", "0"),
        ("-1", "1"),
        ("-1", "-1"),
        ("16385", "4095"),
        ("16384", "4096"),
        ("0", "0"),
        ("1", "-1"),
    ],
)
@mock.patch("os.path.exists")
@mock.patch("os.stat")
def test_core_config_from_line_cache_id_validation_01(
    mock_stat, mock_path_exists, cache_id, core_id
):
    """Non-numeric or out-of-range cache/core ids are rejected even when the
    backing path looks like a valid block device (stat mocked to S_IFBLK)."""
    mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/not_a_real_device"])
    mock_stat.return_value = mock.Mock(st_mode=stat.S_IFBLK)
    line = "{0} {1} /dev/not_a_real_device".format(cache_id, core_id)
    with pytest.raises(ValueError):
        opencas.cas_config.core_config.from_line(line)
@pytest.mark.parametrize(
    "cache_id,core_id", [("16384", "4095"), ("1", "0"), ("1", "10")]
)
@mock.patch("os.path.exists")
@mock.patch("os.stat")
def test_core_config_from_line_cache_id_validation_02(
    mock_stat, mock_path_exists, cache_id, core_id
):
    """Boundary and interior id values within the valid ranges parse cleanly."""
    mock_path_exists.side_effect = h.get_mock_os_exists(["/dev/not_a_real_device"])
    mock_stat.return_value = mock.Mock(st_mode=stat.S_IFBLK)
    line = "{0} {1} /dev/not_a_real_device".format(cache_id, core_id)
    opencas.cas_config.core_config.from_line(line)
@pytest.mark.parametrize(
    "cache_id,core_id,device",
    [
        ("1", "1", "/dev/not_a_real_device"),
        ("16384", "4095", "/dev/not_a_real_device"),
        ("16384", "0", "/dev/nvme0n1p"),
        ("100", "5", "/dev/dm-10"),
    ],
)
@mock.patch("os.path.exists")
@mock.patch("os.stat")
def test_core_config_from_line_cache_id_validation(
    mock_stat, mock_path_exists, cache_id, core_id, device
):
    """Round-trip: a validated config serialized with to_line() must parse
    back (from_line) to an equivalent core_config."""
    mock_path_exists.side_effect = h.get_mock_os_exists([device])
    mock_stat.return_value = mock.Mock(st_mode=stat.S_IFBLK)
    core_reference = opencas.cas_config.core_config(
        cache_id=cache_id, core_id=core_id, path=device
    )
    core_reference.validate_config()
    core_after = opencas.cas_config.core_config.from_line(core_reference.to_line())
    assert core_after.cache_id == core_reference.cache_id
    assert core_after.core_id == core_reference.core_id
    assert core_after.device == core_reference.device
| StarcoderdataPython |
3329555 | import unittest
from blng.Voodoo import DataAccess
from alchemy_cli import alchemy_voodoo_wrapper
import prompt_toolkit.validation
class TestAlchemySimple(unittest.TestCase):
    """Tests for the alchemy CLI wrapper around a Voodoo data session."""

    def setUp(self):
        # Fresh data session and CLI wrapper for every test.
        self.session = DataAccess('crux-example.xml')
        self.root = self.session.get_root()
        self.subject = alchemy_voodoo_wrapper(self.root)

    def test_go_into_conf_mode_and_exit_again(self):
        """Entering configure mode flips mode 0 -> 1 and changes the prompt;
        exiting restores both."""
        # Action & Assert
        self.assertEqual(self.subject.mode, 0)
        self.subject.do('configure')
        self.assertEqual(self.subject.mode, 1)
        self.assertEqual(self.subject.OUR_PROMPT, 'brewer@localhost% ')
        self.subject.do('exit')
        self.assertEqual(self.subject.mode, 0)
        self.assertEqual(self.subject.OUR_PROMPT, 'brewer@localhost> ')

    def assertGeneratorReturnedEverything(self, results, required_results):
        """
        Helper to compare the results of generators.

        Fails if any required text is missing from `results` or if `results`
        contains texts not listed in `required_results`.
        """
        not_found_results = list(required_results)
        extra_results = []
        for result in results:
            if result.text not in required_results:
                extra_results.append(result.text)
            for required_result in required_results:
                if result.text == required_result:
                    not_found_results.remove(required_result)
                    break
        # NOTE(review): if `results` yields the same required text twice, the
        # second `remove()` raises ValueError — confirm duplicates cannot occur.
        if len(not_found_results) and len(extra_results):
            self.fail('Results not found %s\nExtra results found %s' % (str(not_found_results), str(extra_results)))
        if len(not_found_results):
            self.fail('Results not found %s' % (str(not_found_results)))
        if len(extra_results):
            self.fail('Extra results found %s' % (str(extra_results)))

    def drainGenerator(self, gen):
        # Exhaust a generator purely for its side effects.
        try:
            while 1:
                next(gen)
        except StopIteration:
            pass
class Document:
    """Minimal stand-in for a prompt_toolkit ``Document``: carries only the
    raw input text that completion helpers inspect."""

    def __init__(self, text):
        # Store the text exactly as given; no normalization is performed.
        self.text = text
3246718 | """Upload models.
Upload the scene's configuration file and asset file.
"""
# Import built-in modules
import configparser
import os
import subprocess
# Import local modules
from rayvision_sync import RayvisionTransfer
from rayvision_sync.utils import run_cmd
from rayvision_sync.utils import read_ini_config
from rayvision_sync.utils import str2unicode
from rayvision_sync.utils import upload_retry
from rayvision_sync.exception import RayvisionError, UnsupportedDatabaseError
from rayvision_sync.utils import create_transfer_params
class RayvisionUpload(object):
    """Upload files.

    Upload configuration files and asset files.

    Wraps a RayvisionTransfer instance and drives the external transmission
    engine via shell commands built by ``trans.create_cmd``.
    """

    def __init__(self, api, db_config_path=None):
        """Initialize instance."""
        params = create_transfer_params(api)
        self.api = api
        self.trans = RayvisionTransfer(**params)
        self.logger = self.trans.logger
        # load db config ini
        self.transfer_log_path, self.redis_config, self.sqlite_config, self.database_config = \
            self.load_db_config(db_config_path)
        custom_db_path = self.database_config.get("db_path")
        db_dir = self._check_and_mk(custom_db_path)
        self.check_transfer_log_path(self.transfer_log_path)
        # db_ini: directory for generated per-run .ini files;
        # _db: directory for the sqlite .db files themselves.
        self.db_ini = os.path.join(db_dir, 'db_ini')
        self._db = os.path.join(db_dir, 'db')

    def check_transfer_log_path(self, transfer_log_path):
        """Check the log location of the transport engine.

        Resolution order: explicit ``transfer_log_path`` from the config file,
        then the RAYVISION_LOG environment variable, then a per-user
        ``renderfarm_sdk`` default. The chosen path is exported back into the
        environment (and persisted via ``setx`` on Windows).
        """
        rayvision_log_path = os.environ.get("RAYVISION_LOG", "")
        if bool(transfer_log_path) and os.path.exists(transfer_log_path):
            transfer_path = transfer_log_path
            if rayvision_log_path != transfer_log_path:
                os.environ.update({"RAYVISION_LOG": transfer_path})
                subprocess.Popen('setx RAYVISION_LOG "%s" /m' % transfer_path, shell=True)
        elif os.path.exists(rayvision_log_path):
            transfer_path = rayvision_log_path
        else:
            if self.api.user_info['local_os'] == "windows":
                transfer_path = os.path.join(os.environ["USERPROFILE"],
                                             "renderfarm_sdk")
            else:
                transfer_path = os.path.join(os.environ["HOME"], "renderfarm_sdk")
            os.environ.update({"RAYVISION_LOG": transfer_path})
            # NOTE(review): `setx` is a Windows-only command but is also spawned
            # on the non-Windows branch above — confirm whether this is intended
            # (it fails harmlessly but noisily on Linux/macOS).
            subprocess.Popen('setx RAYVISION_LOG "%s" /m' % transfer_path, shell=True)
        return transfer_path

    def _check_and_mk(self, custom_db_path):
        """Check the path to the DB data file generated by the upload asset.

        Resolution order: explicit db_path from config, then the RAYVISION_DB
        environment variable, then a per-user ``renderfarm_sdk`` default.
        """
        rayvision_db_env = os.environ.get("RAYVISION_DB", "")
        if bool(custom_db_path) and os.path.exists(custom_db_path):
            db_path = custom_db_path
        elif os.path.exists(rayvision_db_env):
            db_path = rayvision_db_env
        else:
            if self.api.user_info['local_os'] == "windows":
                db_path = os.path.join(os.environ["USERPROFILE"], "renderfarm_sdk")
            else:
                db_path = os.path.join(os.environ["HOME"], "renderfarm_sdk")
        return db_path

    def create_db_ini(self, upload_json_path):
        """Create the database configuration file.

        Args:
            upload_json_path (str): Upload json path.

        Returns:
            str: Configuration file path.

        Raises:
            UnsupportedDatabaseError: If the configured type is neither
                'redis' nor 'sqlite'.
        """
        db_type = self.database_config.get("type", "sqlite").strip().lower()
        if not os.path.exists(self.db_ini):
            os.makedirs(self.db_ini)
        # The upload json lives in a (presumably timestamped) directory; that
        # directory name is reused for the per-run sqlite db and ini files.
        time_temp = os.path.split(os.path.dirname(upload_json_path))[-1]
        db_path = os.path.join(self._db, "%s.db" % time_temp)
        config_ini = configparser.ConfigParser()
        config_ini['database'] = {
            "on": self.database_config.get("on", "true"),
            "platform_id": self.trans.platform,
            "type": db_type
        }
        config_ini['redis'] = {
            "host": self.redis_config.get("host", "127.0.0.1"),
            "port": self.redis_config.get("port", 6379),
            "password": self.redis_config.get("password", ""),
            "table_index": self.redis_config.get("table_index", ""),
            "timeout": self.redis_config.get("timeout", 5000)
        }
        config_ini['sqlite'] = {
            "db_path": db_path,
            "temporary": self.sqlite_config.get("temporary", "false")
        }
        if db_type == "redis":
            db_ini_path = os.path.join(self.db_ini, "db_redis.ini")
        elif db_type == "sqlite":
            db_ini_path = os.path.join(self.db_ini, "%s.ini" % time_temp)
        else:
            error_data_msg = "{} is not a supported database, only support 'redis' or 'sqlite'".format(db_type)
            raise UnsupportedDatabaseError(error_data_msg)
        with open(db_ini_path, 'w') as configfile:
            config_ini.write(configfile)
        return db_ini_path

    def upload(self, task_id, task_json_path, tips_json_path, asset_json_path,
               upload_json_path, max_speed=None):
        """Run the cmd command to upload the configuration file.

        Uploads the four per-task config files first, then the asset files
        listed in upload.json.

        Args:
            task_id (str, optional): Task id.
            task_json_path (str, optional): task.json file absolute path.
            tips_json_path (str, optional): tips.json file absolute path.
            asset_json_path (str, optional): asset.json file absolute path.
            upload_json_path (str, optional): upload.json file absolute path.
            max_speed (str): Maximum transmission speed, default value
                is 1048576 KB/S.

        Returns:
            bool: True is success, False is failure.
        """
        config_file_list = [
            task_json_path,
            tips_json_path,
            asset_json_path,
            upload_json_path
        ]
        result_config = self.upload_config(task_id, config_file_list,
                                           max_speed)
        if not result_config:
            return False
        result_asset = self.upload_asset(upload_json_path, max_speed)
        if not result_asset:
            return False
        return True

    def upload_config(self, task_id, config_file_list, max_speed=None):
        """Run the cmd command to upload configuration profiles.

        Each file is retried up to 10 times before giving up.

        Args:
            task_id (str): Task id.
            config_file_list (list): Configuration file path list.
            max_speed (str): Maximum transmission speed, default value
                is 1048576 KB/S.

        Returns:
            bool: True is success, False is failure.

        Raises:
            RayvisionError: If a file still fails after 10 attempts.
        """
        transmit_type = "upload_files"
        max_speed = max_speed if max_speed is not None else "1048576"
        for config_path in config_file_list:
            local_path = str2unicode(config_path)
            config_basename = os.path.basename(config_path)
            server_path = '/{0}/cfg/{1}'.format(task_id, config_basename)
            server_path = str2unicode(server_path)
            # Missing local files are skipped, not treated as errors.
            if not os.path.exists(local_path):
                self.logger.info('%s is not exists.', local_path)
                continue
            cmd_params = [transmit_type, local_path, server_path, max_speed,
                          'false', 'config_bid']
            cmd = self.trans.create_cmd(cmd_params)
            times = 0
            while True:
                # NOTE(review): here a truthy run_cmd result is treated as
                # *failure* (non-zero exit code), while upload_asset returns
                # run_cmd's result directly as a success flag — confirm the
                # run_cmd contract; the two usages look inconsistent.
                result = run_cmd(cmd, flag=True, logger=self.logger)
                if result:
                    if times == 9:
                        raise RayvisionError(20004, "%s upload failed" %
                                             config_path)
                    times += 1
                else:
                    break
        return True

    @upload_retry
    def upload_asset(self, upload_json_path, max_speed=None, is_db=True):
        """Run the cmd command to upload asset files.

        Args:
            upload_json_path (str): Path to the upload.json file.
            max_speed (str): Maximum transmission speed, default value
                is 1048576 KB/S.
            is_db (bool): Whether to produce local database record upload file.

        Returns:
            bool: True is success, False is failure.
        """
        transmit_type = "upload_file_pairs"
        max_speed = max_speed if max_speed is not None else "1048576"
        cmd_params = [transmit_type, upload_json_path, '/', max_speed,
                      'false', 'input_bid']
        if is_db:
            db_ini_path = self.create_db_ini(upload_json_path)
        else:
            db_ini_path = None
        cmd = self.trans.create_cmd(cmd_params, db_ini_path)
        return run_cmd(cmd, flag=True, logger=self.logger)

    def load_db_config(self, db_config_path=None):
        """Load database/transfer settings from an ini file.

        Falls back to the ``db_config.ini`` shipped next to this module when
        no (existing) path is supplied.

        Returns:
            tuple: (transfer_log_path, redis_config, sqlite_config,
                database_config).
        """
        if not bool(db_config_path) or not os.path.exists(db_config_path):
            db_config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "db_config.ini"))
        conf = read_ini_config(db_config_path)
        on = conf.get("DATABASE_CONFIG", "on")
        type = conf.get("DATABASE_CONFIG", "type")
        db_path = conf.get("DATABASE_CONFIG", "db_path")
        host = conf.get("REDIS", "host")
        port = conf.get("REDIS", "port")
        password = conf.get("REDIS", "password")
        table_index = conf.get("REDIS", "table_index")
        timeout = conf.get("REDIS", "timeout")
        temporary = conf.get("SQLITE", "temporary")
        transfer_log_path = conf.get("TRANSFER_LOG_PATH", "transfer_log_path")
        database_config = {
            "on": on,
            "type": type,
            "db_path": db_path
        }
        redis_config = {
            "host": host,
            "port": int(port),
            "password": password,
            "table_index": table_index,
            "timeout": timeout
        }
        sqlite_config = {
            "temporary": temporary
        }
        return transfer_log_path, redis_config, sqlite_config, database_config
| StarcoderdataPython |
1762397 | from sails.ui.mainmenubar import MainMenuBar
class SunVoxMainMenuBar(MainMenuBar):
    """SunVox-specific main menu bar; currently identical to the generic
    MainMenuBar and kept as an extension point."""
    pass
| StarcoderdataPython |
1783057 | <reponame>TheGoldfish01/pydpf-core<gh_stars>10-100
from .serializer import serializer
from .mechanical_csv_to_field import mechanical_csv_to_field
from .field_to_csv import field_to_csv
from .deserializer import deserializer
from .csv_to_field import csv_to_field
from .vtk_export import vtk_export
from .vtk_to_fields import vtk_to_fields
from .migrate_file_to_vtk import migrate_file_to_vtk
from .serialize_to_hdf5 import serialize_to_hdf5
| StarcoderdataPython |
1691821 | import numpy as np
from opendp.smartnoise_t.sql.privacy import Privacy
class Odometer:
    """
    Implements k-folds homogeneous composition from Kairouz, et al
    Theorem 3.4
    https://arxiv.org/pdf/1311.0776.pdf

    Tracks how many times (k) a mechanism with a fixed per-query privacy
    cost has been invoked and reports the cumulative (epsilon, delta).
    """
    def __init__(self, privacy: Privacy):
        self.k = 0
        self.privacy = privacy
        if not self.privacy.delta:
            self.privacy.delta = 0.0
        # Slack term (delta-tilde in the paper): half of the per-query delta.
        self.tol = self.privacy.delta / 2

    def spend(self, k=1):
        """Record k additional invocations of the mechanism."""
        self.k += k

    def reset(self):
        """Forget all recorded spend."""
        self.k = 0

    @property
    def spent(self):
        """Return the total (epsilon, delta) spent so far.

        Epsilon is the minimum of basic composition and the two optimal
        bounds from Theorem 3.4; delta is 1 - (1 - tol)(1 - delta)^k.
        """
        epsilon = self.privacy.epsilon
        delta = self.privacy.delta
        tol = self.tol
        if self.k == 0:
            return (0.0, 0.0)
        basic = self.k * epsilon
        optimal_left_side = ((np.exp(epsilon) - 1) * epsilon * self.k) / (np.exp(epsilon) + 1)
        # BUG FIX: Theorem 3.4 uses log(e + ...), not log(epsilon + ...);
        # this now matches the heterogeneous implementation (np.exp(1)).
        optimal_a = optimal_left_side + epsilon * np.sqrt(
            2 * self.k * np.log(np.exp(1) + (np.sqrt(self.k * epsilon * epsilon) / tol)))
        # BUG FIX: the log around 1/tol was missing, grossly inflating this
        # bound (compare OdometerHeterogeneous: sqrt(sum(2*eps^2) * log(1/tol))).
        optimal_b = optimal_left_side + epsilon * np.sqrt(2 * self.k * np.log(1 / tol))
        delta = 1 - (1 - delta) ** self.k
        # BUG FIX: total delta is 1 - (1 - tol)(1 - delta)^k, which expands to
        # delta*(1 - tol) + tol; the original multiplied by (1 - delta) instead,
        # underestimating the spent delta.
        delta = delta * (1 - self.tol) + self.tol
        return tuple([min(basic, optimal_a, optimal_b), delta])
class OdometerHeterogeneous:
    """
    Implements k-folds heterogeneous composition from Kairouz, et al
    Theorem 3.5
    https://arxiv.org/pdf/1311.0776.pdf

    Unlike the homogeneous Odometer, each step may carry its own
    (epsilon, delta) cost.
    """
    def __init__(self, privacy: Privacy = None):
        # Each recorded step is an (epsilon, delta) pair.
        self.steps = []
        self.privacy = privacy
        self.tol = None
        if privacy:
            if not self.privacy.delta:
                self.privacy.delta = 0.0
            # Slack term (delta-tilde): half of the default per-step delta.
            self.tol = self.privacy.delta / 2

    def spend(self, privacy: Privacy = None):
        """Record one invocation, using the default privacy cost if none given.

        Raises:
            ValueError: If no per-call privacy is given and no default was
                set at construction time.
        """
        if privacy:
            if not self.tol:
                self.tol = privacy.delta / 2
            # Keep the slack no larger than the smallest per-step delta seen.
            # NOTE(review): if self.tol is 0.0 (falsy) it is re-derived from
            # the new delta — confirm this is the intended behavior for
            # delta=0 mechanisms.
            if self.tol > privacy.delta:
                self.tol = privacy.delta
            self.steps.append((privacy.epsilon, privacy.delta))
        elif self.privacy:
            self.steps.append((self.privacy.epsilon, self.privacy.delta))
        else:
            raise ValueError("No privacy information passed in")

    def reset(self):
        """Forget all recorded steps."""
        self.steps = []

    @property
    def k(self):
        # Number of composed mechanisms.
        return len(self.steps)

    @property
    def spent(self):
        """Return the total (epsilon, delta) under Theorem 3.5.

        Epsilon is the minimum of basic composition and the two optimal
        heterogeneous bounds; delta is 1 - (1 - tol) * prod(1 - delta_i).
        """
        k = len(self.steps)
        basic = np.sum([eps for eps, _ in self.steps])
        optimal_left_side = np.sum([((np.exp(eps) - 1) * eps) / ((np.exp(eps) + 1)) for eps, _ in self.steps])
        sq = np.sum([eps * eps for eps, _ in self.steps])
        sqsq = np.sum([2 * eps * eps for eps, _ in self.steps])
        optimal_a = optimal_left_side + np.sqrt(sqsq * np.log(np.exp(1) + (np.sqrt(sq)/self.tol)))
        optimal_b = optimal_left_side + np.sqrt(sqsq * np.log(1/self.tol))
        delta = 1 - (1 - self.tol) * np.prod([(1 - delta) for _, delta in self.steps])
        return tuple([min(basic, optimal_a, optimal_b), delta])
| StarcoderdataPython |
class Quote(object):
    """An @-mention quote addressed to a recipient, rendered once as
    ``@<recipient> <quote>``."""

    def __init__(self, recipient, quote):
        # Render the final text eagerly at construction time.
        self.quote = '@{RECIPIENT} {QUOTE}'.format(RECIPIENT=recipient, QUOTE=quote)

    def __unicode__(self):
        # Kept for Python 2 callers that expect a __unicode__ hook.
        return self.quote

    def __str__(self):
        # BUG FIX: the original called the Python 2-only builtin `unicode()`
        # and returned UTF-8 encoded bytes, which raises NameError on
        # Python 3 (and __str__ must return str there).
        return self.quote

    def __repr__(self):
        # Preserves the original behavior of repr() yielding the quote text.
        return self.quote
1725490 | # This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""String tokenizer that returns a list of n-grams. A n-gram in this case is a
substring of length n.
"""
from typing import List, Optional
from openclean.data.types import Scalar
from openclean.function.token.base import Tokenizer, Token
class NGrams(Tokenizer):
    """Split values into lists of n-grams. n-grams are substrings of length n.

    Provides the option to pad strings with special characters to the left and
    right before computing n-grams. That is, if a left (right) padding character
    is given (e.g. $), a string containing n-1 padding characters will be added
    to the left (right) of a given string before n-grams are computed.

    If no padding is specified (default) the value is split into n-grams as is.
    If the string does not contain more than n characters the string is returned
    as is.
    """
    def __init__(self, n: int, pleft: Optional[str] = None, pright: Optional[str] = None):
        """Initialize the length of the generated n-grams and the optional
        padding characters.

        Parameters
        ----------
        n: int
            Length of generated n-grams.
        pleft: str, default=None
            Padding character that is used to create a left-padding for each
            processed value of length n-1.
        pright: str, default=None
            Padding character that is used to create a right-padding for each
            processed value of length n-1.
        """
        self.n = n
        self.pleft = pleft
        self.pright = pright

    def tokens(self, value: Scalar, rowidx: Optional[int] = None) -> List[Token]:
        """Convert a given scalar values into a list of n-grams. If the value
        length is not greater than n and no padding was specified, the returned
        list will only contain the given value.

        Parameters
        ----------
        value: scalar
            Value that is converted into a list of n-grams.
        rowidx: int, default=None
            Optional index of the dataset row that the value originates from.

        Returns
        -------
        list of openclean.function.token.base.Token
        """
        # Add left and right padding if specified.
        if self.pleft:
            value = self.pleft * (self.n - 1) + value
        if self.pright:
            value = value + self.pright * (self.n - 1)
        # If value length is not greater than n return single item list.
        # NOTE(review): this short-circuit returns the raw value rather than a
        # Token (unlike the branch below), so the declared List[Token] return
        # type does not hold here — confirm whether callers rely on Token
        # attributes (e.g. rowidx) on every element.
        if len(value) <= self.n:
            return [value]
        # Split value into n-grams.
        result = list()
        for i in range(len(value) - (self.n - 1)):
            result.append(Token(value=value[i: i + self.n], rowidx=rowidx))
        return result
| StarcoderdataPython |
1610244 | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics Module Unittest."""
from unittest.mock import call, patch
import unittest
import pytest
import streamlit.metrics
from streamlit import config
class MetricsTest(unittest.TestCase):
    """Metrics Unittest class."""

    def setUp(self):
        """Make sure Client singleton is always empty before starting tests."""
        streamlit.metrics.Client._singleton = None

    def tearDown(self):
        """Cleanup metrics client."""
        config.set_option("global.metrics", False)
        streamlit.metrics.Client._singleton = None
        client = streamlit.metrics.Client.get_current()
        client.toggle_metrics()

    def test_constructor(self):
        """Test streamlit.metrics.Client: constructing registers the singleton."""
        client = streamlit.metrics.Client()
        self.assertEqual(streamlit.metrics.Client._singleton, client)

    def test_get_current(self):
        """Test streamlit.metrics.Client.get_current lazily creates the singleton."""
        client = streamlit.metrics.Client.get_current()
        self.assertEqual(streamlit.metrics.Client._singleton, client)

    def test_not_singleton(self):
        """Test streamlit.metrics.Client not singleton: second construction raises."""
        client = streamlit.metrics.Client.get_current()
        with pytest.raises(RuntimeError) as e:
            streamlit.metrics.Client()
        msg = "Client already initialized. Use .get_current() instead"
        self.assertEqual(msg, str(e.value))

    def test_enabled_metrics_no_prometheus(self):
        """Test streamlit.metrics.Client.toggle_metrics no prometheus.

        Enabling metrics without prometheus-client installed must raise a
        helpful ImportError (the import is forced to fail via patching).
        """
        config.set_option("global.metrics", True)
        client = streamlit.metrics.Client.get_current()
        builtin_import = "builtins.__import__"
        with pytest.raises(ImportError) as e:
            with patch(builtin_import, side_effect=ImportError):
                client.toggle_metrics()
        msg = "prometheus-client is not installed. pip install prometheus-client"
        self.assertEqual(msg, str(e.value))

    def test_enabled_metrics(self):
        """Test streamlit.metrics.toggle_metrics enabled: real prometheus
        metrics are created and exported in text format."""
        config.set_option("global.metrics", True)
        client = streamlit.metrics.Client.get_current()
        client._metrics = {}
        # yapf: disable
        client._raw_metrics = [
            ('Counter', 'unittest_counter', 'Unittest counter', []),
            ('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
            ('Gauge', 'unittest_gauge', 'Unittest gauge', []),
        ]
        # yapf: enable
        client.toggle_metrics()
        client.get("unittest_counter").inc()
        client.get("unittest_counter_labels").labels("some_label")
        client.get("unittest_gauge").set(42)
        truth = [
            "unittest_counter_total 1.0",
            'unittest_counter_labels_total{label="some_label"} 0.0',
            "unittest_gauge 42.0",
        ]
        # Keep only our own metric sample lines; drop prometheus' *_created
        # bookkeeping samples.
        lines = client.generate_latest().splitlines()
        metrics = [
            x.decode("utf-8") for x in lines if x.decode("utf-8").startswith("unit")
        ]
        metrics = [str(x) for x in metrics if "_created" not in x]
        self.assertEqual(sorted(truth), sorted(metrics))

    def test_disabled_metrics_check_value(self):
        """Test streamlit.metrics.Client.toggle_metrics disabled check value.

        With metrics disabled every metric is a MockMetric; verify the exact
        sequence of calls forwarded to the mocks.
        """
        with patch("streamlit.metrics.MockMetric", spec=True) as mock_metric:
            config.set_option("global.metrics", False)
            client = streamlit.metrics.Client.get_current()
            client._metrics = {}
            # yapf: disable
            client._raw_metrics = [
                ('Counter', 'unittest_counter', 'Unittest counter', []),
                ('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
                ('Gauge', 'unittest_gauge', 'Unittest gauge', []),
            ]
            # yapf: enable
            client.toggle_metrics()
            # Test that handler in Server.py will return nothing.
            self.assertEqual(client.generate_latest(), "")
            client.get("unittest_counter").inc()
            client.get("unittest_counter_labels").labels("some_label")
            client.get("unittest_gauge").set(42)
            client.get("unittest_gauge").dec()
            calls = [
                call(),  # Constructor
                call(),  # unittest_counter
                call(),  # unittest_counter_labels
                call(),  # unittest_gauge
                call().inc(),
                call().labels("some_label"),
                call().set(42),
                call().dec(),
            ]
            self.assertEqual(calls, mock_metric.mock_calls)

    def test_disabled_metrics(self):
        """Test streamlit.metrics.Client.toggle_metrics disabled."""
        config.set_option("global.metrics", False)
        client = streamlit.metrics.Client.get_current()
        client._metrics = {}
        # yapf: disable
        client._raw_metrics = [
            ('Counter', 'unittest_counter', 'Unittest counter', []),
            ('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
            ('Gauge', 'unittest_gauge', 'Unittest gauge', []),
        ]
        # yapf: enable
        client.toggle_metrics()
        client.get("unittest_counter").inc()
        client.get("unittest_counter_labels").labels("some_label")
        client.get("unittest_gauge").set(42)
        client.get("unittest_gauge").dec()
        # Purposely not testing anything, just verifying the calls
        # actually work.
| StarcoderdataPython |
3303400 | <gh_stars>0
import requests
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from .config import access_and_compliance_group_name
logger = logging.getLogger(__name__)
def ensure_compliant(sender, request, user, **kwargs):
    """Signal handler: sync the user's compliance-group membership.

    Queries the external validation service for the user's uniqname and
    adds the user to (or removes them from) the access-and-compliance
    group based on the response.
    """
    payload = {'uniqname': user.username}
    response = requests.get(settings.ACCESS_AND_COMPLIANCE_VALIDATION_URL, params=payload)
    # Propagate HTTP errors rather than mis-classifying the user.
    response.raise_for_status()
    group, created = Group.objects.get_or_create(name=access_and_compliance_group_name)
    if _is_compliant(response):
        group.user_set.add(user)
        logger.debug(f'{user} has attested to the data access and compliance policy')
    else:
        group.user_set.remove(user)
        logger.debug(f'{user} has not attested to data compliance policy')
def _is_compliant(response):
    # The service replies with a plain-text body; membership in the configured
    # set of truthy values means the user has attested to the policy.
    return response.text in settings.ACCESS_AND_COMPLIANCE_TRUTHY_VALUES
| StarcoderdataPython |
1798221 | import cv2 as cv
import dlib
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from config import device
from retinaface.detector import Detector
from utils.ddfa import ToTensorGjz, NormalizeGjz, _parse_param
from utils.inference import crop_img, parse_roi_box_from_bbox, parse_roi_box_from_landmark
def extract(img_ori):
    """Extract the 3DMM expression coefficients and pose for the first face.

    Relies on module-level globals created in __main__: face_detector,
    face_regressor, transform, model and device.

    Raises IndexError when no face is detected (rects[0] below); the caller
    catches and skips such frames.
    """
    rects = face_detector(img_ori, 1)
    rect = rects[0]
    # dets, landms = detector.detect_faces(img_ori)
    # det = dets[0]
    # bbox = [det[0], det[1], det[2], det[3]]
    # print('bbox: ' + str(bbox))
    # roi_box = parse_roi_box_from_bbox(bbox)
    # print('roi_box: ' + str(roi_box))
    # - use landmark for cropping
    pts = face_regressor(img_ori, rect).parts()
    pts = np.array([[pt.x, pt.y] for pt in pts]).T
    roi_box = parse_roi_box_from_landmark(pts)
    img = crop_img(img_ori, roi_box)
    # The 3DDFA model expects 120x120 crops.
    img = cv.resize(img, (120, 120), interpolation=cv.INTER_LINEAR)
    input = transform(img).unsqueeze(0)
    input = input.to(device)
    with torch.no_grad():
        param = model(input)
    param = param.squeeze().cpu().numpy().flatten().astype(np.float32)
    # print('param: ' + str(param))
    p, offset, alpha_shp, alpha_exp = _parse_param(param)
    # print('alpha_exp: ' + str(alpha_exp))
    return alpha_exp, p
if __name__ == '__main__':
    # Load the TorchScript 3DDFA model and move it to the configured device.
    filename_scripted = '3ddfa_scripted.pt'
    model = torch.jit.load(filename_scripted)
    cudnn.benchmark = True
    model = model.to(device)
    model.eval()
    # dlib face detector + 68-point landmark regressor used by extract().
    face_detector = dlib.get_frontal_face_detector()
    dlib_landmark_model = 'models/shape_predictor_68_face_landmarks.dat'
    face_regressor = dlib.shape_predictor(dlib_landmark_model)
    # detector = Detector()
    transform = transforms.Compose([ToTensorGjz(), NormalizeGjz(mean=127.5, std=128)])
    alpha_exp_list = []
    pose_list = []
    video = 'data/kuazhangbiaoqing.mp4'
    cap = cv.VideoCapture(video)
    idx = 0
    # Walk the video frame by frame, collecting expression/pose per frame.
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        try:
            alpha_exp, p = extract(frame)
            alpha_exp_list.append(alpha_exp)
            pose_list.append(p)
        except IndexError as err:
            # No face detected in this frame; skip it.
            print(err)
        idx += 1
        print(idx)
    import pickle
    print(len(alpha_exp_list))
    # Persist the collected coefficients for later use.
    data = dict()
    data['alpha_exp'] = alpha_exp_list
    data['pose'] = pose_list
    with open('data.pkl', 'wb') as fp:
        pickle.dump(data, fp)
| StarcoderdataPython |
3397609 | # -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Lock
from actors.internal.executor import Executor
from actors.future import Promise
class Dispatcher(object):
    """Delivers messages to actor mailboxes and schedules their processing
    on an executor."""

    # Max messages a mailbox may process per scheduled run.
    throughput = 5

    def __init__(self, executor):
        self._executor = executor
        self._attach_lock = Lock()
        self._attached_count = 0
        # Completed once the last mailbox detaches.
        self._terminated = Promise()

    def dispatch(self, message, mailbox):
        """Enqueue a user message and schedule the mailbox, unless closed."""
        if not mailbox.is_closed():
            mailbox.enqueue(message)
            self.schedule_execution(mailbox)
        else:
            # NOTE(review): dropped messages are only reported via print —
            # consider routing to a dead-letter handler or logger.
            print("Failed to deliver message. mailbox closed")

    def dispatch_system(self, message, mailbox):
        """Enqueue a system message (higher priority queue) and schedule."""
        if not mailbox.is_closed():
            mailbox.enqueue_system(message)
            self.schedule_execution(mailbox)
        else:
            print("Failed to deliver system message. mailbox closed")

    def attach(self, mailbox):
        """Register a live mailbox with this dispatcher."""
        with self._attach_lock:
            self._attached_count += 1
            assert not mailbox.is_closed()

    def detach(self, mailbox):
        """Unregister a closed mailbox; shuts down the executor and completes
        the termination promise when the last one detaches."""
        assert mailbox.is_closed()
        with self._attach_lock:
            self._attached_count -= 1
            if self._attached_count == 0:
                self._terminated.complete(None)
                self._executor.shutdown()

    def schedule_execution(self, mailbox):
        """Submit the mailbox for processing if it has pending work and is
        not already scheduled; set_scheduled() guards against double-submit."""
        if mailbox.is_closed() or mailbox.is_scheduled() or not mailbox.has_messages():
            return
        if mailbox.set_scheduled():
            self._executor.submit(mailbox.process_messages)
class PinnedDispatcher(Dispatcher):
    """Dispatcher backed by a single-worker executor: the actor is pinned
    to one dedicated thread."""

    def __init__(self):
        super(PinnedDispatcher, self).__init__(Executor(1))
| StarcoderdataPython |
1759610 | <reponame>seik/django-belt<gh_stars>0
import warnings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
def transition_handler_decorator(func):
    """Decorator for transitions methods. Allows to activate a flag if the
    transition is running, and also saves the original status value.
    """
    if not func:
        # No handler configured for this transition: pass the None through.
        return func

    def _transition_wrapper(self=None, *args, **kwargs):
        # `func` arrives here already *bound* (looked up via getattr on the
        # instance in get_transition_handler), so `self` is used only for the
        # bookkeeping below and is deliberately NOT forwarded to `func`.
        self._original_status = getattr(self, self.STATUS_FIELD)
        self._handling_transition = True
        # NOTE(review): no try/finally — if func raises, _handling_transition
        # stays True for this instance. Confirm whether that is intended.
        result = func(*args, **kwargs)
        self._handling_transition = False
        return result

    return _transition_wrapper
class StatusMixin(models.Model):
    """Mixin to handle status changes.

    Subclasses declare allowed/forbidden transitions and per-transition
    handler method names; save() validates and runs handlers around the
    actual database write.
    """
    # Name of the model field holding the status value.
    STATUS_FIELD = "status"
    # Whitelist of (from, to) pairs; empty means all transitions allowed.
    ALLOWED_TRANSITIONS = []
    # Blacklist of (from, to) pairs.
    FORBIDDEN_TRANSITIONS = []
    # Maps (from, to) -> name of an instance method to run after save().
    TRANSITION_HANDLERS = {}

    class Meta:
        abstract = True

    def __init__(self, *args, **kwargs):
        """Init _handling_transition value to False."""
        self._handling_transition = False
        super().__init__(*args, **kwargs)
        self._original_status = getattr(self, self.STATUS_FIELD)

    def refresh_from_db(self, using=None, fields=None):
        # Drop the cached transition so it is recomputed after a reload.
        super().refresh_from_db(using=using, fields=fields)
        if hasattr(self, "_transition"):
            delattr(self, "_transition")

    def get_status_transition(self):
        """Get status transition.

        Returns the cached (old, new) pair if the in-memory status differs
        from the persisted one, or None for unsaved instances / unchanged
        status.
        """
        if self.pk:
            if hasattr(self, "_transition"):
                return self._transition
            previous = self._meta.model.objects.get(pk=self.pk)
            # NOTE(review): `previous.status` hardcodes the attribute name
            # while the rest of the class honors STATUS_FIELD — confirm
            # behavior for subclasses that override STATUS_FIELD.
            if previous.status != getattr(self, self.STATUS_FIELD):
                self._transition = previous.status, getattr(self, self.STATUS_FIELD)
                return self._transition

    def validate_transition(self):
        """Validates the transition."""
        transition = self.get_status_transition()
        # Check the transition is not a allowed transitions
        if (
            transition
            and self.ALLOWED_TRANSITIONS
            and transition not in self.ALLOWED_TRANSITIONS
        ):
            raise ValidationError(
                _(
                    f"A {self._meta.model_name} can't change from "
                    f"{transition[0]} to {transition[1]}"
                )
            )
        # Check the transition is not a forbidden transitions
        if (
            transition
            and self.FORBIDDEN_TRANSITIONS
            and transition in self.FORBIDDEN_TRANSITIONS
        ):
            raise ValidationError(
                _(
                    f"A {self._meta.model_name} can't change from "
                    f"{transition[0]} to {transition[1]}"
                )
            )

    def pre_status_handler(self, transition):
        """Method used to execute code before the status handler is called."""
        pass

    def post_status_handler(self, transition):
        """Method used to execute code after the status handler is called."""
        pass

    def get_transition_handler(self):
        """Get the transition handler between status.

        Looks up the handler name for the current transition, resolves it to
        a bound method, and wraps it with transition_handler_decorator.
        """
        transition = self.get_status_transition()
        if transition:
            handler_name = self.TRANSITION_HANDLERS.get(transition, "")
            transition_handler = (
                getattr(self, handler_name) if hasattr(self, handler_name) else None
            )
            return transition_handler_decorator(transition_handler)

    def clean(self):
        """Validates transition in clean."""
        self.validate_transition()

    def save(self, *args, **kwargs):
        # Revert any status change attempted from inside a transition handler.
        if self._handling_transition and self._original_status != getattr(
            self, self.STATUS_FIELD
        ):
            setattr(self, self.STATUS_FIELD, self._original_status)
            warnings.warn(
                Warning(
                    "Status changes during the execution of transitions handlers are not allowed"
                )
            )
        # Gets the handler before the save
        transition = self.get_status_transition()
        transition_handler = self.get_transition_handler()
        super().save(*args, **kwargs)
        # Executes the handler after the save
        if transition_handler and not self._handling_transition:
            self.pre_status_handler(transition)
            transition_handler(self)
            self.post_status_handler(transition)
| StarcoderdataPython |
53541 | <reponame>remo5000/magma
#!/usr/bin/env python3
import os
SPACES = ' ' * 4

class CodeChunk:
    """Accumulates lines of generated source code at a tracked indent level."""

    class Block:
        """Context manager: indent on entry, dedent on exit."""

        def __init__(self, codegen: 'CodeChunk'):
            self.gen = codegen

        def __enter__(self):
            self.gen.indent()
            return self.gen

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.gen.unindent()

    def __init__(self):
        self.lines = []   # already-indented, formatted lines
        self.level = 0    # current indent depth

    def indent(self):
        """Increase the indent depth by one step."""
        self.level += 1

    def unindent(self):
        """Decrease the indent depth by one step, never below zero."""
        self.level = max(self.level - 1, 0)

    @property
    def indent_string(self):
        """Leading whitespace for the current indent depth."""
        return SPACES * self.level

    def write(self, value: str, *args, **kwargs):
        """Append one line; non-empty values are indented, and str.format
        is applied when positional/keyword arguments are supplied."""
        if value != '':
            value = self.indent_string + value
        if args or kwargs:
            value = value.format(*args, **kwargs)
        self.lines.append(value)

    def write_lines(self, lines):
        """Append several pre-formatted lines at the current indent."""
        self.lines.extend(self.indent_string + line for line in lines)

    def block(self):
        """Return a context manager that indents writes made inside it."""
        return self.Block(self)

    def write_block(self, block_header: str, *args, **kwargs):
        """Write a header line and return a context manager for its body."""
        self.write(block_header, *args, **kwargs)
        return self.block()

    def __str__(self):
        return os.linesep.join(self.lines)
| StarcoderdataPython |
3243224 | from worker import app
from worker.utils import Mailer
@app.task
def send_mail(recipient, subject, body):
    """Celery task: deliver one email through the app-configured Mailer."""
    mailer = Mailer(app.conf)
return mailer.send(recipient=recipient, subject=subject, body=body) | StarcoderdataPython |
3329378 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 <NAME> (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Experiment classes:
Experiment, Flow, Routine, Param, Loop*, *Handlers, and NameSpace
The code that writes out a *_lastrun.py experiment file is (in order):
experiment.Experiment.writeScript() - starts things off, calls other parts
settings.SettingsComponent.writeStartCode()
experiment.Flow.writeBody()
which will call the .writeBody() methods from each component
settings.SettingsComponent.writeEndCode()
"""
from __future__ import absolute_import, print_function
from .params import getCodeFromParamStr, Param
from .components import getInitVals, getComponents, getAllComponents
from .routines import getAllStandaloneRoutines
from ._experiment import Experiment
from .utils import unescapedDollarSign_re, valid_var_re, \
nonalphanumeric_re
from psychopy.experiment.utils import CodeGenerationException
| StarcoderdataPython |
3246581 | """
Record from the default microphone in an infinite loop and create
a Live Playlist (Sliding Window).
Reference: https://developer.apple.com/documentation/http_live_streaming/
"""
import hashlib
import os
import multiprocessing
import pyaudio
import pydub
from typing import List, Tuple
MEDIAS_DIR: str = os.environ.get("HLS_SERVER_MEDIAS_DIR", ".")
TARGET_SEGMENT_DURATION: int = int(
os.environ.get("HLS_SERVER_TARGET_SEGMENT_DURATION", 2)
)
def update_playlist(
    sequence: List[Tuple[str, float]],
    sequence_number: int,
    target_segment_duration: int,
):
    """Rewrite master.m3u8 to describe the given sliding window of segments.

    Args:
        sequence: ordered (segment filename, duration-in-seconds) pairs.
        sequence_number: EXT-X-MEDIA-SEQUENCE of the first listed segment,
            i.e. its position with respect to the beginning of the recording.
        target_segment_duration: EXT-X-TARGETDURATION value (seconds).
    """
    with open(os.path.join(MEDIAS_DIR, "0", "master.m3u8"), mode="w") as f:
        f.truncate()
        f.write(
            "#EXTM3U\n"
            f"#EXT-X-TARGETDURATION:{target_segment_duration}\n"
            "#EXT-X-VERSION:4\n"
            f"#EXT-X-MEDIA-SEQUENCE:{sequence_number}\n"
        )
        for filename, duration in sequence:
            # Per RFC 8216 each #EXTINF tag must be followed by the segment
            # URI; previously `filename` was unpacked but never written.
            f.write(f"#EXTINF:{duration},\n{filename}\n")
def make_stream(chunk_size: int, rate: int, channels: int) -> pyaudio.Stream:
    """Open a 16-bit input stream on the system's default microphone."""
    audio = pyaudio.PyAudio()
    return audio.open(
        format=pyaudio.paInt16,
        channels=channels,
        rate=rate,
        frames_per_buffer=chunk_size,
        input=True,
    )
def make_path_from_media_dir(filename: str) -> str:
    """Return *filename* resolved inside the stream's segment directory."""
    segment_dir = os.path.join(MEDIAS_DIR, "0")
    return os.path.join(segment_dir, filename)
def record(*, target_segment_duration: int = 5, output_queue: multiprocessing.Queue):
    """Record from the default microphone forever, emitting audio segments.

    Captures ~``target_segment_duration`` seconds of mono 16-bit audio at
    44.1 kHz per iteration and pushes each chunk onto ``output_queue`` as a
    pydub AudioSegment for the writer process to export.
    """
    rate = 44100
    chunk_size = rate // 10  # 100 ms of samples per stream read
    stream = make_stream(chunk_size, rate, channels=1)
    while True:
        frames = []
        # Number of reads needed to cover one segment's duration.
        n_frames = round(target_segment_duration / (chunk_size / rate))
        for _ in range(n_frames):
            data = stream.read(chunk_size, exception_on_overflow=True)
            frames.append(data)
        # sample_width=2 bytes matches the paInt16 capture format.
        segment = pydub.AudioSegment(
            data=b"".join(frames), sample_width=2, frame_rate=44100, channels=1
        )
        output_queue.put(segment)
def process_segments(input_queue: multiprocessing.Queue, target_segment_duration):
    """Consume audio segments and maintain a sliding-window live playlist.

    Each segment is exported as an MPEG-TS file named after the SHA-256 of
    its raw audio; once the window exceeds ``rolling_size`` entries the
    oldest segment file is deleted and the media-sequence number advances.
    """
    sequence_number = 1
    rolling_sequence: List[Tuple[str, float]] = []
    rolling_size = 3  # number of segments kept in the live playlist
    while True:
        segment = input_queue.get()
        # Content-addressed name: identical audio maps to the same file.
        filename = "{}.ts".format(hashlib.sha256(segment.raw_data).hexdigest())
        segment.export(
            make_path_from_media_dir(filename),
            format="mpegts",
            codec="mp2",
            bitrate="64k",
        )
        # pydub reports length in milliseconds; the playlist wants seconds.
        rolling_sequence.append((filename, len(segment) / 1000))
        if len(rolling_sequence) > rolling_size:
            sequence_number += 1
            os.remove(make_path_from_media_dir(rolling_sequence[0][0]))
            rolling_sequence = rolling_sequence[1:]
        update_playlist(rolling_sequence, sequence_number, target_segment_duration)
# Wire the recorder to the playlist writer: segments flow through ``q`` from
# this (recording) process to the background writer process.
q = multiprocessing.Queue()
multiprocessing.Process(
    target=process_segments, args=(q, TARGET_SEGMENT_DURATION)
).start()
record(target_segment_duration=TARGET_SEGMENT_DURATION, output_queue=q)
| StarcoderdataPython |
1665137 | # added processing time seems not worth added 'accuracy'
def single2float(x):
    """Round float(x) to the source value's decimal places plus two guard digits.

    Assumes ``str(x)`` contains a decimal point (e.g. a numpy single).
    """
    decimal_places = len(str(x).split('.')[1])
    # NOTE: Alternative method:
    # y = round(float(x), 6)
    return round(float(x), decimal_places + 2)
| StarcoderdataPython |
27464 | <reponame>namrak/pyzkillredisq
import sys
import time

from pymongo import MongoClient, errors

import tstp
from mdb import creds
def connect(logfile):
    """Connect and authenticate to the fpLoss MongoDB database.

    On timeout or connection failure: print a notice, append the error to
    ``logfile``, close it, and exit the process.

    Returns:
        The authenticated database handle.
    """
    try:
        client = MongoClient(creds['ip'], int(creds['port']))
        db = client.fpLoss
        db.authenticate(creds['un'], creds['pw'])
        return db
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        log = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(log)
        # file.write() requires a string, not the exception object.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        log = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(log)
        logfile.write(str(err))
        logfile.close()
        sys.exit()
def insert2mongo(mongohandle, logfile, killmail):
    """Insert a formatted killmail into the ``allkills`` collection.

    Returns 0 on success; on timeout or connection failure logs the error,
    closes ``logfile``, and exits the process.
    """
    try:
        allkills = mongohandle.allkills
        allkills.insert_one(killmail)
        return 0
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        log = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(log)
        # file.write() requires a string, not the exception object.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        log = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(log)
        logfile.write(str(err))
        logfile.close()
        sys.exit()
def get_groupid_from_typeid(mongohandle, logfile, typeid):
    """Look up the groupID for an item typeID; return 0 when unknown.

    On timeout or connection failure logs the error, closes ``logfile``,
    and exits the process.
    """
    try:
        typeids = mongohandle.typeIDs
        cursor = typeids.find_one({"typeID": typeid}, {"groupID": 1})
        if cursor is not None:
            return cursor['groupID']
        else:
            print(tstp.now() + '!!ERROR!! Group ID not found for Type ID: ' + str(typeid) + '\n')
            return 0
    except errors.ServerSelectionTimeoutError as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Timeout Error - Aborting')
        timeoutlog = (tstp.now() + ' Log - Server timeout - ' + '\n')
        logfile.write(timeoutlog)
        # file.write() requires a string, not the exception object.
        logfile.write(str(err))
        logfile.close()
        sys.exit()
    except errors.ConnectionFailure as err:
        print(time.strftime('%m/%d %H:%M:%S'), 'Connection Failure - Aborting')
        failconnlog = (tstp.now() + ' Log - Failed connection - ' + '\n')
        logfile.write(failconnlog)
        logfile.write(str(err))
        logfile.close()
sys.exit() | StarcoderdataPython |
3206404 | <filename>app/types_frontend.py
"""
Data types shared by the frontend and backend.
"""
from enum import Enum
class Aggregate(Enum):
    """Time-bucket granularities for aggregating query results."""
    day = 'day'
    week = 'week'
    month = 'month'
    year = 'year'
class Ternary(Enum):
    """Three-valued filter: match only true, only false, or both."""
    true = 'true'
    false = 'false'
    both = 'both'
class SearchKey:
    """Query-language keys shared between frontend and backend search."""
    video = 'video'
    channel = 'channel'
    show = 'show'
    hour = 'hour'
    day_of_week = 'dayofweek'
    # Face filters: 'name' selects a specific person, 'tag' a face tag.
    face_name = 'name'
    face_tag = 'tag'
    face_count = 'facecount'
    text = 'text'
    text_window = 'textwindow'
class SearchParam:
    """Request-parameter names accepted by the search endpoints."""
    aggregate = 'aggregate'
    start_date = 'start_date'
    end_date = 'end_date'
    detailed = 'detailed'
    is_commercial = 'is_commercial'
    query = 'query'
    video_ids = 'ids'
class GlobalTags:
    """Built-in face-tag values recognized by the search backend."""
    all = 'all'
    male = 'male'
    female = 'female'
    host = 'presenter'
    non_host = 'non_presenter'

# Set of every recognized global tag value, for membership tests.
GLOBAL_TAGS = {GlobalTags.all, GlobalTags.male, GlobalTags.female,
               GlobalTags.host, GlobalTags.non_host}
| StarcoderdataPython |
4810653 | <gh_stars>10-100
#!/usr/bin/env python
__version__ = '$Revision: 8143 $'.split()[1]
__date__ = '$Date: 2008-01-07 19:19:20 -0500 (Mon, 07 Jan 2008) $'.split()[1]
__author__ = '<NAME>'
__doc__='''
Normalize AIS messages so that each message takes exactly one line. Works
like a Queue. Messages must use the uscg format.
Rewrite from the command line only ais_normalize
@requires: U{epydoc<http://epydoc.sourceforge.net/>} > 3.0alpha3
@requires: U{BitVector<http://cheeseshop.python.org/pypi/BitVector>}
@author: U{'''+__author__+'''<http://schwehr.org/>}
@version: ''' + __version__ +'''
@var __date__: Date of last svn commit
@undocumented: __version__ __author__ __doc__ parser
@status: under development
@license: Apache 2.0
@since: 2008-Jan-30
@see: U{Queue<http://www.python.org/doc/current/lib/QueueObjects.html>}
'''
import sys
import Queue
import uscg
import ais.nmea
#from decimal import Decimal
#from BitVector import BitVector
#import StringIO
#from ais.nmea import isChecksumValid,checksumStr # Needed for checksums
#import ais.nmea
class Normalize(Queue.Queue):
    '''
    Provide a channel that normalizes messages.  Try to model it like a Queue.

    Multi-sentence USCG-format AIS messages put() into this queue are
    buffered per receiving station until the final sentence arrives, then
    re-emitted as a single one-line NMEA message.
    '''
    def __init__(self,maxsize=0,ttl=30,verbose=False):
        '''
        param ttl: number of seconds that a message fragment can live
        '''
        Queue.Queue.__init__(self,maxsize)
        self.mostRecentTime=0 # Seconds from UTC epoch
        # NOTE(review): ttl is stored but cull() below is a stub, so old
        # fragments are never actually expired — confirm intent.
        self.ttl=ttl
        self.stations={} # Buffer by station
        self.v=verbose
    def cull(self):
        '''
        Drop messages older than the ttl
        '''
        # NOTE(review): unimplemented; put() calls this expecting cleanup.
        pass
    def put(self,uscgNmeaStr,block=True,timeout=None):
        # Parse the raw sentence into a UscgNmea record.
        cgMsg = uscg.UscgNmea(uscgNmeaStr)
        if self.mostRecentTime<cgMsg.cg_sec:
            self.mostRecentTime = cgMsg.cg_sec
        # single line message needs no help
        if 1 == cgMsg.totalSentences:
            Queue.Queue.put(self,uscgNmeaStr,block,timeout)
            return
        # Non-final fragment of a multi-sentence message: buffer by station.
        if cgMsg.sentenceNum!=cgMsg.totalSentences:
            station = cgMsg.station
            if station not in self.stations:
                self.stations[station] = [cgMsg,]
            else:
                #print 'self.stations[station]',station,type(self.stations[station])
                self.stations[station].append(cgMsg)
            self.cull() # Clean house so the buffers do not get too large
            return
        # We have a final sentence, so construct the whole deal
        # Only can happen the first time we see a station and have not seen the first sentence
        if cgMsg.station not in self.stations:
            sys.stderr.write('dropping dangling fragment\n')
            return
        cgMsgFinal = cgMsg
        stationList = self.stations[cgMsgFinal.station]
        parts=[]
        payloads=[]
        del cgMsg
        # Collect earlier fragments that belong to the same logical message
        # (matched on AIS channel and sequential message id).
        for msg in stationList:
            if (msg.aisChannel == cgMsgFinal.aisChannel
                and msg.sequentialMsgId == cgMsgFinal.sequentialMsgId
                ):
                if msg.sentenceNum==1:
                    cgMsgFinal.cg_sec=msg.cg_sec # Save the first timestamp
                parts.append(msg)
                payloads.append(msg.contents)#.getBitVector)
                assert(msg.fillbits==0)
        #print 'num parts',len(parts)
        #print 'before',len(stationList)
        # Remove the consumed fragments from the station's buffer.
        for cgMsg in parts:
            stationList.remove(cgMsg)
        #print 'after',len(stationList)
        # Incomplete set: some fragments are missing, so drop the message.
        if len(parts)!=cgMsgFinal.totalSentences-1:
            if self.v: sys.stderr.write('partial message. Discarding\n')
            return
        payloads.append(cgMsgFinal.contents)
        #print 'payloads:', payloads
        # The fill bits will not change
        #bv = payloads[0]
        payload = ''.join(payloads)
        #print payload
        #parts.append(cgMsgFinal.getBitVector)
        # Rebuild the final record as a single-sentence message with a
        # recomputed checksum, then emit the normalized NMEA string.
        cgMsgFinal.totalSentences=1
        cgMsgFinal.sentenceNum=1
        cgMsgFinal.contents = payload
        cgMsgFinal.checksumStr = ais.nmea.checksumStr(payload)
        newNmeaStr = cgMsgFinal.buildNmea()
        #print 'queuing',newNmeaStr
        Queue.Queue.put(self,newNmeaStr,block,timeout)
| StarcoderdataPython |
3269044 | import urllib.request
def getGovPdf(index):
    """Download bulletin *index* of the Spanish health-ministry COVID-19
    PDF series into ./data/ and return its filename."""
    baseurl = "https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/"
    filename = f"Actualizacion_{index}_COVID-19.pdf"
    url = baseurl + filename
    print(url)
    # Save under the bulletin's own name; previously the destination was the
    # literal placeholder "(unknown)", clobbering every download.
    urllib.request.urlretrieve(url, f"./data/{filename}")
    return filename
if __name__ == '__main__':
    # Manual smoke test: fetch bulletin number 100.
    pdf=getGovPdf(100)
170150 | <gh_stars>1-10
from flask import Flask, render_template, request
from wtforms import Form, TextAreaField, validators
import pickle
import sqlite3
import re
import os
import numpy as np
app = Flask(__name__)

######## Preparing the Classifier
import re
from sklearn.feature_extraction.text import HashingVectorizer
# Unpickle the preprocessing artefacts and the online-trainable classifier
# relative to this file, and locate the feedback database next to it.
cur_dir = os.path.dirname(__file__)
stop = pickle.load(open(os.path.join(cur_dir, 'pkl_objects/stopwords.pkl'), 'rb'))
porter = pickle.load(open(os.path.join(cur_dir, 'pkl_objects/porterstemmer.pkl'), 'rb'))
clf = pickle.load(open(os.path.join(cur_dir, 'pkl_objects/classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'reviews.sqlite')
def tokenizer(text):
    """Strip HTML, preserve emoticons, then stopword-filter and stem."""
    text = re.sub('<[^>]*>', '', text)
    emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub('[\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-', '')
    return [porter.stem(w) for w in text.split() if w not in stop]
# Stateless hashing vectorizer (no fitted vocabulary), so the classifier can
# be updated online without re-fitting the feature extractor.
vect = HashingVectorizer(decode_error='ignore',
                         n_features=2**21,
                         preprocessor=None,
                         tokenizer=tokenizer)
def classify(document):
    """Return (sentiment label, confidence) for a raw review string."""
    label = {0: 'negative', 1: 'positive'}
    features = vect.transform([document])
    prediction = clf.predict(features)[0]
    confidence = np.max(clf.predict_proba(features))
    return label[prediction], confidence
def train(document, y):
    """Online-update the classifier with one labelled review and persist it."""
    features = vect.transform([document])
    clf.partial_fit(features, [y])
    pickle.dump(clf, open(os.path.join(cur_dir, 'pkl_objects/classifier_upd.pkl'), 'wb'))
def sqlite_entry(path, document, y):
    """Append one labelled review (with a server-side timestamp) to the DB."""
    with sqlite3.connect(path) as conn:
        # The connection context manager commits the transaction on success.
        conn.execute(
            "INSERT INTO review_db (review, sentiment, date) VALUES (?, ?, DATETIME('now'))",
            (document, y),
        )
    conn.close()
######## Flask
class ReviewForm(Form):
    """Single-field review form; requires at least 15 characters of text."""
    moviereview = TextAreaField('',
                                [validators.DataRequired(),
                                 validators.length(min=15)])
@app.route('/')
def index():
    """Render the empty review submission form."""
    return render_template('reviewform.html', form=ReviewForm(request.form))
@app.route('/results', methods=['POST'])
def results():
    """Classify the submitted review and show prediction plus confidence."""
    form = ReviewForm(request.form)
    if request.method != 'POST' or not form.validate():
        # Invalid submission: redisplay the form with its errors.
        return render_template('reviewform.html', form=form)
    review = request.form['moviereview']
    y, proba = classify(review)
    return render_template('results.html',
                           content=review,
                           prediction=y,
                           probability=round(proba*100, 2))
@app.route('/thanks', methods=['POST'])
def feedback():
    """Record the user's verdict and fold it back into the online model."""
    review = request.form['review']
    label = {'negative': 0, 'positive': 1}[request.form['prediction']]
    if request.form['feedback_button'] == 'incorrect':
        # The user disagreed, so the true label is the opposite class.
        label = int(not(label))
    train(review, label)
    sqlite_entry(db, review, label)
    return render_template('thanks.html')
# Development server entry point; use a WSGI server in production.
if __name__ == '__main__':
app.run(debug=True) | StarcoderdataPython |
91642 | import argparse
import logging
from concurrent import futures
from importlib import import_module
from time import sleep
import grpc
from gate_grpc.api import service_pb2_grpc as api_grpc
from . import InstanceServicer, RootServicer
# Fallback bind address when -l is not supplied on the command line.
default_addr = "localhost:12345"
# Aggregated instance-type registry, filled from each imported service module.
service_instance_types = {}
def main():
    """Parse arguments, aggregate service modules, and serve gRPC forever."""
    cli = argparse.ArgumentParser(__package__)
    cli.add_argument("-l", metavar="ADDR", default=default_addr,
                     help="bind address (default: {})".format(default_addr))
    cli.add_argument("module", nargs="+", help="service to import")
    opts = cli.parse_args()

    logging.basicConfig(format="%(asctime)s %(name)s.%(funcName)s: %(message)s",
                        level=logging.DEBUG)

    # Merge every requested module's instance types into the registry.
    for module_name in opts.module:
        loaded = import_module(module_name, __package__)
        service_instance_types.update(loaded.service_instance_types)

    rpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    api_grpc.add_RootServicer_to_server(RootServicer(), rpc_server)
    api_grpc.add_InstanceServicer_to_server(InstanceServicer(), rpc_server)
    rpc_server.add_insecure_port(opts.l)
    rpc_server.start()

    # grpc serves on background threads; park the main thread until Ctrl-C.
    try:
        while True:
            sleep(1000)
    except KeyboardInterrupt:
        rpc_server.stop(0)
main()
| StarcoderdataPython |
3392140 | import tensorflow as tf
from tensorflow import keras
from keras.layers import (
Conv2D, Conv2DTranspose,
MaxPooling2D, Dropout,
concatenate, Reshape)
from keras import Model
from keras_unet_collection.losses import dice
def conv_block(input, filt):
    """Two 3x3 ReLU convolutions (with dropout) followed by 2x2 max-pooling.

    Returns (pre-pool feature map, pooled feature map) so the pre-pool
    tensor can also feed the matching decoder skip connection.
    """
    conv = Conv2D(filt, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(input)
    conv = Dropout(0.1)(conv)
    conv = Conv2D(filt, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv)
    pooled = MaxPooling2D((2, 2))(conv)
    return conv, pooled
def define_model(outchannels, img_height, img_width, img_channels):
    """Build a 4-level U-Net with a 64-filter base and sigmoid output.

    Encoder halves the spatial size four times; the decoder upsamples with
    transposed convolutions and concatenates the matching encoder feature
    maps (skip connections). Input height/width should be divisible by 16
    so the skip tensors align — TODO confirm against callers.
    """
    inputs = tf.keras.layers.Input((img_height, img_width, img_channels))
    filters = 64
    # Encoder: c* are pre-pool features (skip connections), p* are pooled.
    c1, p1 = conv_block(inputs, filters)
    c2, p2 = conv_block(p1, filters*2)
    c3, p3 = conv_block(p2, filters*4)
    c4, p4 = conv_block(p3, filters*8)
    ## BRIDGE/BOTTLENECK
    c5 = Conv2D(
        filters*16, (3, 3),
        activation='relu',
        kernel_initializer='he_normal',
        padding='same')(p4)
    c5 = Dropout(0.2)(c5)
    c5 = Conv2D(
        filters*16, (3, 3),
        activation='relu',
        kernel_initializer='he_normal',
        padding='same')(c5)
    ## Decoder
    u6 = Conv2DTranspose(
        filters=filters*8,
        kernel_size=(2,2),
        strides=(2,2),
        padding=('same'))(c5)
    # c4 = Reshape(
    #     target_shape=u6.shape[1:]
    # )(c4)
    u6 = concatenate([u6, c4])
    c6 = Conv2D(filters*8, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u6)
    c6 = Dropout(0.2)(c6)
    c6 = Conv2D(filters*8, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c6)
    u7 = Conv2DTranspose(
        filters=filters*4,
        kernel_size=(2,2),
        strides=(2,2),
        padding='same')(c6)
    u7 = concatenate([u7, c3])
    c7 = Conv2D(filters*4, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u7)
    c7 = Dropout(0.2)(c7)
    c7 = Conv2D(filters*4, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c7)
    u8 = Conv2DTranspose(
        filters=filters*2,
        kernel_size=(2,2),
        strides=(2,2),
        padding='same')(c7)
    u8 = concatenate([u8, c2])
    # NOTE(review): this stage uses filters*4 although the symmetric U-Net
    # decoder would use filters*2 here — confirm whether intentional.
    c8 = Conv2D(filters*4, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u8)
    c8 = Dropout(0.1)(c8)
    c8 = Conv2D(filters*4, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c8)
    u9 = Conv2DTranspose(
        filters=filters,
        kernel_size=(2,2),
        strides=(2,2),
        padding='same')(c8)
    u9 = concatenate([u9, c1], axis=3)
    c9 = Conv2D(filters, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(u9)
    c9 = Dropout(0.1)(c9)
    c9 = Conv2D(filters, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(c9)
    # Per-pixel sigmoid over `outchannels` output maps (e.g. binary masks).
    outputs = Conv2D(outchannels, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[inputs], outputs=[outputs])
    return model
# if __name__ == "__main__":
# model = define_model(1, int(320*0.8), 320, 1)
# model.compile(
# optimizer='adam',
# loss='binary_crossentropy',
# metrics=[dice]
# )
# print(model.summary())
| StarcoderdataPython |
1663508 | <filename>tools/optimized_workflows/encode-bag-client.py<gh_stars>1-10
# NOTE(review): Python 2 script (print statements, urllib.URLopener —
# both removed in Python 3).
# Usage: encode-bag-client.py <biosample_term_name> <download_dir>
import requests
import sys
from bdbag import bdbag_api
import urllib
BAG_SERVICE = "http://encode.bdbag.org/encode"
#QUERY_BASE = "https://www.encodeproject.org/search/?type=Experiment&assay_slims=DNA+accessibility&assay_title=DNase-seq&"
QUERY_BASE = "https://www.encodeproject.org/search/?type=Experiment&assay_term_name=DNase-seq&replicates.library.biosample.donor.organism.scientific_name=Homo+sapiens&biosample_term_name="
# NOTE(review): filename is appended directly, so argv[2] presumably must
# end with a path separator — confirm.
BASE_DOWNLOAD_PATH = sys.argv[2]
# Ask the bag service to build a BDBag for the ENCODE search results.
query = "%s%s" % (QUERY_BASE, sys.argv[1])
print "Executing query: %s" % query
r = requests.post(BAG_SERVICE, json = {'q': query}, headers = {"Content-Type" : "application/json", "Accepts" : "application/json"})
print r
url = r.json()["uri"]
filename = url.split("/")[-1]
path = "%s%s" % (BASE_DOWNLOAD_PATH, filename)
print "Downloading result: %s" % url
testfile = urllib.URLopener()
testfile.retrieve(url, path)
# BDBag tooling doesnt let us unzip into a single dir.. so
# we use a /base/uuid/uuid
extract_path = path.split(".")[0]
output_path = "%s/%s" %(extract_path, filename.split(".")[0])
print "Extracting bag and resolving fetch: %s" % output_path
bdbag_api.extract_bag(path, extract_path)
# Resolve fetch.txt, downloading the referenced payload files.
bdbag_api.resolve_fetch(output_path, True)
print output_path
| StarcoderdataPython |
1782978 | <gh_stars>0
# -*- coding: utf-8 -*-
"""A major refactoring of ``edge2vec``.
A high level overview:
.. code-block:: python
from edge2vec import calculate_edge_transition_matrix, train, read_graph
graph = read_graph(...)
transition_matrix = calculate_edge_transition_matrix(graph=graph, ...)
word2vec = train(graph=graph, transition_matrix=transition_matrix, ...)
"""
from .edge2vec import train
from .transition import calculate_edge_transition_matrix
from .utils import read_graph
| StarcoderdataPython |
3297054 | <filename>tf_agents/system/default/multiprocessing_core.py
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Multiprocessing hooks for TF-Agents."""
import abc
import multiprocessing as _multiprocessing
from typing import Any, Text
# Public API of this module.
__all__ = [
    'StateSaver',
    'handle_main',
    'handle_test_main',
    'enable_interactive_mode',
]
# Single-element lists used as mutable module-level flags.
_INITIALIZED = [False]  # set once one of the entry points has run
_INTERACTIVE = [False]  # NOTE(review): never set anywhere in this file — confirm intent
_STATE_SAVERS = []      # StateSaver instances registered by the entry points
def initialized():
    """Return True once multiprocessing support has been set up."""
    return _INITIALIZED[0]
class StateSaver(object):
    """Class for getting and setting global state."""
    @abc.abstractmethod
    def collect_state(self) -> Any:
        """Capture global state to ship to a subprocess."""
        pass
    @abc.abstractmethod
    def restore_state(self, state: Any) -> None:
        """Reinstall previously collected global state in a subprocess."""
        pass
def get_context(method: Text = None) -> _multiprocessing.context.BaseContext:
    """Thin wrapper around ``multiprocessing.get_context``."""
    return _multiprocessing.get_context(method)
def handle_main(parent_main_fn, *args, **kwargs):
  """Run a program's main function with TF-Agents multiprocessing enabled.

  Accepts an optional ``extra_state_savers`` keyword argument: a list of
  `tf_agents.multiprocessing.StateSaver` instances, where a `StateSaver`
  tells multiprocessing how to store some global state and how to restore
  it in the subprocess.

  Args:
    parent_main_fn: A callable.
    *args: Positional arguments for `parent_main_fn`.
    **kwargs: Keyword arguments for `parent_main_fn`; may also include the
      `extra_state_savers` kwarg described above.

  Returns:
    Output of `parent_main_fn`.
  """
  _STATE_SAVERS.extend(kwargs.pop('extra_state_savers', []))
  _INITIALIZED[0] = True
  return parent_main_fn(*args, **kwargs)
def handle_test_main(parent_main_fn, *args, **kwargs):
  """Run a test main function with TF-Agents multiprocessing enabled.

  Accepts an optional ``extra_state_savers`` keyword argument: a list of
  `tf_agents.multiprocessing.StateSaver` instances, where a `StateSaver`
  tells multiprocessing how to store some global state and how to restore
  it in the subprocess.

  Args:
    parent_main_fn: A callable.
    *args: Positional arguments for `parent_main_fn`.
    **kwargs: Keyword arguments for `parent_main_fn`; may also include the
      `extra_state_savers` kwarg described above.

  Returns:
    Output of `parent_main_fn`.
  """
  _STATE_SAVERS.extend(kwargs.pop('extra_state_savers', []))
  _INITIALIZED[0] = True
  return parent_main_fn(*args, **kwargs)
def enable_interactive_mode(extra_state_savers=None):
  """Function that enables multiprocessing in interactive mode.

  This function accepts an `extra_state_savers` argument;
  users can provide a list of `tf_agents.multiprocessing.StateSaver` instances,
  where a `StateSaver` tells multiprocessing how to store some global state
  and how to restore it in the subprocess.

  Args:
    extra_state_savers: A list of `StateSaver` instances.

  Raises:
    ValueError: If multiprocessing was already initialized.
  """
  if _INITIALIZED[0]:
    raise ValueError('Multiprocessing already initialized')
  extra_state_savers = extra_state_savers or []
  _STATE_SAVERS.extend(extra_state_savers)
  # NOTE(review): _INTERACTIVE[0] is never set here despite the function's
  # name — confirm whether it should be flipped to True.
  _INITIALIZED[0] = True
| StarcoderdataPython |
81551 | from . import halconfig_types as types
from . import halconfig_dependency as dep
name = "CSEN"
compatibility = dep.Dependency(dep.Platform.SERIES1) # all
peripheral = 'CSEN'
modes = {
'define': 'hal_csen_mode',
'description': 'Mode',
'hide_properties': False,
'values': [
types.EnumValue('single', 'Single Channel Mode'),
types.EnumValue('scan', 'Scan Mode'),
types.EnumValue('bonded', 'Bonded Mode'),
]
}
options = {
"BSP_CSEN_SINGLE_INPUT": {
"type": types.AportSingleChannel(
"BSP_CSEN_SINGLE_INPUT",
signal='CEXT',
define_name_prefix='BSP_CSEN_SINGLE',
define_value_prefix='_CSEN_SINGLECTRL_SINGLESEL_',
),
"description": "Single input selection",
"subcategory":"Single Channel Input",
"mode": "single",
},
"BSP_CSEN_SCAN_MASK0": {
"type": types.AportScanMode(
define_value_prefix="_CSEN_SCANINPUTSEL0_INPUT%nSEL_",
),
"description": "Scan input mask (0-31)",
"category": "Scan Mode",
"allowedconflicts": ["BSP_CSEN_BONDED_INPUT", "BSP_CSEN_SCAN_INPUT"],
"mode": "scan",
},
"BSP_CSEN_SCAN_MASK1": {
"type": types.AportScanMode(
define_value_prefix="_CSEN_SCANINPUTSEL1_INPUT%nSEL_",
channel_start=32
),
"description": "Scan input mask (32-63)",
"category": "Scan Mode",
"allowedconflicts": ["BSP_CSEN_BONDED_INPUT", "BSP_CSEN_SCAN_INPUT"],
"mode": "scan",
},
"BSP_CSEN_BONDED_MASK0": {
"type": types.AportBondedMode(
channel_start=0,
aport="1"
),
"description": "Bonded input mask (0-31)",
"category": "Bonded Mode",
"subcategory": "Input 0-31 (APORT1)",
"allowedconflicts": ["BSP_CSEN_BONDED_INPUT", "BSP_CSEN_SCAN_INPUT"],
"mode": "bonded",
},
"BSP_CSEN_BONDED_MASK1": {
"type": types.AportBondedMode(
channel_start=32,
aport="3"
),
"description": "Bonded input mask (32-63)",
"category": "Bonded Mode",
"subcategory": "Input 32-63 (APORT3)",
"allowedconflicts": ["BSP_CSEN_BONDED_INPUT", "BSP_CSEN_SCAN_INPUT"],
"mode": "bonded",
}
} | StarcoderdataPython |
3267055 | <reponame>tody411/ImageViewerFramework
# -*- coding: utf-8 -*-
## @package ivf.scene.scene
#
# ivf.scene.scene utility package.
# @author tody
# @date 2016/01/25
import numpy as np
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from ivf.io_util.image import loadRGBA
from ivf.scene.data import Data
from ivf.scene.stroke import StrokeSets
from ivf.scene.layer import LayerSet
from ivf.cv.normal import normalToColor
from ivf.cv.image import alpha, gray2rgb, setAlpha, to8U
## Scene
class Scene(QObject, Data):
    ## Emitted with the RGBA image currently selected for display.
    updatedImage = pyqtSignal(object)
    ## Emitted with (image, depth) whenever the depth map changes.
    updatedDepth = pyqtSignal(object, object)
    ## Emitted with a status-bar style message string.
    updatedMessage = pyqtSignal(str)
    ## Display-mode constants.
    DisplayImage = 0
    DisplayNormal = 1
    DisplayDepth = 2
    ## Constructor
    def __init__(self):
        super(Scene, self).__init__()
        self._image = None
        self._image_file = ""
        self._normal = None
        self._depth = None
        self._layer_set = LayerSet()
        self._stroke_sets = StrokeSets()
        self._selection = None
        self._display_mode = self.DisplayImage
    ## Switch the display mode and emit the corresponding visualization.
    #  No-op if no image is loaded; normal/depth modes additionally require
    #  that data to exist.
    def setDisplayMode(self, mode):
        if self._image is None:
            return
        self._display_mode = mode
        if self._display_mode == self.DisplayImage:
            self.updatedImage.emit(self._image)
        if self._display_mode == self.DisplayNormal:
            if self._normal is None:
                return
            normal_image = self.normalColor()
            self.updatedImage.emit(normal_image)
        if self._display_mode == self.DisplayDepth:
            if self._depth is None:
                return
            depth_image = self.depthImage()
            self.updatedImage.emit(depth_image)
    def displayMode(self):
        return self._display_mode
    ## Load an RGBA image from file; resets normal and depth data.
    def setImageFile(self, image_file):
        self._image_file = image_file
        image = loadRGBA(self._image_file)
        self.setImage(image)
        self._normal = None
        self._depth = None
        self.updatedDepth.emit(self._image, self._depth)
    def setImage(self, image):
        self._image = image
        self.updatedImage.emit(image)
    def image(self):
        return self._image
    def setNormal(self, normal):
        self._normal = normal
    def normal(self):
        return self._normal
    ## Normal map rendered as a color image, masked by the image's alpha.
    def normalColor(self):
        A_8U = alpha(self._image)
        return normalToColor(self._normal, A_8U)
    def setDepth(self, depth):
        self._depth = depth
        self.updatedDepth.emit(self._image, self._depth)
    def depth(self):
        return self._depth
    ## Depth map normalized to [0, 1] and rendered as a gray RGBA image.
    #  NOTE(review): divides by (max - min); a constant depth map would
    #  divide by zero — confirm callers guarantee variation.
    def depthImage(self):
        D_min = np.min(self._depth)
        D_max = np.max(self._depth)
        D_32F = (self._depth - D_min) / (D_max - D_min)
        D_8U = to8U(D_32F)
        D_8U = gray2rgb(D_8U)
        A_8U = alpha(self._image)
        D_8U = setAlpha(D_8U, A_8U)
        return D_8U
    def strokeSets(self):
        return self._stroke_sets
    def layerSet(self):
        return self._layer_set
    def setSelection(self, selection):
        self._selection = selection
    ## Accessor for the current selection.
    #  NOTE(review): capitalized name is inconsistent with the other
    #  accessors but kept for caller compatibility.
    def Selection(self):
        return self._selection
    def setMessage(self, message):
        self.updatedMessage.emit(message)
    ## dictionary data for writeJson method.
    def _dataDict(self):
        data = {"image_file": self._image_file}
        data["stroke"] = self._stroke_sets._dataDict()
        return data
    ## set dictionary data for loadJson method.
    def _setDataDict(self, data):
        self.setImageFile(data["image_file"])
        self._stroke_sets._setDataDict(data["stroke"])
| StarcoderdataPython |
3282322 | #
# Copyright 2017 Import.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import logging
import sys
import os
from importio2 import ExtractorAPI
from importio2 import CrawlRunAPI
from jinja2 import Template, FileSystemLoader, Environment
logger = logging.getLogger(__name__)
class CrawlRunMetadata(object):
    """Placeholder for per-crawl-run metadata (not yet implemented)."""
    def __init__(self):
        pass
class ExtractorMetadata(object):
    """Lightweight record of an Extractor's identity, schema and lineage."""

    def __init__(self,
                 guid, parent_guid, chained, name, fields, create_timestamp, modified_timestamp):
        # Stored in parameter order for readability.
        self.guid = guid
        self.parent_guid = parent_guid
        self.chained = chained
        self.name = name
        self.fields = fields
        self.create_timestamp = create_timestamp
        self.modified_timestamp = modified_timestamp
        # Crawl runs are attached later, after the runs are fetched.
        self.crawl_runs = []
class ExtractorDocumentGenerator(object):
    """Renders documentation for import.io Extractors via a Jinja2 template."""
    def __init__(self):
        self._filter = None    # NOTE(review): parsed from -f but never applied — confirm
        self._template = None  # path to the Jinja2 template (relative to cwd)
    def handle_arguments(self):
        """Parse CLI arguments into the generator's configuration."""
        parser = argparse.ArgumentParser(description="Generates Extractor Documentation")
        # NOTE(review): -c is required but its value is never stored/used.
        parser.add_argument('-c', '--configuration', action='store', dest='configuration', metavar='path',
                            required=True, help="Configuration data for documentation generation")
        parser.add_argument('-f', '--filter', action='store', dest='filter', metavar='regexp',
                            help="Filter Extractors based on Regular Expression")
        parser.add_argument('-t', '--template', action='store', dest='template', metavar='path',
                            required=True, help="Path to jina2 template for generating output")
        args = parser.parse_args()
        if args.filter is not None:
            self._filter = args.filter
        if args.template is not None:
            self._template = args.template
    def get_extractor_ids(self):
        """
        Fetch all Extractors from the API and convert each one's field
        metadata into an ExtractorMetadata record.
        :return: list of ExtractorMetadata
        """
        api = ExtractorAPI()
        extractors = api.list()
        extractor_list = []
        for extractor in extractors:
            f = extractor['fields']
            # Chaining info is optional in the API payload.
            chained = False
            parent_guid = None
            if 'isChained' in f:
                chained = bool(f['isChained'])
            if 'parentExtractorGuid' in f:
                parent_guid = f['parentExtractorGuid']
            e = ExtractorMetadata(guid=f['guid'],
                                  parent_guid=parent_guid,
                                  name=f['name'],
                                  fields=f['fields'],
                                  create_timestamp=f['_meta']['creationTimestamp'],
                                  modified_timestamp=f['_meta']['timestamp'],
                                  chained=chained
                                  )
            # print(extractor)
            extractor_list.append(e)
        return extractor_list
    def generate(self, extractors):
        """Render the template with the given extractors and print it."""
        # Template paths are resolved relative to the working directory.
        template_loader = FileSystemLoader(searchpath='./')
        template_env = Environment(loader=template_loader)
        t = template_env.get_template(self._template)
        template_vars = {
            "title": "Extractor Documentation",
            "extractors": extractors,
        }
        print(t.render(template_vars))
    def generate_documentation(self):
        """Document only chained 'leaf' extractors (those with a parent)."""
        extractors = self.get_extractor_ids()
        leaf_extractors = []
        for e in extractors:
            if e.parent_guid is not None:
                leaf_extractors.append(e)
        self.generate(leaf_extractors)
    def execute(self):
        """Full run: parse arguments, then generate the documentation."""
        self.handle_arguments()
        self.generate_documentation()
def main():
    """Command-line entry point: build the generator and run it."""
    cli = ExtractorDocumentGenerator()
    cli.execute()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5952 | <filename>src/freemovr_engine/calib/acquire.py
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
class CameraHandler(object):
    """Wraps one ROS camera: subscribes to its image topic and pushes
    incoming frames into an externally attached queue.  Optionally exposes
    the camera's dynamic_reconfigure service."""

    def __init__(self,topic_prefix='',debug=False,enable_dynamic_reconfigure=False):
        self.topic_prefix=topic_prefix
        self.debug = debug
        rospy.Subscriber( '%s/image_raw'%self.topic_prefix, sensor_msgs.msg.Image,
                          self.get_image_callback)
        # Assumed upper bound (seconds) on the camera driver pipeline latency.
        self.pipeline_max_latency = 0.2
        self.last_image = None
        self.im_queue = None
        self.recon = None
        if enable_dynamic_reconfigure:
            self.recon = rospy.ServiceProxy('%s/set_parameters'%self.topic_prefix, dynamic_reconfigure.srv.Reconfigure)
            self.recon_cache = {}

    def reconfigure(self, **params):
        """Push changed parameters to the camera (no-op unless dynamic
        reconfigure was enabled), then flush queued frames so the next
        image reflects the new settings."""
        if self.recon is not None:
            changed = {}
            # Only consider values that differ from what we sent before.
            for k,v in list(params.items()):
                if k in self.recon_cache:
                    if self.recon_cache[k] != v:
                        changed[k] = v
                else:
                    changed[k] = v
            if changed:
                # NOTE(review): the full *params* dict is encoded here, not
                # just *changed* -- presumably intentional; confirm.
                msg = dynamic_reconfigure.encoding.encode_config(params)
                self.recon_cache.update(changed)
                self.recon(msg)
                if self.im_queue is not None:
                    #clear the queue so we get a new image with the new settings
                    while True:
                        try:
                            self.im_queue.get_nowait()
                        except queue.Empty:
                            break

    def set_im_queue(self,q):
        # Queue shared with the runner that collects results.
        self.im_queue = q

    def get_image_callback(self,msg):
        """ROS subscriber callback: forward *msg* into the queue; frames are
        dropped when the queue is full or not yet attached."""
        if self.im_queue is None:
            return
        try:
            if self.debug:
                print("%s got image: %f" % (self.topic_prefix, msg.header.stamp.to_sec()))
            self.im_queue.put_nowait((self.topic_prefix,msg))
        except queue.Full:
            if self.debug:
                print(self.topic_prefix,"full")
class _Runner(object):
    """Base class that collects images from several CameraHandlers through a
    shared queue and stores them per camera in ``self._result``."""

    def __init__(self, cam_handlers, ros_latency=0.2, queue_depth=20):
        self.cam_handlers = cam_handlers
        # One shared queue, sized so each camera can buffer queue_depth frames.
        self.im_queue = queue.Queue(len(cam_handlers) * queue_depth)
        for ch in self.cam_handlers:
            ch.set_im_queue(self.im_queue)
        self.ros_latency = ros_latency
        self.max_cam_latency = max(ch.pipeline_max_latency for ch in self.cam_handlers)
        self._result = {}

    @property
    def result(self):
        """Mapping of topic prefix -> list of received image messages."""
        return self._result

    @property
    def result_as_nparray(self):
        """Mapping of topic prefix -> (height, width, n) uint8 array built
        by stacking that camera's frames along a third axis."""
        res = {}
        for cam in self._result:
            imgs = []
            for msg in self._result[cam]:
                # np.fromstring is deprecated and removed in NumPy 2.0;
                # frombuffer is the zero-copy replacement.  reshape gives
                # the same (height, width) layout the old code produced.
                imarr = np.frombuffer(msg.data, dtype=np.uint8)
                imgs.append(imarr.reshape((msg.height, msg.width)))
            # dstack copies, so the read-only frombuffer views are fine.
            res[cam] = np.dstack(imgs)
        return res

    def cycle_duration( self, dur ):
        """Busy-wait (in 50 ms steps) until *dur* seconds have elapsed."""
        tstart = time.time()
        while (time.time() - tstart) < dur:
            time.sleep(0.05)  # wait 50 msec

    def clear_queue(self):
        """Discard any frames still waiting in the shared queue."""
        q = self.im_queue
        while 1:
            try:
                q.get_nowait()
            except queue.Empty:
                break

    def _is_done(self,rdict,n_per_camera,verbose=False):
        """Return True once every camera in *rdict* has collected at least
        *n_per_camera* frames."""
        done = True
        for topic_prefix in list(rdict.keys()):
            if verbose:
                rospy.loginfo(' _is_done() has %d frames for %r'%(len(rdict[topic_prefix]), topic_prefix))
            if len(rdict[topic_prefix]) < n_per_camera:
                done = False
        return done
class SimultaneousCameraRunner(_Runner):
    """Collects n frames per camera for a simultaneous acquisition, with
    optional hooks run before (e.g. triggering) and after collection."""

    def __init__(self,cam_handlers,**kwargs):
        _Runner.__init__(self, cam_handlers,**kwargs)

    def get_images(self,n_per_camera, pre_func=None, pre_func_args=[], post_func=None, post_func_args=[], verbose=False):
        """Gather *n_per_camera* frames from every camera into ``self.result``.

        *pre_func* runs right after the queue is cleared; *post_func* runs
        once all frames have arrived.  Late frames are logged but kept.
        """
        self._result.clear()
        for ch in self.cam_handlers:
            self._result[ch.topic_prefix] = []
        #clear the queue
        self.clear_queue()
        if pre_func: pre_func(*pre_func_args)
        # Latest acceptable arrival time for the whole batch.
        t_latest = time.time() + (self.ros_latency + self.max_cam_latency)*n_per_camera
        #wait for the images to arrive
        while not self._is_done(self._result,n_per_camera,verbose=verbose):
            try:
                topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
            except queue.Empty:
                continue
            t_image = msg.header.stamp.to_sec()
            if t_image > t_latest:
                rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
            self._result[topic_prefix].append( msg )
        if post_func: post_func(*post_func_args)
class SequentialCameraRunner(_Runner):
    """Collects n frames per camera, optionally rejecting frames captured
    before the call started (check_earliest) or logging late ones
    (check_latest)."""

    def __init__(self,cam_handlers,**kwargs):
        _Runner.__init__(self, cam_handlers,**kwargs)
        # NOTE(review): 'wait_duration' is read here but **kwargs is also
        # forwarded verbatim to _Runner.__init__, which does not accept it
        # -- passing it would raise TypeError; confirm callers never do.
        self.wait_duration = kwargs.get("wait_duration", 0.1)
        self.check_earliest = False
        self.check_latest = False

    def get_images(self,n_per_camera,verbose=False):
        """Gather *n_per_camera* frames per camera into ``self.result``."""
        self._result.clear()
        for ch in self.cam_handlers:
            self._result[ch.topic_prefix] = []
        # Frames must be captured after this point when check_earliest is set.
        t_earliest = time.time()
        self.clear_queue()
        t_latest = t_earliest + (self.ros_latency + self.max_cam_latency)
        while not self._is_done(self._result,n_per_camera,verbose=verbose):
            try:
                topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
            except queue.Empty:
                continue
            t_image = msg.header.stamp.to_sec()
            if self.check_latest and t_image > t_latest:
                rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
            if self.check_earliest and t_image < t_earliest:
                rospy.logwarn("image from %s at t=%f was too early (by %f)" % (topic_prefix, t_image, t_earliest - t_image))
                continue
            self._result[topic_prefix].append( msg )
| StarcoderdataPython |
4831957 | import os
import time
import base64
import redis
import config
import plivo
def baseN(num,b,numerals="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Render the non-negative integer *num* in base *b*, using *numerals*
    as the digit alphabet.

    Iterative rewrite of the original recursive one-liner.  The old version
    stripped the literal character '0' from intermediate results
    (``lstrip("0")``), which silently drops legitimate leading digits when
    '0' appears at a non-zero position of a custom *numerals* set, and it
    hit the recursion limit for very large numbers.  divmod avoids both.

    :param num: non-negative integer to convert.
    :param b: base, 2 <= b <= len(numerals).
    :param numerals: digit characters, index == digit value.
    :return: string representation of *num* in base *b*.
    """
    if num == 0:
        return "0"
    digits = []
    while num:
        num, rem = divmod(num, b)
        digits.append(numerals[rem])
    return "".join(reversed(digits))
def tinyid(size=6):
    """Build a short pseudo-random id of at most *size* base-36 characters,
    derived from two successive time-based hashes."""
    stamp_a = baseN(abs(hash(time.time())), 36)
    stamp_b = baseN(abs(hash(time.time())), 36)
    return (stamp_a + stamp_b)[0:size]
def get_redis_connection():
    """Connect to Redis at REDISTOGO_URL, falling back to localhost."""
    url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
    return redis.from_url(url)
def get_plivo_connection():
    """Build a Plivo REST client from the credentials in config."""
    return plivo.RestAPI(config.PLIVO_AUTH_ID, config.PLIVO_AUTH_TOKEN)
| StarcoderdataPython |
1722393 | <reponame>zibuyu1995/Hardware
from RPi import GPIO
def led_on():
    """Drive BCM pin 4 high to switch the attached LED on."""
    GPIO.setmode(GPIO.BCM)       # use Broadcom (BCM) pin numbering
    GPIO.setwarnings(False)      # silence "channel already in use" warnings
    GPIO.setup(4, GPIO.OUT)
    GPIO.output(4, GPIO.HIGH)
    print('LED on')
if __name__ == '__main__':
    led_on()
| StarcoderdataPython |
3316564 | # coding: utf-8
try:
import cPickle as pickle
except:
import pickle
from django.test import TestCase
from value.application_settings.models import ApplicationSetting
class ApplicationSettingTests(TestCase):
    """Exercises ApplicationSetting.get() type conversion and its fallback
    behaviour for malformed stored values."""

    def setUp(self):
        # Seed one row per known setting key, mimicking a default install.
        ApplicationSetting.objects.create(
            name=ApplicationSetting.EXCEL_SHEET_INDEX,
            value='0'
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.EXCEL_ENTRY_ORIENTATION,
            value='row'
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.EXCEL_STARTING_ROW_COLUMN,
            value='0'
        )
        # The import template is stored pickled.
        ApplicationSetting.objects.create(
            name=ApplicationSetting.EXCEL_IMPORT_TEMPLATE,
            value=pickle.dumps({'name': 'A', 'description': 'B'})
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.PLAIN_TEXT_SEPARATOR,
            value=''
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.PLAIN_TEXT_STARTING_LINE,
            value=''
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.DECISION_ITEMS_DEFAULT_ORDERING,
            value=''
        )
        ApplicationSetting.objects.create(
            name=ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY,
            value='name,description'
        )

    def test_application_setting_unicode(self):
        # __unicode__ renders as "<NAME>:<value>".
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.EXCEL_SHEET_INDEX)
        self.assertTrue(isinstance(setting, ApplicationSetting))
        self.assertEqual(setting.__unicode__(), 'EXCEL_SHEET_INDEX:0')

    def test_application_setting_get(self):
        # get() converts stored strings into native Python types.
        s = ApplicationSetting.get()
        self.assertTrue(isinstance(s[ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY], list))
        self.assertTrue(isinstance(s[ApplicationSetting.EXCEL_IMPORT_TEMPLATE], dict))
        self.assertTrue(isinstance(s[ApplicationSetting.EXCEL_STARTING_ROW_COLUMN], int))
        self.assertTrue(isinstance(s[ApplicationSetting.EXCEL_SHEET_INDEX], int))

    def test_application_setting_get_invalid_import_template(self):
        # Unpicklable data must fall back to an empty dict.
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.EXCEL_IMPORT_TEMPLATE)
        setting.value = 'invalid_input'
        setting.save()
        s = ApplicationSetting.get()
        self.assertEqual(s[ApplicationSetting.EXCEL_IMPORT_TEMPLATE], dict())

    def test_application_setting_get_invalid_excel_starting_row_column(self):
        # Non-numeric value must fall back to 0.
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.EXCEL_STARTING_ROW_COLUMN)
        setting.value = 'invalid_input'
        setting.save()
        s = ApplicationSetting.get()
        self.assertEqual(s[ApplicationSetting.EXCEL_STARTING_ROW_COLUMN], 0)

    def test_application_setting_get_invalid_excel_sheet_index(self):
        # Non-numeric value must fall back to 0.
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.EXCEL_SHEET_INDEX)
        setting.value = 'invalid_input'
        setting.save()
        s = ApplicationSetting.get()
        self.assertEqual(s[ApplicationSetting.EXCEL_SHEET_INDEX], 0)

    def test_application_setting_get_empty_decision_items_column_display(self):
        # A trailing comma must not produce an empty list entry.
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY)
        setting.value = 'name,description,'
        setting.save()
        s = ApplicationSetting.get()
        self.assertEqual(s[ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY], ['name', 'description'])

    def test_application_setting_get_invalid_decision_items_column_display(self):
        # None must fall back to the default column list.
        setting = ApplicationSetting.objects.get(pk=ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY)
        setting.value = None
        setting.save()
        s = ApplicationSetting.get()
        self.assertEqual(s[ApplicationSetting.DECISION_ITEMS_COLUMNS_DISPLAY], ['name', 'description'])
| StarcoderdataPython |
1736028 | <filename>src/xscontainer/remote_helper/ssh.py<gh_stars>1-10
from xscontainer import api_helper
from xscontainer import util
from xscontainer.util import log
import constants
import fcntl
import errno
import os
import paramiko
import paramiko.rsakey
import select
import socket
import StringIO
import sys
DOCKER_SOCKET_PATH = '/var/run/docker.sock'
SSH_PORT = 22
ERROR_CAUSE_NETWORK = (
"Error: Cannot find a valid IP that allows SSH connections to "
"the VM. Please make sure that Tools are installed, a "
"network route is set up, there is a SSH server running inside "
"the VM that is reachable from Dom0.")
class SshException(util.XSContainerException):
    """Base class for SSH transport/command failures against the VM."""
    pass
class VmHostKeyException(SshException):
    """The VM's SSH host key does not match the one on record."""
    pass
class AuthenticationException(SshException):
    """Key-based SSH authentication with the VM failed."""
    pass
def prepare_request_cmd():
    """Return the shell command that pipes stdin to the Docker unix socket."""
    return "ncat -U " + DOCKER_SOCKET_PATH
class MyHostKeyPolicy(paramiko.MissingHostKeyPolicy):
    """Trust-on-first-use policy: remember the VM's host key in the XAPI
    record on first contact and reject any later mismatch."""

    _session = None
    _vm_uuid = None

    def __init__(self, session, vm_uuid):
        self._session = session
        self._vm_uuid = vm_uuid

    def missing_host_key(self, client, hostname, key):
        """Called by paramiko when the host key is not in the local cache."""
        hostkey = key.get_base64()
        remembered_hostkey = api_helper.get_ssh_hostkey(self._session,
                                                        self._vm_uuid)
        if remembered_hostkey:
            # We have a key on record
            if hostkey == remembered_hostkey:
                # all good - continue
                return
            else:
                # bad - throw error because of mismatch
                message = ("Key for VM %s does not match the known public key."
                           % (self._vm_uuid))
                log.error(message)
                raise VmHostKeyException(message)
        else:
            # we don't have key on record. Let's remember this one for next
            # time
            log.debug("No public key on record found for %s. Will remember."
                      % hostkey)
            api_helper.set_ssh_hostkey(self._session, self._vm_uuid, hostkey)
            # all good - continue
            return
def prepare_ssh_client(session, vmuuid):
    """Open an authenticated paramiko SSHClient to the VM.

    Uses the pool-wide private key and the trust-on-first-use host key
    policy above; raises AuthenticationException or SshException on failure.
    """
    username = api_helper.get_vm_xscontainer_username(session, vmuuid)
    host = api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    log.info("prepare_ssh_client for vm %s, via %s@%s"
             % (vmuuid, username, host))
    client = paramiko.SSHClient()
    pkey = paramiko.rsakey.RSAKey.from_private_key(
        StringIO.StringIO(api_helper.get_idrsa_secret_private(session)))
    # Start from an empty host key cache; trust is handled by the policy.
    client.get_host_keys().clear()
    client.set_missing_host_key_policy(MyHostKeyPolicy(session, vmuuid))
    try:
        client.connect(host, port=SSH_PORT, username=username,
                       pkey=pkey, look_for_keys=False)
    except SshException:
        # This exception is already improved - leave it as it is
        raise
    except paramiko.AuthenticationException as exception:
        message = ("prepare_ssh_client failed to authenticate with private key"
                   " on VM %s" % (vmuuid))
        log.info(message)
        raise AuthenticationException(message)
    except (paramiko.SSHException, socket.error) as exception:
        # reraise as SshException, preserving the original traceback
        raise SshException("prepare_ssh_client: %s" % exception,
                          (sys.exc_info()[2]))
    return client
def execute_docker(session, vmuuid, request):
    """Send *request* to the Docker unix socket inside the VM via SSH."""
    return execute_ssh(session, vmuuid, prepare_request_cmd(), request)
def execute_ssh(session, vmuuid, cmd, stdin_input=None):
    """Run *cmd* (string or list) on the VM over SSH.

    Optionally writes *stdin_input* to the remote process.  Returns stdout
    (at most MAX_BUFFER_SIZE bytes); raises SshException when output is
    larger, the exit code is non-zero, or the transport fails.
    """
    client = None
    try:
        try:
            client = prepare_ssh_client(session, vmuuid)
            if isinstance(cmd, list):
                cmd = ' '.join(cmd)
            # Stripped copy only for logging; the raw input is sent as-is.
            stripped_stdin_input = stdin_input
            if stripped_stdin_input:
                stripped_stdin_input = stripped_stdin_input.strip()
            log.info("execute_ssh will run '%s' with stdin '%s' on vm %s"
                     % (cmd, stripped_stdin_input, vmuuid))
            stdin, stdout, _ = client.exec_command(cmd)
            if stdin_input:
                stdin.write(stdin_input)
                stdin.channel.shutdown_write()
            output = stdout.read(constants.MAX_BUFFER_SIZE)
            # If one more byte can be read, the command produced more than
            # MAX_BUFFER_SIZE bytes -- refuse the truncated result.
            if stdout.read(1) != "":
                raise SshException("too much data was returned when executing"
                                   "'%s'" % (cmd))
            returncode = stdout.channel.recv_exit_status()
            if returncode != 0:
                log.info("execute_ssh '%s' on vm %s exited with rc %d: Stdout:"
                         " %s" % (cmd, vmuuid, returncode, stdout))
                raise SshException("Returncode for '%s' is not 0" % cmd)
            return output
        except SshException:
            # This exception is already improved - leave it as it is
            raise
        except Exception as exception:
            # reraise as SshException
            raise SshException("execute_ssh: %s" % exception,
                               (sys.exc_info()[2]))
    finally:
        if client:
            client.close()
def execute_docker_data_listen(session, vmuuid, request,
                               stop_monitoring_request):
    """Generator: send *request* to the Docker socket and yield the reply
    byte by byte until EOF, or until *stop_monitoring_request* becomes
    truthy (re-checked every poll interval)."""
    ssh_client = prepare_ssh_client(session, vmuuid)
    try:
        cmd = prepare_request_cmd()
        log.info("execute_docker_listen_charbychar is running '%s' on VM '%s'"
                 % (cmd, vmuuid))
        stdin, stdout, _ = ssh_client.exec_command(cmd)
        stdin.write(request)
        # set unblocking io for select.select
        stdout_fd = stdout.channel.fileno()
        fcntl.fcntl(stdout_fd,
                    fcntl.F_SETFL,
                    os.O_NONBLOCK | fcntl.fcntl(stdout_fd, fcntl.F_GETFL))
        while not stop_monitoring_request:
            rlist, _, _ = select.select([stdout_fd], [], [],
                                        constants.MONITOR_EVENTS_POLL_INTERVAL)
            if not rlist:
                continue
            try:
                read_data = stdout.read(1)
                if read_data == "":
                    break
                yield read_data
            except IOError as exception:
                log.info("IOError")
                # EAGAIN/EINTR just mean "no data yet" on a non-blocking
                # fd; anything else is fatal.
                if exception[0] not in (errno.EAGAIN, errno.EINTR):
                    log.info("Cleared")
                    raise
                # NOTE(review): sys.exc_clear() (and IOError indexing above)
                # exist only on Python 2 -- confirm target interpreter.
                sys.exc_clear()
    finally:
        try:
            ssh_client.close()
        except Exception:
            util.log.exception("Error when closing ssh_client for %r"
                               % ssh_client)
    log.info('execute_docker_listen_charbychar (%s) exited' % cmd)
def determine_error_cause(session, vmuuid):
    """Probe the VM step by step (network, SSH auth, host key, ncat, Docker
    socket, permissions) and return a human-readable explanation of why
    Docker management is failing."""
    cause = ""
    try:
        api_helper.get_suitable_vm_ip(session, vmuuid, SSH_PORT)
    except util.XSContainerException:
        cause = ERROR_CAUSE_NETWORK
        # No reason to continue, if there is no network connection
        return cause
    try:
        execute_ssh(session, vmuuid, ['echo', 'hello world'])
    except AuthenticationException:
        cause = (cause + "Unable to verify key-based authentication. "
                 "Please prepare the VM to install a key.")
        # No reason to continue, if there is no SSH connection
        return cause
    except VmHostKeyException:
        cause = (cause + "The SSH host key of the VM has unexpectedly"
                 " changed, which could potentially be a security breach."
                 " If you think this is safe and expected, you"
                 " can reset the record stored in XS using xe"
                 " vm-param-remove uuid=<vm-uuid> param-name=other-config"
                 " param-key=xscontainer-sshhostkey")
        # No reason to continue, if there is no SSH connection
        return cause
    except SshException:
        cause = (cause + "Unable to connect to the VM using SSH. Please "
                 "check the logs inside the VM and also try manually.")
        # No reason to continue, if there is no SSH connection
        return cause
    # @todo: we could alternatively support socat
    # @todo: we could probably prepare this as part of xscontainer-prepare-vm
    try:
        execute_ssh(session, vmuuid, ['command -v ncat'])
    except util.XSContainerException:
        cause = (cause + "Unable to find ncat inside the VM. Please install "
                 "ncat. ")
    try:
        # -S: test that the path exists and is a unix socket.
        execute_ssh(session, vmuuid, ['test', '-S', DOCKER_SOCKET_PATH])
    except util.XSContainerException:
        cause = (cause + "Unable to find the Docker unix socket at %s."
                 % (DOCKER_SOCKET_PATH) +
                 " Please install and run Docker.")
        # No reason to continue, if there is no docker socket
        return cause
    try:
        # Check read and write permission on the socket for the SSH user.
        execute_ssh(session, vmuuid, ['test -r "%s" && test -w "%s" '
                                      % (DOCKER_SOCKET_PATH,
                                         DOCKER_SOCKET_PATH)])
    except util.XSContainerException:
        cause = (cause + "Unable to access the Docker unix socket. "
                 "Please make sure the specified user account "
                 "belongs to the docker account group.")
    if cause == "":
        cause = "Unable to determine cause of failure."
    return cause
| StarcoderdataPython |
3253000 | <reponame>shantanu561993/PyCssMinify
# -*- coding: utf-8 -*-
import os
import concurrent.futures
import requests
def minify_css(file_path):
    """Minify one CSS file via the cssminifier.com web API, or recurse over
    every ``*.css`` file below *file_path* when it is a directory.

    The minified output is written next to the source as ``<name>.min.css``.
    Files of 5 MB or more are skipped (API limit).

    :param file_path: path to a ``.css`` file or a directory to scan.
    :raises Exception: if *file_path* is not a string or does not exist.
    """
    if not isinstance(file_path, str):
        raise Exception("CSS file path must be a string")
    if not os.path.isfile(os.path.abspath(file_path)) and not os.path.isdir(os.path.abspath(file_path)):
        raise Exception("file path must be a valid directory or a file path " + os.path.abspath(file_path))
    if os.path.isfile(file_path) and os.path.getsize(file_path) >> 20 < 5:
        # Context manager closes the handle (the original leaked it).
        with open(file_path, 'r') as source:
            data = {'input': source.read()}
        r = requests.post("http://cssminifier.com/raw", data=data)
        if r.status_code == 200:
            # os.path.splitext removes exactly the final extension; the old
            # rstrip(".css") stripped a *character set* and mangled names
            # such as "basics.css" -> "basic.min.css".
            target = os.path.splitext(file_path)[0] + ".min.css"
            with open(target, 'w') as f:
                f.write(r.text)
    elif os.path.isdir(file_path):
        # Bug fix: walk the requested directory, not the current working
        # directory ("." in the original, which ignored file_path).
        for root, dirs, files in os.walk(file_path):
            for file_name in files:
                if file_name.endswith(".css") and not file_name.endswith(".min.css"):
                    minify_css(os.path.abspath(os.path.join(root, file_name)))
def minify(file_path):
    """Minify CSS files concurrently: every ``*.css`` under a directory, or
    a single ``.css`` file, each submitted to a small thread pool."""
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
        if os.path.isdir(file_path):
            for root, _dirs, names in os.walk(file_path):
                for name in names:
                    if name.endswith(".css") and not name.endswith(".min.css"):
                        pool.submit(minify_css, os.path.abspath(os.path.join(root, name)))
        elif os.path.isfile(file_path):
            if file_path.endswith(".css") and not file_path.endswith(".min.css"):
                pool.submit(minify_css, file_path)
| StarcoderdataPython |
3361740 | #!/usr/bin/env python3
import os
import sys
# Default directory checked when no argument is given.
path = "/media/Colossus/Series/"
if len(sys.argv) > 1:
    path = sys.argv[1]
# NOTE: despite the name, this checks existence of *any* path (file or
# directory), not only regular files.
isFile = os.path.exists(path)
#print(path)
#print(isFile)
#print(len(sys.argv))
# Exit status 0 when the path exists, 1 otherwise (shell-friendly).
if isFile:
    sys.exit(0)
else:
    sys.exit(1)
| StarcoderdataPython |
125386 | <reponame>dariusgrassi/trex-core<filename>scripts/external_libs/scapy-2.4.5/scapy/contrib/automotive/gm/gmlanutils.py<gh_stars>100-1000
#! /usr/bin/env python
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
# scapy.contrib.description = GMLAN Utilities
# scapy.contrib.status = loads
import time
from scapy.compat import Optional, cast, Callable
from scapy.contrib.automotive.gm.gmlan import GMLAN, GMLAN_SA, GMLAN_RD, \
GMLAN_TD, GMLAN_PM, GMLAN_RMBA
from scapy.config import conf
from scapy.packet import Packet
from scapy.contrib.isotp import ISOTPSocket
from scapy.error import warning, log_loading
from scapy.utils import PeriodicSenderThread
__all__ = ["GMLAN_TesterPresentSender", "GMLAN_InitDiagnostics",
"GMLAN_GetSecurityAccess", "GMLAN_RequestDownload",
"GMLAN_TransferData", "GMLAN_TransferPayload",
"GMLAN_ReadMemoryByAddress", "GMLAN_BroadcastSocket"]
log_loading.info("\"conf.contribs['GMLAN']"
"['treat-response-pending-as-answer']\" set to True). This "
"is required by the GMLAN-Utils module to operate "
"correctly.")
try:
conf.contribs['GMLAN']['treat-response-pending-as-answer'] = False
except KeyError:
conf.contribs['GMLAN'] = {'treat-response-pending-as-answer': False}
# Helper function
def _check_response(resp, verbose):
# type: (Packet, Optional[bool]) -> bool
if resp is None:
if verbose:
print("Timeout.")
return False
if verbose:
resp.show()
return resp.service != 0x7f # NegativeResponse
class GMLAN_TesterPresentSender(PeriodicSenderThread):
    def __init__(self, sock, pkt=GMLAN(service="TesterPresent"), interval=2):
        # type: (ISOTPSocket, Packet, int) -> None
        """ Thread to send GMLAN TesterPresent packets periodically

        Note: the default *pkt* object is built once at import time and is
        shared by all instances that rely on the default.

        :param sock: socket where packet is sent periodically
        :param pkt: packet to send
        :param interval: interval between two packets
        """
        PeriodicSenderThread.__init__(self, sock, pkt, interval)

    def run(self):
        # type: () -> None
        """Send every packet once per interval until stopped; the short
        sr1 timeout keeps the thread from blocking on replies."""
        while not self._stopped.is_set():
            for p in self._pkts:
                self._socket.sr1(p, verbose=False, timeout=0.1)
            time.sleep(self._interval)
def GMLAN_InitDiagnostics(sock, broadcast_socket=None, timeout=None, verbose=None, retry=0):  # noqa: E501
    # type: (ISOTPSocket, Optional[ISOTPSocket], Optional[int], Optional[bool], int) -> bool  # noqa: E501
    """ Send messages to put an ECU into diagnostic/programming state.

    Runs the handshake DisableNormalCommunication ->
    ReportProgrammingState -> requestProgrammingMode ->
    enableProgrammingMode; the last step expects no response.

    :param sock: socket for communication.
    :param broadcast_socket: socket for broadcasting. If provided some message
                             will be sent as broadcast. Recommended when used
                             on a network with several ECUs.
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level
    :param retry: number of retries in case of failure.
    :return: True on success else False
    """
    # Helper function
    def _send_and_check_response(sock, req, timeout, verbose):
        # type: (ISOTPSocket, Packet, Optional[int], Optional[bool]) -> bool
        if verbose:
            print("Sending %s" % repr(req))
        resp = sock.sr1(req, timeout=timeout, verbose=False)
        return _check_response(resp, verbose)
    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)
    while retry >= 0:
        retry -= 1
        # DisableNormalCommunication
        p = GMLAN(service="DisableNormalCommunication")
        if broadcast_socket is None:
            if not _send_and_check_response(sock, p, timeout, verbose):
                continue
        else:
            # Broadcast variant: fire-and-forget, then give ECUs time.
            if verbose:
                print("Sending %s as broadcast" % repr(p))
            broadcast_socket.send(p)
            time.sleep(0.05)
        # ReportProgrammedState
        p = GMLAN(service="ReportProgrammingState")
        if not _send_and_check_response(sock, p, timeout, verbose):
            continue
        # ProgrammingMode requestProgramming
        p = GMLAN() / GMLAN_PM(subfunction="requestProgrammingMode")
        if not _send_and_check_response(sock, p, timeout, verbose):
            continue
        time.sleep(0.05)
        # InitiateProgramming enableProgramming
        # No response expected
        p = GMLAN() / GMLAN_PM(subfunction="enableProgrammingMode")
        if verbose:
            print("Sending %s" % repr(p))
        sock.send(p)
        time.sleep(0.05)
        return True
    return False
def GMLAN_GetSecurityAccess(sock, key_function, level=1, timeout=None, verbose=None, retry=0):  # noqa: E501
    # type: (ISOTPSocket, Callable[[int], int], int, Optional[int], Optional[bool], int) -> bool  # noqa: E501
    """ Authenticate on ECU. Implements Seed-Key procedure.

    Requests a seed at *level* (levels are odd numbers) and answers with
    ``key_function(seed)`` at *level* + 1.  A seed of 0 means the ECU is
    already unlocked.

    :param sock: socket to send the message on.
    :param key_function: function implementing the key algorithm.
    :param level: level of access
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level
    :param retry: number of retries in case of failure.
    :return: True on success.
    """
    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)
    if key_function is None:
        return False
    if level % 2 == 0:
        warning("Parameter Error: Level must be an odd number.")
        return False
    while retry >= 0:
        retry -= 1
        request = GMLAN() / GMLAN_SA(subfunction=level)
        if verbose:
            print("Requesting seed..")
        resp = sock.sr1(request, timeout=timeout, verbose=0)
        if not _check_response(resp, verbose):
            # 0x37 == RequiredTimeDelayNotExpired: ECU enforces a delay
            # between attempts, so wait before the next retry.
            if resp is not None and resp.returnCode == 0x37 and retry:
                if verbose:
                    print("RequiredTimeDelayNotExpired. Wait 10s.")
                time.sleep(10)
            if verbose:
                print("Negative Response.")
            continue
        seed = resp.securitySeed
        if seed == 0:
            if verbose:
                print("ECU security already unlocked. (seed is 0x0000)")
            return True
        keypkt = GMLAN() / GMLAN_SA(subfunction=level + 1,
                                    securityKey=key_function(seed))
        if verbose:
            print("Responding with key..")
        resp = sock.sr1(keypkt, timeout=timeout, verbose=0)
        if resp is None:
            if verbose:
                print("Timeout.")
            continue
        if verbose:
            resp.show()
        if resp.sprintf("%GMLAN.service%") == "SecurityAccessPositiveResponse":  # noqa: E501
            if verbose:
                print("SecurityAccess granted.")
            return True
        # Invalid Key
        elif resp.sprintf("%GMLAN.service%") == "NegativeResponse" and \
                resp.sprintf("%GMLAN.returnCode%") == "InvalidKey":
            if verbose:
                print("Key invalid")
            continue
    return False
def GMLAN_RequestDownload(sock, length, timeout=None, verbose=None, retry=0):
    # type: (ISOTPSocket, int, Optional[int], Optional[bool], int) -> bool
    """ Send RequestDownload message.

    Usually used before calling TransferData to announce the total
    (uncompressed) payload size to the ECU.

    :param sock: socket to send the message on.
    :param length: value for the message's parameter 'unCompressedMemorySize'.
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level.
    :param retry: number of retries in case of failure.
    :return: True on success
    """
    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)
    while retry >= 0:
        # RequestDownload
        pkt = GMLAN() / GMLAN_RD(memorySize=length)
        resp = sock.sr1(pkt, timeout=timeout, verbose=0)
        if _check_response(resp, verbose):
            return True
        retry -= 1
        if retry >= 0 and verbose:
            print("Retrying..")
    return False
def GMLAN_TransferData(sock, addr, payload, maxmsglen=None, timeout=None, verbose=None, retry=0):  # noqa: E501
    # type: (ISOTPSocket, int, bytes, Optional[int], Optional[int], Optional[bool], int) -> bool  # noqa: E501
    """ Send TransferData message.

    Usually used after calling RequestDownload.  The payload is written
    chunk-by-chunk, each chunk addressed at ``addr + offset``.

    :param sock: socket to send the message on.
    :param addr: destination memory address on the ECU.
    :param payload: data to be sent.
    :param maxmsglen: maximum length of a single iso-tp message.
                      default: maximum length
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level.
    :param retry: number of retries in case of failure.
    :return: True on success.
    """
    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)
    startretry = retry
    scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
    # The address must fit into the configured addressing width.
    if addr < 0 or addr >= 2**(8 * scheme):
        warning("Error: Invalid address %s for scheme %s",
                hex(addr), str(scheme))
        return False
    # max size of dataRecord according to gmlan protocol
    if maxmsglen is None or maxmsglen <= 0 or maxmsglen > (4093 - scheme):
        maxmsglen = (4093 - scheme)
    maxmsglen = cast(int, maxmsglen)
    for i in range(0, len(payload), maxmsglen):
        # Each chunk gets its own full retry budget.
        retry = startretry
        while True:
            if len(payload[i:]) > maxmsglen:
                transdata = payload[i:i + maxmsglen]
            else:
                transdata = payload[i:]
            pkt = GMLAN() / GMLAN_TD(startingAddress=addr + i,
                                     dataRecord=transdata)
            resp = sock.sr1(pkt, timeout=timeout, verbose=0)
            if _check_response(resp, verbose):
                break
            retry -= 1
            if retry >= 0:
                if verbose:
                    print("Retrying..")
            else:
                return False
    return True
def GMLAN_TransferPayload(sock, addr, payload, maxmsglen=None, timeout=None,
                          verbose=None, retry=0):
    # type: (ISOTPSocket, int, bytes, Optional[int], Optional[int], Optional[bool], int) -> bool  # noqa: E501
    """ Send data by using GMLAN services.

    Announces the payload size with RequestDownload, then streams the data
    with TransferData.  Returns True only when both steps succeed.

    :param sock: socket to send the data on.
    :param addr: destination memory address on the ECU.
    :param payload: data to be sent.
    :param maxmsglen: maximum length of a single iso-tp message.
                      default: maximum length
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level.
    :param retry: number of retries in case of failure.
    :return: True on success.
    """
    announced = GMLAN_RequestDownload(sock, len(payload), timeout=timeout,
                                      verbose=verbose, retry=retry)
    if not announced:
        return False
    return GMLAN_TransferData(sock, addr, payload, maxmsglen=maxmsglen,
                              timeout=timeout, verbose=verbose, retry=retry)
def GMLAN_ReadMemoryByAddress(sock, addr, length, timeout=None,
                              verbose=None, retry=0):
    # type: (ISOTPSocket, int, int, Optional[int], Optional[bool], int) -> Optional[bytes]  # noqa: E501
    """ Read data from ECU memory.

    :param sock: socket to send the data on.
    :param addr: source memory address on the ECU.
    :param length: bytes to read.
    :param timeout: timeout for sending, receiving or sniffing packages.
    :param verbose: set verbosity level.
    :param retry: number of retries in case of failure.
    :return: bytes read, or None on failure.
    """
    if verbose is None:
        verbose = conf.verb > 0
    retry = abs(retry)
    scheme = conf.contribs['GMLAN']['GMLAN_ECU_AddressingScheme']
    # The address must fit into the configured addressing width.
    if addr < 0 or addr >= 2**(8 * scheme):
        warning("Error: Invalid address %s for scheme %s",
                hex(addr), str(scheme))
        return None
    # max size of dataRecord according to gmlan protocol
    if length <= 0 or length > (4094 - scheme):
        warning("Error: Invalid length %s for scheme %s. "
                "Choose between 0x1 and %s",
                hex(length), str(scheme), hex(4094 - scheme))
        return None
    while retry >= 0:
        # ReadMemoryByAddress request
        pkt = GMLAN() / GMLAN_RMBA(memoryAddress=addr, memorySize=length)
        resp = sock.sr1(pkt, timeout=timeout, verbose=0)
        if _check_response(resp, verbose):
            return resp.dataRecord
        retry -= 1
        if retry >= 0 and verbose:
            print("Retrying..")
    return None
def GMLAN_BroadcastSocket(interface):
    # type: (str) -> ISOTPSocket
    """ Returns a GMLAN broadcast socket using interface.

    :param interface: interface name
    :return: ISOTPSocket configured as GMLAN Broadcast Socket
    """
    # NOTE(review): sid 0x101 / extended_addr 0xfe appear to be the GMLAN
    # functional (all-nodes) addressing -- confirm against the GMLAN spec.
    return ISOTPSocket(interface, sid=0x101, did=0x0, basecls=GMLAN,
                       extended_addr=0xfe, padding=True)
| StarcoderdataPython |
1772803 | from cache import REDIS
from fastapi.responses import PlainTextResponse
from loguru import logger
# pylint: disable=E0611
from pydantic import BaseModel
# pylint: enable=E0611
DOC = {
200: {
"description": "API response successfully",
"content": {"application/json": {"example": {"name": "apple", "count": 0}}},
}
}
class Payload(BaseModel):
    # Request body: fruit name (used as the Redis key) and optional count.
    name: str
    count: int = 0
def post(payload: Payload):
    """Create a fruit entry in Redis: 200 on success, 400 if the fruit
    already exists or Redis fails."""
    try:
        # A truthy GET means the key exists (counts are stored as strings,
        # so even a stored "0" is truthy here).
        if REDIS.get(f"{payload.name}"):
            raise Exception(f"Fruit {payload.name} already exists!")
        REDIS.set(f"{payload.name}", f"{payload.count}")
        return PlainTextResponse("OK", 200)
    except Exception as error:
        # Both the duplicate case and genuine Redis errors land here.
        logger.warning(error)
        return PlainTextResponse("Bad Request", 400)
| StarcoderdataPython |
1796447 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""PNG形式で画像を保存する"""
import os
from gimpfu import *
FILENAME_ENCODING = 'cp932'
def generate_new_filename(filename):
    """Return *filename* with its (last) extension replaced by '.png'."""
    stem, _old_ext = os.path.splitext(filename)
    return stem + '.png'
def plugin_func():
    """Flatten a copy of every open GIMP image and save it as PNG next to
    the original file; the originals are left untouched."""
    for image in gimp.image_list():
        # Build the target file name with the extension changed to .png
        filename = generate_new_filename(image.filename)
        # Duplicate so the open image keeps its layers
        new_image = pdb.gimp_image_duplicate(image)
        # Merge all layers into one
        layer = new_image.flatten()
        # Save the flattened copy
        pdb.gimp_file_save(new_image, layer, filename, '')
        # Drop the working duplicate
        pdb.gimp_image_delete(new_image)
register(
'save_png',
'PNG形式で画像を保存',
'help message',
'author',
'copyright',
'year',
'PNG形式で画像を保存',
'*',
[],
[],
plugin_func,
menu='<Image>/Tools'
)
main()
| StarcoderdataPython |
140710 | #usr/bin/env python
import time
import io
import os
import re
import sys
from io import open
from sys import argv
import pandas as pd
## ARGV: RepeatMasker .out file, seqID-to-name table (TSV), output folder
if len(sys.argv) < 4:
    print("\nUsage:")
    print("python3 %s repeatMasker info_sequence_names folder\n" % os.path.abspath(argv[0]))
    exit()

repeatMasker_file = argv[1]
conversion_name = argv[2]
folder = argv[3]

# Map original sequence IDs (column 0) to the desired sequence names (column 1).
# NOTE: read_csv(..., squeeze=True) was removed in pandas 2.0; the documented
# replacement is DataFrame.squeeze("columns") before .to_dict().
df_SeqName = pd.read_csv(conversion_name, sep="\t", header=None,
                         index_col=0).squeeze("columns").to_dict()
print("+ Parsing information provided for sequence names:")

### Derive the output BED path from the RepeatMasker file name
split_name = os.path.splitext(os.path.basename(repeatMasker_file))
repeatmasker_bed = os.path.join(folder, split_name[0] + '.bed')

# 'with' guarantees both handles are closed even on error; iterating the
# file object replaces the original readline()/while-True loop.
with open(repeatmasker_bed, 'w') as output_file, \
        open(repeatMasker_file, "r") as fileHandler:
    for line in fileHandler:
        line = line.strip()
        if not line:
            continue
        # Skip the two RepeatMasker header lines ("SW ..." and "score ...")
        if line.startswith('SW') or line.startswith('score'):
            continue
        # Normalize whitespace to tabs, then split once instead of per field.
        fields = re.sub(r'\s+', '\t', line).split('\t')
        seqID = fields[4]
        typeRepeat = fields[10]
        # Keep only sequences we can rename, and drop simple repeats.
        if seqID in df_SeqName and typeRepeat != 'Simple_repeat':
            chR = df_SeqName[seqID]
            start = fields[5]
            end = fields[6]
            repeatID = fields[9]
            score = fields[0]
            ## strand: RepeatMasker marks reverse-complement hits with 'C'
            strand = '-' if fields[7] == 'C' else '+'
            output_file.write('%s\t%s\t%s\t%s==%s\t%s\t%s\n'
                              % (chR, start, end, repeatID, typeRepeat, score, strand))
| StarcoderdataPython |
3315570 | #! /usr/bin/env python3
class Rectangle:
    """A claimed patch of fabric: top-left offset plus width/height."""

    def __init__(self, offset, dimensions, patchid):
        # Unpack the (x, y) offset and (w, h) dimensions into flat fields.
        self.left, self.up = offset
        self.width, self.height = dimensions
        self.patchid = patchid
# Read the puzzle input; each line describes one claim, e.g. "#1 @ 3,2: 5x4".
with open('input/day3') as input:
    vals = list(map(lambda x: x.strip(), input.readlines()))
# parse the lines
rectangles = []
for val in vals:
    split = val.split()
    # "#123" -> "123"
    patchid = split[0].strip()[1:]
    # "3,2:" -> (3, 2)
    offset = tuple(map(int, split[2].strip()[:-1].split(',')))
    # "5x4" -> (5, 4)
    dimensions = tuple(map(int, split[3].strip().split('x')))
    rectangles.append(Rectangle(offset, dimensions, patchid))
# Board size is the tightest bounding box around all claims.
board_width, board_height = 0, 0
for r in rectangles:
    board_width = max(board_width, r.left + r.width)
    board_height = max(board_height, r.up + r.height)
board = []
for i in range(board_height):
    board.append([0] * board_width)
# Stamp every claim onto the board, counting claims per square inch.
for r in rectangles:
    for i in range(r.height):
        y = r.up + i
        for j in range(r.width):
            x = r.left + j
            board[y][x] += 1
# part 1
total = 0
for i in range(board_height):
    for j in range(board_width):
        # booleans add as 0/1, so this tallies squares claimed at least twice
        total += board[i][j] > 1
print("Part 1: %d" % total)
def has_overlap(board, r):
    """Return True if any cell covered by claim *r* is claimed more than once."""
    rows = range(r.up, r.up + r.height)
    cols = range(r.left, r.left + r.width)
    return any(board[y][x] > 1 for y in rows for x in cols)
def find_not_overlapping(board, rectangles):
    # Part 2: find the one claim that shares no square inch with any other.
    for r in rectangles:
        if not has_overlap(board, r):
            return r
    # Falls through to an implicit None if every claim overlaps
    # (not expected for valid puzzle input).
print("Part 2: %s" % find_not_overlapping(board, rectangles).patchid)
| StarcoderdataPython |
140377 | <filename>testinvenio/records/api.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 alzp.
#
# testInvenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Records API."""
from __future__ import absolute_import, print_function
from flask import make_response, abort
from invenio_records_files.api import Record as FilesRecord
#import oarepo_actions
from oarepo_actions.decorators import action
from invenio_access.permissions import Permission, authenticated_user, any_user
from flask_principal import Need, RoleNeed
from werkzeug.wrappers import Response
def can():
    # Permission-check stub that always grants access.
    return True
class FakeNeed():
    # Stand-in for a flask-principal Need whose check always passes.
    def __init__(self):
        self.method = None
    def can(self):
        # Always grant.
        return True
class OwnerNeed():
    # Permission need intended to grant access only to a record's owner.
    def __init__(self, record):
        self.record = record
    def can(self, record):
        # NOTE(review): this checks the *argument*, not the self.record stored
        # in __init__, and hard-codes owner id 1 under the key 'Owner', while
        # the commented-out caller in pf() uses record["owner"] -- confirm the
        # intended key casing and comparison before relying on this.
        if record['Owner'] == 1:
            return True
        else:
            return False
def pf(record=None):
    # Permission factory passed to @action decorators; the commented-out
    # variants below are alternative policies kept for reference.
    # return Permission(RoleNeed('admin'))
    # return Permission(FakeNeed())
    # return Permission(OwnerNeed(record["owner"]))
    return Permission(any_user)  # currently: anyone may access
class Record(FilesRecord):
    """Custom record exposing extra REST actions via oarepo-actions."""
    # @action(url_path="blah", permissions=pf)
    # def send_email(self, **kwargs):
    #     return {"title": self["title"]}
    @classmethod
    @action(detail=False, url_path="jej/<string:param>")
    def blah1(cls, param= None,**kwargs):
        # Collection-level action at .../jej/<param>; echoes the URL
        # parameter back under the "DOI" key.
        return {"DOI": param}
    # @classmethod
    # @action(detail=False, url_path="test/<int:param>", permissions=pf,
    #         serializers={'GET': {'text/html': make_response}})
    # def test1(cls, param=None, **kwargs):
    #     print("juch")
    #     return {param: "jej"}
    #
    # @classmethod
    # @action(detail=False, url_path="a", permissions=pf, serializers={'text/html': make_response}, meth="PUT")
    # def a(cls, param=None, **kwargs):
    #     return "<h1>jej</h1>"
    #
    # @classmethod
    # @action(detail=False, permissions=pf, serializers={'text/html': make_response})
    # def test(cls, **kwargs):
    #     print("jej")
    #     return Response(status=200)
    # JSON Schema used to validate record metadata.
    _schema = "records/record-v1.0.0.json"
| StarcoderdataPython |
1693652 | <filename>Beginner/1963.py
# Read two floats A (original value) and B (new value) from one input line.
# The original wrapped map() in a redundant tuple() before unpacking.
A, B = map(float, input().split())
# Percentage growth from A to B with two decimals, e.g. "25.00%".
print("{0:.2f}%".format((B / A - 1) * 100))
| StarcoderdataPython |
3327915 | <reponame>zhaoyi3264/leetcode-solutions
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def constructFromPrePost(self, pre: List[int], post: List[int]) -> TreeNode:
        """Rebuild a binary tree from its preorder and postorder traversals.

        pre[0] is the root. pre[1] (when present) is the root of the left
        subtree, and since postorder lists a subtree's root last, its position
        in `post` gives the size of the left subtree.
        """
        if not pre:
            return None
        root = TreeNode(pre[0])
        if len(pre) == 1:
            return root
        # Size of the left subtree: everything in postorder up to and
        # including pre[1]. (Replaces the original manual while-loop scan;
        # node values are distinct in this problem, so index() is safe.)
        i = post.index(pre[1]) + 1
        root.left = self.constructFromPrePost(pre[1:i + 1], post[:i])
        root.right = self.constructFromPrePost(pre[i + 1:], post[i:-1])
        return root
| StarcoderdataPython |
86622 | <gh_stars>0
from .resellerclub import ResellerClubAPI
| StarcoderdataPython |
17818 | <gh_stars>1-10
from bs4 import BeautifulSoup
import requests
import re
from lxml.html import fromstring
from lxml.etree import ParserError
from gemlog_from_rss.spip import SinglePost
def localize(lang):
    """Return the UI string table for *lang*, falling back to English.

    The original used a repetitive if / if-else chain; a lookup table with a
    default expresses the same mapping in one place.
    """
    translations = {
        "pl": {"our_articles": "Nasze artykuły"},
        "fr": {"our_articles": "Nos articles"},
    }
    # Any unrecognized language code gets the English strings.
    return translations.get(lang, {"our_articles": "Our articles"})
class Page:
    """A single scraped web page: download it, extract the article text,
    and emit a gemtext fragment."""

    def __init__(self, title=None, content_class="article-texte"):
        self.content = None
        self.url = None
        self.title = title
        # CSS class (matched as a regex) of the <div> holding the article body.
        self.content_class = content_class

    def download_html(self, url):
        """Fetch *url* and cache the raw HTML under resources/."""
        self.url = url
        req = requests.get(self.url)
        # Cache file name: last URL path segment with any query string stripped.
        # 'with' closes the handle (the original leaked it via open().write()).
        with open(f'resources/{self.url.split("/")[-1].split("?")[0]}.html', 'wb') as f:
            f.write(req.content)

    def fetch_content(self):
        """Parse the cached HTML, filling in self.title and self.content."""
        with open(f'resources/{self.url.split("/")[-1].split("?")[0]}.html', 'r') as f:
            # Convert <strong> tags to **...** markers before parsing.
            file_content = f.read().replace("<strong>", "\n**").replace("</strong>", "**")
        soup = BeautifulSoup(file_content, 'html.parser')
        if self.title is None:
            self.title = soup.title.get_text()
        # Article text up to (excluding) any trailing "Credits" section.
        self.content = soup.find("div", class_=re.compile(self.content_class)).get_text().split("Credits")[0]

    def output_gemini(self):
        """Return the page rendered as a gemtext fragment.

        Bug fix: the original built this string and never returned it, so the
        method always yielded None.
        """
        uit = f"""
        ##{self.title}
        {self.content}
        """
        return uit
class MainPage:
    """Builds a gemlog: fetches an RSS feed, converts items to gemtext posts,
    and assembles the capsule's main index page."""

    def __init__(self, post_list=None, root_dir="blog", title=None, pages_list=None, feed=None, lang="en"):
        # Avoid mutable default arguments by creating fresh lists here.
        if post_list is None:
            post_list = []
        if pages_list is None:
            pages_list = []
        # Normalize the output directory (no trailing slash).
        if root_dir[-1] == "/":
            root_dir = root_dir[:-1]
        self.post_list = post_list
        self.title = title
        self.root_dir = root_dir
        self.pages_list = pages_list
        self.feed = feed
        # Downloading the feed first guarantees resources/all_posts.xml
        # exists for the title fallback below.
        if self.feed is not None:
            self.parse_feed()
        if self.title is None:
            # Fall back to the feed's channel <title>.
            import xml.etree.ElementTree as ET
            self.title = ET.parse("resources/all_posts.xml").getroot()[0].find("title").text
        self.lang = lang
        # Localized UI strings (e.g. the "our articles" heading).
        self.dict = localize(lang)
        # Parsed feed root; populated by callers before add_posts() is used.
        self.root = None
    def parse_feed(self):
        """Download the RSS feed and cache it at resources/all_posts.xml."""
        req = requests.get(self.feed)
        open("resources/all_posts.xml", "wb").write(req.content)
        return req.content
    def add_posts(self):
        """Convert every feed <item> with encoded content into a SinglePost."""
        for child in self.root[0].findall('item'):
            if child.find('{http://purl.org/rss/1.0/modules/content/}encoded') is not None:
                # Strip class attributes, map <h3> to gemtext headings,
                # and drop images before extracting plain text.
                substituting = re.sub(' class=".*?"', '',
                                      child.find('{http://purl.org/rss/1.0/modules/content/}encoded').text)
                substituting = re.sub('<h3>', '\n### ', substituting)
                substituting = re.sub('</h3>', '\n', substituting)
                substituting = re.sub('<img.*?>', '', substituting)
                try:
                    self.post_list.append(SinglePost(
                        child.find("title").text,
                        fromstring(substituting).text_content(),
                        child.find('{http://purl.org/dc/elements/1.1/}creator').text,
                        child.find('{http://purl.org/dc/elements/1.1/}date').text.split("T")[0])
                    )
                except ParserError:
                    # Skip items whose HTML lxml cannot parse.
                    continue
    def create_files(self):
        """Write each post's gemtext file under the root directory."""
        for post in self.post_list:
            with open(f'{self.root_dir}/{post.link}', 'w') as f:
                f.write(post.get_gemini(blog_name=self.title))
    def add_page(self, page):
        # Register one static Page for inclusion on the main page.
        self.pages_list.append(page)
    def add_pages(self, pages):
        # Register several static Pages at once.
        for page in pages:
            self.add_page(page)
    def add_pages_to_main(self):
        """Render all registered static pages as gemtext sections."""
        content = f""
        for page in self.pages_list:
            content += f"## {page.title}\n\n" \
                       f"{page.content}\n\n"
        return content
    def make_main_page(self):
        """Return the capsule's index: title, static pages, then post links."""
        content = f"""
# {self.title}
{self.add_pages_to_main()}
## {self.dict["our_articles"]}:
"""
        for post in self.post_list:
            content += f"=> {post.link} {post.title}\n\n"
        return content
| StarcoderdataPython |
121182 | # Created: 06.01.2020
# Copyright (c) 2020 <NAME>
# License: MIT License
from string import ascii_letters
from typing import Iterable
from pyparsing import *
from . import ast
ABS = Keyword('ABS')
ABSTRACT = Keyword('ABSTRACT')
ACOS = Keyword('ACOS')
AGGREGATE = Keyword('AGGREGATE')
ALIAS = Keyword('ALIAS')
AND = Keyword('AND')
ANDOR = Keyword('ANDOR')
ARRAY = Keyword('ARRAY')
AS = Keyword('AS')
ASIN = Keyword('ASIN')
ATAN = Keyword('ATAN')
BAG = Keyword('BAG')
BASED_ON = Keyword('BASED_ON')
BEGIN = Keyword('BEGIN')
BINARY = Keyword('BINARY')
BLENGTH = Keyword('BLENGTH')
BOOLEAN = Keyword('BOOLEAN')
BY = Keyword('BY')
CASE = Keyword('CASE')
CONSTANT = Keyword('CONSTANT')
CONST_E = Keyword('CONST_E')
COS = Keyword('COS')
DERIVE = Keyword('DERIVE')
DIV = Keyword('DIV')
ELSE = Keyword('ELSE')
END = Keyword('END')
END_ALIAS = Keyword('END_ALIAS')
END_CASE = Keyword('END_CASE')
END_CONSTANT = Keyword('END_CONSTANT')
END_ENTITY = Keyword('END_ENTITY')
END_FUNCTION = Keyword('END_FUNCTION')
END_IF = Keyword('END_IF')
END_LOCAL = Keyword('END_LOCAL')
END_PROCEDURE = Keyword('END_PROCEDURE')
END_REPEAT = Keyword('END_REPEAT')
END_RULE = Keyword('END_RULE')
END_SCHEMA = Keyword('END_SCHEMA')
END_SUBTYPE_CONSTRAINT = Keyword('END_SUBTYPE_CONSTRAINT')
END_TYPE = Keyword('END_TYPE')
ENTITY = Keyword('ENTITY')
ENUMERATION = Keyword('ENUMERATION')
ESCAPE = Keyword('ESCAPE')
EXISTS = Keyword('EXISTS')
EXTENSIBLE = Keyword('EXTENSIBLE')
EXP = Keyword('EXP')
FALSE = Keyword('FALSE')
FIXED = Keyword('FIXED')
FOR = Keyword('FOR')
FORMAT = Keyword('FORMAT')
FROM = Keyword('FROM')
FUNCTION = Keyword('FUNCTION')
GENERIC = Keyword('GENERIC')
GENERIC_ENTITY = Keyword('GENERIC_ENTITY')
HIBOUND = Keyword('HIBOUND')
HIINDEX = Keyword('HIINDEX')
IF = Keyword('IF')
IN = Keyword('IN')
INSERT = Keyword('INSERT')
INTEGER = Keyword('INTEGER')
INVERSE = Keyword('INVERSE')
LENGTH = Keyword('LENGTH')
LIKE = Keyword('LIKE')
LIST = Keyword('LIST')
LOBOUND = Keyword('LOBOUND')
LOCAL = Keyword('LOCAL')
LOG = Keyword('LOG')
LOG10 = Keyword('LOG10')
LOG2 = Keyword('LOG2')
LOGICAL = Keyword('LOGICAL')
LOINDEX = Keyword('LOINDEX')
MOD = Keyword('MOD')
NOT = Keyword('NOT')
NUMBER = Keyword('NUMBER')
NVL = Keyword('NVL')
ODD = Keyword('ODD')
OF = Keyword('OF')
ONEOF = Keyword('ONEOF')
OPTIONAL = Keyword('OPTIONAL')
OR = Keyword('OR')
OTHERWISE = Keyword('OTHERWISE')
PI = Keyword('PI')
PROCEDURE = Keyword('PROCEDURE')
QUERY = Keyword('QUERY')
REAL = Keyword('REAL')
REFERENCE = Keyword('REFERENCE')
REMOVE = Keyword('REMOVE')
RENAMED = Keyword('RENAMED')
REPEAT = Keyword('REPEAT')
RETURN = Keyword('RETURN')
ROLESOF = Keyword('ROLESOF')
RULE = Keyword('RULE')
SCHEMA = Keyword('SCHEMA')
SELECT = Keyword('SELECT')
SELF = Keyword('SELF')
SET = Keyword('SET')
SIN = Keyword('SIN')
SIZEOF = Keyword('SIZEOF')
SKIP = Keyword('SKIP')
SQRT = Keyword('SQRT')
STRING = Keyword('STRING')
SUBTYPE = Keyword('SUBTYPE')
SUBTYPE_CONSTRAINT = Keyword('SUBTYPE_CONSTRAINT')
SUPERTYPE = Keyword('SUPERTYPE')
TAN = Keyword('TAN')
THEN = Keyword('THEN')
TO = Keyword('TO')
TOTAL_OVER = Keyword('TOTAL_OVER')
TRUE = Keyword('TRUE')
TYPE = Keyword('TYPE')
TYPEOF = Keyword('TYPEOF')
UNIQUE = Keyword('UNIQUE')
UNKNOWN = Keyword('UNKNOWN')
UNTIL = Keyword('UNTIL')
USE = Keyword('USE')
USEDIN = Keyword('USEDIN')
VALUE = Keyword('VALUE')
VALUE_IN = Keyword('VALUE_IN')
VALUE_UNIQUE = Keyword('VALUE_UNIQUE')
VAR = Keyword('VAR')
WHERE = Keyword('WHERE')
WHILE = Keyword('WHILE')
WITH = Keyword('WITH')
XOR = Keyword('XOR')
built_in_constant = (CONST_E | PI | SELF | '?').addParseAction(ast.BuiltInConstant.action)
built_in_function = (ABS | ACOS | ASIN | ATAN | BLENGTH | COS | EXISTS | EXP | FORMAT | HIBOUND | HIINDEX | LENGTH
| LOBOUND | LOINDEX | LOG2 | LOG10 | LOG | NVL | ODD | ROLESOF | SIN | SIZEOF | SQRT | TAN
| TYPEOF | USEDIN | VALUE_IN | VALUE_UNIQUE | VALUE
).addParseAction(ast.BuiltInFunction.action)
built_in_procedure = INSERT | REMOVE.addParseAction(ast.BuiltInProcedure.action)
bit = Char('01')
digit = Char('0123456789')
digits = Word('0123456789')
sign = Char('+-')
encoded_character = Word(hexnums, exact=8)
binary_literal = Word('%', '01').addParseAction(lambda toks: int(toks[0][1:], 2)) # convert to int
# TODO: maybe ignoring leading signs [+-] for numbers will fix some errors
integer_literal = pyparsing_common.signed_integer.copy() # as int
real_literal = pyparsing_common.sci_real.copy() # as float
encoded_string_literal = Suppress('"') + \
OneOrMore(encoded_character).addParseAction(ast.StringLiteral.decode) + \
Suppress('"')
logical_literal = (FALSE | TRUE | UNKNOWN).addParseAction(ast.LogicalLiteral.action)
simple_string_literal = sglQuotedString.copy().addParseAction(ast.StringLiteral.action)
string_literal = simple_string_literal | encoded_string_literal
literal = binary_literal | logical_literal | real_literal | integer_literal | string_literal
schema_version_id = string_literal
simple_id = Word(ascii_letters, ascii_letters + '0123456789_').setParseAction(ast.SimpleID.action)
attribute_id = simple_id
constant_id = simple_id
entity_id = simple_id
enumeration_id = simple_id
function_id = simple_id
parameter_id = simple_id
procedure_id = simple_id
rule_label_id = simple_id
rule_id = simple_id
schema_id = simple_id
subtype_constraint_id = simple_id
type_label_id = simple_id
type_id = simple_id
variable_id = simple_id
rename_id = constant_id | entity_id | function_id | procedure_id | type_id
attribute_ref = attribute_id
constant_ref = constant_id
entity_ref = entity_id
enumeration_ref = enumeration_id
function_ref = function_id
parameter_ref = parameter_id
procedure_ref = procedure_id
rule_label_ref = rule_label_id
rule_ref = rule_id
schema_ref = schema_id
subtype_constraint_ref = subtype_constraint_id
type_label_ref = type_label_id
type_ref = type_id
variable_ref = variable_id
general_ref = parameter_ref | variable_ref
resource_ref = constant_ref | entity_ref | function_ref | procedure_ref | type_ref
named_types = entity_ref | type_ref
# TODO: attribute_qualifier - added OneOrMore() reason: query_expression (SELF\aaa.bbb.ccc.eee)
attribute_qualifier = OneOrMore('.' + attribute_ref)
enumeration_reference = Optional(type_ref + '.') + enumeration_ref
resource_or_rename = resource_ref + Optional(AS + rename_id)
constant_factor = built_in_constant | constant_ref
named_type_or_rename = named_types + Optional(AS + (entity_id | type_id))
population = entity_ref
group_qualifier = '\\' + entity_ref
type_label = type_label_id | type_label_ref
qualified_attribute = SELF + group_qualifier + attribute_qualifier
referenced_attribute = qualified_attribute | attribute_ref
unique_rule = Optional(rule_label_id + ':') + referenced_attribute + ZeroOrMore(',' + referenced_attribute)
null_stmt = Char(';') # pass ?
skip_stmt = SKIP + ';' # continue ?
escape_stmt = ESCAPE + ';' # break ?
add_like_op = (Char('+-') | OR | XOR).setName('add like operand')
interval_op = oneOf('< <=').setName('interval operand')
multiplication_like_op = ('||' | Char('*/|') | DIV | MOD | AND).setName('multiplication like operand')
rel_op = oneOf('< > <= >= <> = :<>: :=:').setName('relation operand')
rel_op_extended = (rel_op | IN | LIKE).setName('extended relation operand')
unary_op = (sign | NOT).setName('unary operand')
exponentiation_op = '**'
simple_factor = Forward()
factor = simple_factor + Optional(exponentiation_op + simple_factor)
term = factor + ZeroOrMore(multiplication_like_op + factor)
simple_expression = term + ZeroOrMore(add_like_op + term)
numeric_expression = simple_expression
precision_spec = numeric_expression
index = numeric_expression
index_1 = index('index_1')
index_2 = index('index_2')
index_qualifier = '[' + index_1 + Optional(':' + index_2) + ']'
width = numeric_expression
width_spec = '(' + width + ')' + Optional(FIXED)
expression = simple_expression + Optional(rel_op_extended + simple_expression)
bound_1 = numeric_expression('bound_1')
bound_2 = numeric_expression('bound_2')
bound_spec = '[' + bound_1 + ':' + bound_2 + ']'
case_label = expression('case label')
aggregate_source = simple_expression
interval_high = simple_expression
interval_item = simple_expression
interval_low = simple_expression
logical_expression = expression
parameter = expression
repetition = numeric_expression
selector = expression
increment = numeric_expression
element = expression + Optional(':' + repetition)
aggregate_initializer = '[' + Optional(element + ZeroOrMore(',' + element)) + ']'
domain_rule = Optional(rule_label_id + ':') + expression
qualifier = attribute_qualifier | group_qualifier | index_qualifier
redeclared_attribute = qualified_attribute + Optional(RENAMED + attribute_id)
increment_control = variable_id + ':=' + bound_1 + TO + bound_2 + Optional(BY + increment)
attribute_decl = attribute_id | redeclared_attribute
interval = '{' + interval_low + interval_op + interval_item + interval_op + interval_high + '}'
abstract_entity_declaration = ABSTRACT
abstract_supertype = ABSTRACT + SUPERTYPE + ';'
boolean_type = BOOLEAN
number_type = NUMBER
logical_type = LOGICAL
integer_type = INTEGER
generic_type = GENERIC + Optional(':' + type_label)
binary_type = BINARY + Optional(width_spec)
string_type = STRING + Optional(width_spec)
real_type = REAL + Optional('(' + precision_spec + ')')
concrete_types = Forward()
instantiable_type = concrete_types | entity_ref
bag_type = BAG + Optional(bound_spec) + OF + instantiable_type
array_type = ARRAY + bound_spec + OF + Optional(OPTIONAL) + Optional(UNIQUE) + instantiable_type
list_type = LIST + Optional(bound_spec) + OF + Optional(UNIQUE) + instantiable_type
set_type = SET + Optional(bound_spec) + OF + instantiable_type
aggregation_types = array_type | bag_type | list_type | set_type
simple_types = binary_type | boolean_type | integer_type | logical_type | number_type | real_type | string_type
concrete_types <<= aggregation_types | simple_types | type_ref
subtype_declaration = SUBTYPE + OF + '(' + entity_ref + ZeroOrMore(',' + entity_ref) + ')'
generalized_types = Forward()
parameter_type = generalized_types | named_types | simple_types
general_array_type = ARRAY + Optional(bound_spec) + OF + Optional(OPTIONAL) + Optional(UNIQUE) + parameter_type
general_bag_type = BAG + Optional(bound_spec) + OF + parameter_type
general_list_type = LIST + Optional(bound_spec) + OF + Optional(UNIQUE) + parameter_type
general_set_type = SET + Optional(bound_spec) + OF + parameter_type
generic_entity_type = GENERIC_ENTITY + Optional(':' + type_label)
general_aggregation_types = general_array_type | general_bag_type | general_list_type | general_set_type
aggregate_type = AGGREGATE + Optional(':' + type_label) + OF + parameter_type
generalized_types <<= aggregate_type | general_aggregation_types | generic_entity_type | generic_type
formal_parameter = parameter_id + ZeroOrMore(',' + parameter_id) + ':' + parameter_type
supertype_expression = Forward()
one_of = ONEOF + '(' + supertype_expression + ZeroOrMore(',' + supertype_expression) + ')'
actual_parameter_list = '(' + parameter + ZeroOrMore(',' + parameter) + ')'
enumeration_items = '(' + enumeration_id + ZeroOrMore(',' + enumeration_id) + ')'
enumeration_extension = BASED_ON + type_ref + Optional(WITH + enumeration_items)
enumeration_type = Optional(EXTENSIBLE) + ENUMERATION + Optional((OF + enumeration_items) | enumeration_extension)
select_list = '(' + named_types + ZeroOrMore(',' + named_types) + ')'
select_extension = BASED_ON + type_ref + Optional(WITH + select_list)
select_type = Optional(EXTENSIBLE + Optional(GENERIC_ENTITY)) + SELECT + Optional(select_list | select_extension)
constructed_types = enumeration_type | select_type
underlying_type = constructed_types | concrete_types
supertype_term = one_of | ('(' + supertype_expression + ')') | entity_ref
supertype_factor = supertype_term + ZeroOrMore(AND + supertype_term)
supertype_expression <<= supertype_factor + ZeroOrMore(ANDOR + supertype_factor)
subtype_constraint = OF + '(' + supertype_expression + ')'
supertype_rule = SUPERTYPE + subtype_constraint
abstract_supertype_declaration = ABSTRACT + SUPERTYPE + Optional(subtype_constraint)
supertype_constraint = abstract_supertype_declaration | abstract_entity_declaration | supertype_rule
subsuper = Optional(supertype_constraint) + Optional(subtype_declaration)
subtype_constraint_head = SUBTYPE_CONSTRAINT + subtype_constraint_id + FOR + entity_ref + ';'
total_over = TOTAL_OVER + '(' + entity_ref + ZeroOrMore(',' + entity_ref) + ')' + ';'
subtype_constraint_body = Optional(abstract_supertype) + Optional(total_over) + Optional(supertype_expression + ';')
subtype_constraint_decl = subtype_constraint_head + subtype_constraint_body + END_SUBTYPE_CONSTRAINT + ';'
stmt = Forward()
declaration = Forward()
return_stmt = RETURN + Optional('(' + expression + ')') + ';'
assignment_stmt = general_ref + ZeroOrMore(qualifier) + ':=' + expression + ';'
alias_stmt = ALIAS + variable_id + FOR + general_ref + ZeroOrMore(qualifier) + ';' + OneOrMore(stmt) + END_ALIAS + ';'
compound_stmt = BEGIN + OneOrMore(stmt) + END + ';'
case_action = case_label + ZeroOrMore(',' + case_label) + ':' + stmt
case_stmt = CASE + selector + OF + ZeroOrMore(case_action) + Optional(OTHERWISE + ':' + stmt) + END_CASE + ';'
local_variable = variable_id + ZeroOrMore(',' + variable_id) + ':' + parameter_type + Optional(':=' + expression) + ';'
local_decl = LOCAL + local_variable + ZeroOrMore(local_variable) + END_LOCAL + ';'
constant_body = constant_id + ':' + instantiable_type + ':=' + expression + ';'
constant_decl = CONSTANT + OneOrMore(constant_body) + END_CONSTANT + ';'
derived_attr = attribute_decl + ':' + parameter_type + ':=' + expression + ';'
derive_clause = DERIVE + OneOrMore(derived_attr)
function_call = (built_in_function | function_ref) + Optional(actual_parameter_list)
explicit_attr = attribute_decl + ZeroOrMore(',' + attribute_decl) + ':' + Optional(OPTIONAL) + parameter_type + ';'
unique_clause = UNIQUE + unique_rule + ';' + ZeroOrMore(unique_rule + ';')
while_control = WHILE + logical_expression
until_control = UNTIL + logical_expression
repeat_control = Optional(increment_control) + Optional(while_control) + Optional(until_control)
repeat_stmt = REPEAT + repeat_control + ';' + OneOrMore(stmt) + END_REPEAT + ';'
if_stmt = IF + logical_expression + THEN + OneOrMore(stmt) + Optional(ELSE + OneOrMore(stmt)) + END_IF + ';'
algorithm_head = ZeroOrMore(declaration) + Optional(constant_decl) + Optional(local_decl)
procedure_call_stmt = (built_in_procedure | procedure_ref) + Optional(actual_parameter_list) + ';'
procedure_head = PROCEDURE + procedure_id + Optional(
'(' + Optional(VAR) + formal_parameter + ZeroOrMore(';' + Optional(VAR) + formal_parameter) + ')') + ';'
procedure_decl = procedure_head + algorithm_head + ZeroOrMore(stmt) + END_PROCEDURE + ';'
# Different where clauses required, because parser need the stopOn argument!
where_clause = WHERE + OneOrMore(domain_rule + ';', stopOn=END_TYPE)
entity_where_clause = WHERE + OneOrMore(domain_rule + ';', stopOn=END_ENTITY)
rule_where_clause = WHERE + OneOrMore(domain_rule + ';', stopOn=END_RULE)
type_decl = TYPE + type_id + '=' + underlying_type + ';' + Optional(where_clause) + END_TYPE + ';'
qualifiable_factor = function_call | constant_factor | general_ref | population | attribute_ref
primary = (qualifiable_factor + ZeroOrMore(qualifier)) | literal
# TODO: restore original expression (_aggregate_source) ???
# original: query_expression = QUERY + '(' + variable_id + '<*' + aggregate_source + '|' + logical_expression + ')'
# aggregate_source = simple_expression
expr_or_primary = Optional(unary_op) + ('(' + expression + ')' | primary)
aggregate_source_ = qualified_attribute | primary | expr_or_primary
query_expression = QUERY + '(' + variable_id + '<*' + aggregate_source_ + '|' + logical_expression + ')'
function_head = FUNCTION + function_id + Optional(
'(' + formal_parameter + ZeroOrMore(';' + formal_parameter) + ')') + ':' + parameter_type + ';'
function_decl = function_head + algorithm_head + OneOrMore(stmt) + END_FUNCTION + ';'
inverse_attr = attribute_decl + ':' + Optional((SET | BAG) + Optional(bound_spec) + OF) + entity_ref + FOR + Optional(
entity_ref + '.') + attribute_ref + ';'
inverse_clause = INVERSE + OneOrMore(inverse_attr)
entity_constructor = entity_ref + '(' + Optional(expression + ZeroOrMore(',' + expression)) + ')'
entity_head = ENTITY + entity_id + subsuper + ';'
entity_body = ZeroOrMore(explicit_attr) + Optional(derive_clause) + Optional(inverse_clause) + Optional(
unique_clause) + Optional(entity_where_clause)
entity_decl = entity_head + entity_body + END_ENTITY + ';'
rule_head = RULE + rule_id + FOR + '(' + entity_ref + ZeroOrMore(',' + entity_ref) + ')' + ';'
rule_decl = rule_head + algorithm_head + ZeroOrMore(stmt) + rule_where_clause + END_RULE + ';'
reference_clause = REFERENCE + FROM + schema_ref + Optional(
'(' + resource_or_rename + ZeroOrMore(',' + resource_or_rename) + ')') + ';'
use_clause = USE + FROM + schema_ref + Optional(
'(' + named_type_or_rename + ZeroOrMore(',' + named_type_or_rename) + ')') + ';'
interface_specification = reference_clause | use_clause
schema_body = ZeroOrMore(interface_specification) + Optional(constant_decl) + ZeroOrMore(declaration | rule_decl)
schema_decl = SCHEMA + schema_id + Optional(schema_version_id) + ';' + schema_body + END_SCHEMA + ';'
# Resolving forward declarations: fill in the rules declared with Forward()
# now that every alternative they reference exists.
simple_factor <<= entity_constructor | query_expression | expr_or_primary | aggregate_initializer | enumeration_reference | interval
# NOTE: the original called simple_factor.addParseAction() with *no* arguments
# here, which registers nothing in pyparsing and was a no-op (most likely a
# leftover stub for an ast.* action); the dead call has been removed.
declaration <<= entity_decl | function_decl | procedure_decl | subtype_constraint_decl | type_decl
stmt <<= alias_stmt | assignment_stmt | case_stmt | compound_stmt | if_stmt | procedure_call_stmt | repeat_stmt | return_stmt | skip_stmt | escape_stmt | null_stmt
# Start
syntax = OneOrMore(schema_decl)
# White space enabled for detecting tail remarks
spaces = Suppress(ZeroOrMore(White(' \t')))
remark_tag = spaces + simple_id + ZeroOrMore('.' + simple_id)
tail_remark = ('--' + OneOrMore(remark_tag) + spaces + Suppress(LineEnd())).setName("Tail Remark")
tail_remark.leaveWhitespace()
# Replaced by the 'comments' rule
# embedded_remark = Forward()
# embedded_remark <<= ('(*' + OneOrMore(remark_tag) + ZeroOrMore(
# OneOrMore(not_paren_star) | lparen_then_not_lparen_star | OneOrMore(
# '*') | not_rparen_star_then_rparen | embedded_remark) + '*)')
#
# remark = embedded_remark | tail_remark
# Combine(Regex(r"/_\*(?:[^*]|_\*(?!_/))*") + '*/').setName("C style comment")
comments = Combine(Regex(r"\(\*(?:[^*]|\*(?!\)))*") + '*)').setName("Express Comment")
syntax.ignore(comments)
syntax.ignore(tail_remark)
class Tokens:
    """Helper class for testing: an immutable token sequence comparable both
    to other Tokens instances and to any iterable of token strings."""

    def __init__(self, it: Iterable):
        self._tokens = tuple(it)

    def __eq__(self, other):
        if type(other) is type(self):
            # Bug fix: the original compared against `other.nodes`, an
            # attribute that does not exist (raising AttributeError when two
            # Tokens were compared); the stored tuple is `_tokens`.
            return self._tokens == other._tokens
        # compare with iterable of string tokens, just for testing
        elif isinstance(other, Iterable):
            return tuple(self.string_tokens) == tuple(other)
        else:
            return NotImplemented

    def __hash__(self):
        return hash(self._tokens)

    def __len__(self):
        return len(self._tokens)

    def __getitem__(self, item):
        return self._tokens[item]

    def __str__(self):
        return ' '.join(self.string_tokens)

    @property
    def string_tokens(self) -> Iterable:
        """Yield the flat string form of every token, recursing into nested
        containers that expose their own `string_tokens`."""
        for t in self._tokens:
            if hasattr(t, 'string_tokens'):
                yield from t.string_tokens
            else:
                yield str(t)
| StarcoderdataPython |
3312614 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests scenarios for pausing/resuming a microVM."""
import host_tools.network as net_tools
def test_pause_resume(test_microvm_with_ssh, network_config):
    """Test pausing and resuming the vCPUs.

    Exercises the full state machine: Pause/Resume are rejected before boot,
    Pause stops guest execution (SSH commands fail), both operations are
    idempotent, and Resume restores guest activity.
    """
    test_microvm = test_microvm_with_ssh
    test_microvm.spawn()
    # Set up the microVM with 2 vCPUs, 256 MiB of RAM and a root file
    # system with the rw permission.
    test_microvm.basic_config()
    # Create tap before configuring interface.
    _tap1, _, _ = test_microvm.ssh_network_config(network_config, '1')
    # Pausing the microVM before being started is not allowed.
    response = test_microvm.vm.patch(state='Paused')
    assert test_microvm.api_session.is_status_bad_request(response.status_code)
    # Resuming the microVM before being started is also not allowed.
    response = test_microvm.vm.patch(state='Resumed')
    assert test_microvm.api_session.is_status_bad_request(response.status_code)
    # Start microVM.
    test_microvm.start()
    ssh_connection = net_tools.SSHConnection(test_microvm.ssh_config)
    # Verify guest is active (exit code 0 means the command ran in the guest).
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0
    # Pausing the microVM after it's been started is successful.
    response = test_microvm.vm.patch(state='Paused')
    assert test_microvm.api_session.is_status_no_content(response.status_code)
    # Verify guest is no longer active.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code != 0
    # Pausing the microVM when it is already `Paused` is allowed
    # (microVM remains in `Paused` state).
    response = test_microvm.vm.patch(state='Paused')
    assert test_microvm.api_session.is_status_no_content(response.status_code)
    # Resuming the microVM is successful.
    response = test_microvm.vm.patch(state='Resumed')
    assert test_microvm.api_session.is_status_no_content(response.status_code)
    # Verify guest is active again.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0
    # Resuming the microVM when it is already `Resumed` is allowed
    # (microVM remains in the running state).
    response = test_microvm.vm.patch(state='Resumed')
    assert test_microvm.api_session.is_status_no_content(response.status_code)
    # Verify guest is still active.
    exit_code, _, _ = ssh_connection.execute_command("ls")
    assert exit_code == 0
| StarcoderdataPython |
3291080 | # -*- coding: utf-8 -*-
from collections import deque
from proxy2 import *
class SSLStripRequestHandler(ProxyRequestHandler):
    """Proxy handler that rewrites https:// links to http:// in responses
    (SSL stripping) and upgrades matching requests back to https upstream."""
    # Recently downgraded URLs, so request_handler can re-upgrade them.
    # Bounded deque evicts the oldest entries automatically.
    replaced_urls = deque(maxlen=1024)

    def request_handler(self, req, req_body):
        # If we previously stripped this URL, restore https before forwarding.
        if req.path in self.replaced_urls:
            req.path = req.path.replace('http://', 'https://')

    def response_handler(self, req, req_body, res, res_body):
        def replacefunc(m):
            # Downgrade the matched https URL and remember it for later upgrade.
            http_url = "http://" + m.group(1)
            self.replaced_urls.append(http_url)
            return http_url
        re_https_url = r"https://([-_.!~*'()a-zA-Z0-9;/?:@&=+$,%]+)"
        # Rewrite redirect targets as well as the response body itself.
        if 'Location' in res.headers:
            res.headers['Location'] = re.sub(re_https_url, replacefunc, res.headers['Location'])
        return re.sub(re_https_url, replacefunc, res_body)
if __name__ == '__main__':
test(HandlerClass=SSLStripRequestHandler)
| StarcoderdataPython |
4841113 | <filename>kdeepmodel/transformer/detr.py
#!/usr/bin/env python
"""End-to-End Object Detection with Transformers
https://arxiv.org/abs/2005.12872
https://github.com/facebookresearch/detr
https://colab.research.google.com/github/facebookresearch/detr/blob/colab/notebooks/detr_demo.ipynb#scrollTo=h91rsIPl7tVl
https://github.com/facebookresearch/detr/blob/master/main.py
"""
import torch
from torch import nn
from torchvision.models import resnet50
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.transform import resize
import torchvision.transforms as T
import numpy as np
class DETR(nn.Module):
    """Minimal DETR (DEtection TRansformer) demo model.

    A ResNet-50 backbone feeds a standard ``nn.Transformer``; two linear
    heads predict class logits and normalized bounding boxes for a fixed
    set of 100 learned object queries.
    """
    def __init__(self, num_classes, hidden_dim, nheads, num_encoder_layers, num_decoder_layers):
        super().__init__()
        # ResNet-50 without its final avg-pool and fc layers -> 2048-channel feature map.
        self.backbone = nn.Sequential(*list(resnet50(pretrained=True).children())[:-2])
        # 1x1 conv projects the 2048 backbone channels down to the transformer width.
        self.conv = nn.Conv2d(2048, hidden_dim, 1)
        self.transformer = nn.Transformer(hidden_dim, nheads, num_encoder_layers, num_decoder_layers)
        # num_classes + 1 logits: the extra slot presumably is the "no object"
        # class used by DETR — see the paper linked in the module header.
        self.linear_class = nn.Linear(hidden_dim, num_classes+1)
        self.linear_bbox = nn.Linear(hidden_dim, 4)
        # 100 learned object queries plus learned 2-D positional encodings
        # (row/col halves are concatenated to hidden_dim per feature-map cell;
        # 50x50 cells maximum).
        self.query_pos = nn.Parameter(torch.rand(100, hidden_dim))
        self.row_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
        self.col_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
    def forward(self, inputs):
        """Return ``(class_logits, boxes)`` for the 100 object queries.

        NOTE(review): the queries are unsqueezed to batch size 1 below, so
        this forward pass appears to assume a single-image batch — confirm.
        """
        x = self.backbone(inputs)
        h = self.conv(x)
        H, W = h.shape[-2:]
        # Build the (H*W, 1, hidden_dim) positional grid by broadcasting the
        # column embeddings across rows and the row embeddings across columns.
        pos = torch.cat([
            self.col_embed[:W].unsqueeze(0).repeat(H, 1, 1),
            self.row_embed[:H].unsqueeze(1).repeat(1, W, 1)
        ], dim=-1).flatten(0, 1).unsqueeze(1)
        # Flatten spatial dims to a sequence: (seq=H*W, batch, hidden_dim).
        h = self.transformer(pos + h.flatten(2).permute(2, 0, 1), self.query_pos.unsqueeze(1))
        # Boxes are squashed to [0, 1] (normalized coordinates).
        return self.linear_class(h), self.linear_bbox(h).sigmoid()
if __name__ == '__main__':
    # NOTE(review): argparse is imported but never used in this demo.
    import argparse
    import matplotlib.pyplot as plt
    params = {}
    # Grayscale chest X-ray test image (single channel on disk).
    params['image'] = '/home/ktdiedrich/data/lung-xray/ChinaSet_AllFiles/CXR_png/CHNCXR_0497_1.png'
    img = imread(params['image'])
    rimg = resize(img, (1200, 800))
    # Stack the grayscale image three times to fake an RGB input for the
    # ResNet backbone -> shape (3, 1200, 800).
    img_rgb = np.array(([rimg] * 3))
    transform = T.Compose([T.ToTensor()])
    detr = DETR(num_classes=91, hidden_dim=256, nheads=8, num_encoder_layers=6, num_decoder_layers=6)
    detr.eval()
    # NOTE(review): verify the axis order produced by ToTensor + permute
    # really ends up as (1, channels, height, width) for the stack above.
    tens_img = transform(img_rgb).permute((1, 0, 2)).unsqueeze(0).float()
    inputs = torch.randn(1, 3, 800, 1200)
    # Smoke test on random noise with the COCO-sized (91-class) model.
    logits, boxes = detr(inputs)
    detr1 = DETR(num_classes=1, hidden_dim=256, nheads=8, num_encoder_layers=6, num_decoder_layers=6)
    # NOTE(review): detr1 is never put in eval() mode, so transformer dropout
    # is active during this forward pass — confirm that is intended.
    img_logits, img_boxes = detr1(tens_img)
    print("fin")
| StarcoderdataPython |
3352427 | <gh_stars>0
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import subprocess
import json
from knack.log import get_logger
from knack.prompting import prompt
from knack.util import CLIError
from azext_aks_deploy.dev.common.prompting import prompt_user_friendly_choice_list
logger = get_logger(__name__)
def get_default_subscription_info():
    """
    Returns the Id, name, tenantID and environmentName of the default subscription
    None if no default is set or no subscription is found
    """
    from azure.cli.core._profile import Profile
    profile = Profile()
    # Touch the account first so the cached subscription list is populated.
    _dummy = profile.get_current_account_user()
    default = next(
        (sub for sub in profile.load_cached_subscriptions(False) if sub['isDefault']),
        None)
    if default is not None:
        return default['id'], default['name'], default['tenantId'], default['environmentName']
    logger.debug('Your account does not have a default Azure subscription. Please run \'az login\' to setup account.')
    return None, None, None, None
def create_aks_cluster(cluster_name, resource_group):
    """Create an AKS cluster via the Azure CLI and return its JSON description.

    :param cluster_name: name of the cluster to create.
    :param resource_group: resource group in which to create the cluster.
    :returns: the parsed JSON output of ``az aks create``.
    :raises CLIError: if the CLI invocation or JSON parsing fails.
    """
    _subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
    logger.warning('Using your default Azure subscription %s for creating new AKS cluster. '
                   'This may take several minutes', subscription_name)
    try:
        # NOTE(review): shell=True with interpolated names is shell-injection
        # prone if cluster_name/resource_group ever come from untrusted input.
        aks_create = subprocess.check_output(('az aks create --name {cluster_name} -g {group_name} -o json').format(
            cluster_name=cluster_name, group_name=resource_group), shell=True)
        aks_create_json = json.loads(aks_create)
        return aks_create_json
    except Exception as ex:
        # Chain the original exception so the root cause stays in the traceback.
        raise CLIError(ex) from ex
def create_acr(registry_name, resource_group, sku):
    """Create an Azure Container Registry via the CLI and return its JSON description.

    :param registry_name: name of the registry to create.
    :param resource_group: resource group in which to create the registry.
    :param sku: registry SKU (e.g. 'Basic', 'Standard', 'Premium').
    :returns: the parsed JSON output of ``az acr create``.
    :raises CLIError: if the CLI invocation or JSON parsing fails.
    """
    _subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
    logger.warning('Using your default Azure subscription %s for creating new Azure Container Registry.',
                   subscription_name)
    try:
        # NOTE(review): shell=True with interpolated names is shell-injection
        # prone if the arguments ever come from untrusted input.
        acr_create = subprocess.check_output(
            ('az acr create --name {acr_name} --sku {sku} -g {group_name} -o json')
            .format(acr_name=registry_name, sku=sku, group_name=resource_group), shell=True)
        acr_create = json.loads(acr_create)
        return acr_create
    except Exception as ex:
        # Chain the original exception so the root cause stays in the traceback.
        raise CLIError(ex) from ex
def create_functionapp(_app_name, _resource_group, show_warning=True):
_subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
if show_warning:
logger.warning('Using your default Azure subscription %s for creating new Azure Container Registry.',
subscription_name)
# Todo-atbagga
# place holder function et to implement
return ""
def get_resource_group():
_subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
logger.warning("Using your default Azure subscription %s for fetching Resource Groups.", subscription_name)
group_list = subprocess.check_output('az group list -o json', shell=True)
group_list = json.loads(group_list)
if not group_list:
return None
group_choice = 0
group_choice_list = []
for group in group_list:
group_choice_list.append(group['name'])
group_choice = prompt_user_friendly_choice_list(
"In which resource group do you want to create your resources?", group_choice_list)
return group_list[group_choice]['name']
def get_aks_details(name=None):
_subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
logger.warning("Using your default Azure subscription %s for fetching AKS clusters.", subscription_name)
aks_list = subprocess.check_output('az aks list -o json', shell=True)
aks_list = json.loads(aks_list)
if not aks_list:
# Do we want to fail here??
return None
cluster_choice = 0
cluster_choice_list = []
for aks_cluster in aks_list:
if not name:
cluster_choice_list.append(aks_cluster['name'])
elif name.lower() == aks_cluster['name'].lower():
return aks_cluster
if name is not None:
raise CLIError('Cluster with name {} could not be found. Please check using command az aks list.'
.format(name))
cluster_choice_list.append('Create a new AKS cluster')
cluster_details = None
cluster_choice = prompt_user_friendly_choice_list(
"Which kubernetes cluster do you want to target?", cluster_choice_list)
if cluster_choice == len(cluster_choice_list) - 1:
cluster_name = prompt('Please enter name of the cluster to be created: ')
resource_group = get_resource_group()
# check if cluster already exists
for aks_cluster in aks_list:
if cluster_name.lower() == aks_cluster['name'].lower() and aks_cluster['resourceGroup']:
logger.warning('AKS cluster with the same name already exists. Using the existing cluster.')
cluster_details = aks_cluster
if not cluster_details:
cluster_details = create_aks_cluster(cluster_name, resource_group)
else:
cluster_details = aks_list[cluster_choice]
return cluster_details
def get_acr_details(name=None):
_subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
logger.warning("Using your default Azure subscription %s for fetching Azure Container Registries.",
subscription_name)
acr_list = subprocess.check_output('az acr list -o json', shell=True)
acr_list = json.loads(acr_list)
if not acr_list:
return None
registry_choice = 0
registry_choice_list = []
for acr_clusters in acr_list:
if not name:
registry_choice_list.append(acr_clusters['name'])
elif name.lower() == acr_clusters['name'].lower():
return acr_clusters
if name is not None:
raise CLIError('Container Registry with name {} could not be found. '
'Please check using command az acr list.'.format(name))
registry_choice_list.append('Create a new Azure Container Registry')
registry_choice = prompt_user_friendly_choice_list(
"Which Azure Container Registry do you want to use?", registry_choice_list)
acr_details = None
if registry_choice == len(registry_choice_list) - 1:
registry_name = prompt('Please enter name of the Azure Container Registry to be created: ')
for registry in acr_list:
if registry_name.lower() == registry['name'].lower():
logger.warning('Azure Container Registry with the same name already exists. '
'Using the existing registry.')
acr_details = registry
if not acr_details:
sku = get_sku()
resource_group = get_resource_group()
acr_details = create_acr(registry_name, resource_group, sku)
else:
acr_details = acr_list[registry_choice]
return acr_details
def get_functionapp_details(name=None):
_subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
logger.warning("Using your default Azure subscription %s for fetching Functionapps.", subscription_name)
functionapp_list = subprocess.check_output('az functionapp list -o json', shell=True)
functionapp_list = json.loads(functionapp_list)
if not functionapp_list:
logger.debug("No Functionapp deployments found in your Azure subscription.")
functionapp_choice = 0
app_choice_list = []
for app in functionapp_list:
if not name:
app_choice_list.append(app['name'])
elif name.lower() == app['name'].lower():
return app
if name is not None:
raise CLIError('Functionapp with name {} could not be found. Please check using command az functionapp list.'
.format(name))
app_choice_list.append('Create a new Functionapp.')
app_details = None
functionapp_choice = prompt_user_friendly_choice_list(
"Which Functionapp do you want to target?", app_choice_list)
if functionapp_choice == len(app_choice_list) - 1:
app_name = prompt('Please enter name of the Functionapp to be created: ')
resource_group = get_resource_group()
# check if app already exists
for app in functionapp_list:
if app_name.lower() == app['name'].lower() and app['resourceGroup']:
logger.warning('Functionapp with the same name already exists. Using the existing app.')
app_details = app
if not app_details:
app_details = create_functionapp(app_name, resource_group)
else:
app_details = functionapp_list[functionapp_choice]
return app_details
def get_sku():
    """Interactively ask the user which container-registry SKU to use."""
    choices = ['Basic', 'Classic', 'Premium', 'Standard']
    selected_index = prompt_user_friendly_choice_list(
        "Select the SKU of the container registry?", choices)
    return choices[selected_index]
def configure_aks_credentials(cluster_name, resource_group):
    """Fetch kubeconfig credentials for an AKS cluster via ``az aks get-credentials``.

    :param cluster_name: name of the target cluster.
    :param resource_group: resource group that contains the cluster.
    :raises CLIError: if the CLI invocation fails.
    """
    try:
        # Underscore-prefix the unused values, matching the convention used by
        # every other helper in this module.
        _subscription_id, subscription_name, _tenant_id, _environment_name = get_default_subscription_info()
        logger.warning("Using your default Azure subscription %s for getting AKS cluster credentials.",
                       subscription_name)
        _aks_creds = subprocess.check_output(
            'az aks get-credentials -n {cluster_name} -g {group_name} -o json'.format(
                cluster_name=cluster_name, group_name=resource_group), shell=True)
    except Exception as ex:
        # Chain the original exception so the root cause stays in the traceback.
        raise CLIError(ex) from ex
| StarcoderdataPython |
1751537 | <reponame>MarcoMene/epidemics-suppression<gh_stars>0
"""
Contains a function that runs the algorithm several times, each with a different choice of the input parameters.
"""
from bsp_epidemic_suppression_model.model_utilities.epidemic_data import (
make_scenario_parameters_for_asymptomatic_symptomatic_model,
)
from bsp_epidemic_suppression_model.model_utilities.scenario import Scenario
from bsp_epidemic_suppression_model.math_utilities.functions_utils import (
DeltaMeasure,
RealRange,
)
from bsp_epidemic_suppression_model.algorithm.model_blocks import effectiveness_from_R
from bsp_epidemic_suppression_model.algorithm.time_evolution_main_function import (
compute_time_evolution,
)
from bsp_epidemic_suppression_model.math_utilities.functions_utils import round2
import warnings
warnings.filterwarnings("ignore")
def time_evolution_with_varying_parameters():
    """
    Run the algorithm several times, each with a different choice of the parameters s^S, s^C, xi, and p^app.

    For every combination of the varying parameters, a full time evolution is
    computed and one LaTeX table row with the limit values R_infty and
    Eff_infty is printed.
    """
    # Horizon and step used for the integration range over tau.
    tau_max = 30
    integration_step = 0.1
    # Number of evolution steps computed for each scenario.
    n_iterations = 8
    # gs = [asymptomatic, symptomatic]
    p_gs, beta0_gs = make_scenario_parameters_for_asymptomatic_symptomatic_model()
    # Fixed parameters for people without the app (presumably the s^S and s^C
    # probabilities and the notification-delay positions — confirm against
    # the Scenario definition).
    ssnoapp = 0.2
    scnoapp = 0.2
    DeltaATapp = 2
    DeltaATnoapp = 4
    # Varying parameters
    ssapp_list = [0.2, 0.5, 0.8]
    scapp_list = [0.5, 0.8]
    xi_list = [0.7, 0.9]
    papp_list = [0.2, 0.5, 0.7, 0.9]
    for ssapp in ssapp_list:
        for scapp in scapp_list:
            for xi in xi_list:
                for papp in papp_list:
                    scenario = Scenario(
                        p_gs=p_gs,
                        beta0_gs=beta0_gs,
                        t_0=0,
                        ssapp=[0, ssapp],
                        ssnoapp=[0, ssnoapp],
                        scapp=scapp,
                        scnoapp=scnoapp,
                        xi=xi,
                        # NOTE(review): this lambda late-binds papp; that is
                        # safe only because the scenario is consumed within
                        # this same loop iteration.
                        papp=lambda tau: papp,
                        p_DeltaATapp=DeltaMeasure(position=DeltaATapp),
                        p_DeltaATnoapp=DeltaMeasure(position=DeltaATnoapp),
                    )
                    step_data_list = compute_time_evolution(
                        scenario=scenario,
                        real_range=RealRange(0, tau_max, integration_step),
                        n_iterations=n_iterations,
                        verbose=False,
                    )
                    # Limit reproduction number and derived effectiveness.
                    Rinfty = step_data_list[-1].R
                    Effinfty = effectiveness_from_R(Rinfty)
                    # One LaTeX table row per parameter combination.
                    print(
                        f" {ssapp} & {scapp} & {xi} & {papp} & {round2(Rinfty)} & {round2(Effinfty)} \\\ "
                    )
| StarcoderdataPython |
8955 | <gh_stars>0
try:
from tango import DeviceProxy, DevError
except ModuleNotFoundError:
pass
class PathFixer(object):
    """Trivial path fixer whose target directory is assigned manually by the caller."""

    def __init__(self):
        # No directory until the owner sets one explicitly.
        self.directory = None
class SdmPathFixer(object):
    """
    MAX IV pathfixer which takes a path from a Tango device.
    """
    def __init__(self, sdm_device):
        # sdm_device: Tango device name/address of the SDM path provider.
        self.device = DeviceProxy(sdm_device)
        # Maximum number of read attempts before falling back to the cache.
        self.TRIALS = 10
        # Last successfully read path (None until the first successful read).
        self.cache = None
    @property
    def directory(self):
        """Current SDM sample path, re-read from Tango on every access.

        Retries up to TRIALS times; if every attempt raises DevError, the
        last successfully read value is returned instead (or None if there
        has never been a successful read).
        """
        for trial in range(self.TRIALS):
            try:
                val = self.device.SamplePath
                # Remember the value for future fallback before returning it.
                self.cache = val
                return val
            except DevError:
                print('Failed in getting SDM path from Tango. Trying again...')
        print('Failed %u times, using cached value: %s'
              % (self.TRIALS, self.cache))
        return self.cache
| StarcoderdataPython |
1679347 | <gh_stars>0
# -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
import unittest
import torch
# Package import
import pynet
class TestModels(unittest.TestCase):
""" Test the models defined in pynet.
"""
def setUp(self):
""" Setup test.
"""
self.networks = pynet.get_tools()["networks"]
self.x2 = torch.randn(1, 1, 127, 128)
self.x3 = torch.randn(1, 1, 64, 64, 64)
def tearDown(self):
""" Run after each test.
"""
pass
def test_unet(self):
""" Test the UNet.
"""
params = {
"num_classes": 2,
"in_channels": 1,
"depth": 3,
"start_filts": 16,
"up_mode": "upsample",
"merge_mode": "concat",
"batchnorm": False,
"input_shape": self.x2.shape[2:]
}
net = self.networks["UNet"](dim="2d", **params)
y = net(self.x2)
def test_nvnet(self):
""" Test the NvNet.
"""
params = {
"input_shape": self.x3.shape[2:],
"in_channels": 1,
"num_classes": 2,
"activation": "relu",
"normalization": "group_normalization",
"mode": "trilinear",
"with_vae": False
}
net = self.networks["NvNet"](**params)
y = net(self.x3)
def test_vtnet(self):
""" Test the VTNet.
"""
params = {
"input_shape": self.x3.shape[2:],
"in_channels": 2,
"kernel_size": 3,
"padding": 1,
"flow_multiplier": 1,
"nb_channels": 16
}
net = self.networks["VTNet"](**params)
y = net(torch.cat((self.x3, self.x3), dim=1))
def test_addnet(self):
""" Test the ADDNet.
"""
params = {
"input_shape": self.x3.shape[2:],
"in_channels": 2,
"kernel_size": 3,
"padding": 1,
"flow_multiplier": 1
}
net = self.networks["ADDNet"](**params)
y = net(torch.cat((self.x3, self.x3), dim=1))
def test_voxelmorphnet(self):
""" Test the VoxelMorphNet.
"""
params = {
"vol_size": self.x3.shape[2:],
"enc_nf": [16, 32, 32, 32],
"dec_nf": [32, 32, 32, 32, 32, 16, 16],
"full_size": True
}
net = self.networks["VoxelMorphNet"](**params)
y = net(torch.cat((self.x3, self.x3), dim=1))
def test_rcnet(self):
""" Test the RCNet.
"""
params = {
"input_shape": self.x3.shape[2:],
"in_channels": 1,
"base_network": "VTNet",
"n_cascades": 1,
"rep": 1
}
net = self.networks["RCNet"](**params)
y = net(self.x3)
def test_brainnetcnn(self):
""" Test the BrainNetCNN.
"""
params = {
"input_shape": self.x3.shape[2: -1],
"in_channels": 1,
"num_classes": 2,
"nb_e2e": 32,
"nb_e2n": 64,
"nb_n2g": 30,
"dropout": 0.5,
"leaky_alpha": 0.33,
"twice_e2e": False,
"dense_sml": True
}
net = self.networks["BrainNetCNN"](**params)
y = net(self.x3[..., 0])
def test_pspnet(self):
""" Test the PSPNet.
"""
params = {
"n_classes": 2,
"sizes": (1, 2, 3, 6),
"psp_size": 512,
"deep_features_size": 256,
"backend": "resnet18",
"drop_rate": 0
}
net = self.networks["PSPNet"](**params)
y = net(self.x3[..., 0])
def test_deeplabnet(self):
""" Test the DeepLabNet.
"""
params = {
"n_classes": 2,
"drop_rate": 0
}
net = self.networks["DeepLabNet"](**params)
y = net(self.x3[..., 0])
if __name__ == "__main__":
from pynet.utils import setup_logging
setup_logging(level="debug")
unittest.main()
| StarcoderdataPython |
1687093 | <gh_stars>0
"""
General utilities.
MIT license.
Copyright (c) 2017 <NAME> <<EMAIL>>
"""
from markdown.inlinepatterns import InlineProcessor
import xml.etree.ElementTree as etree
from collections import namedtuple
import sys
import copy
import re
import html
from urllib.request import pathname2url, url2pathname
from urllib.parse import urlparse
RE_WIN_DRIVE_LETTER = re.compile(r"^[A-Za-z]$")
RE_WIN_DRIVE_PATH = re.compile(r"^[A-Za-z]:(?:\\.*)?$")
RE_URL = re.compile('(http|ftp)s?|data|mailto|tel|news')
RE_WIN_DEFAULT_PROTOCOL = re.compile(r"^///[A-Za-z]:(?:/.*)?$")
if sys.platform.startswith('win'):
_PLATFORM = "windows"
elif sys.platform == "darwin": # pragma: no cover
_PLATFORM = "osx"
else:
_PLATFORM = "linux"
def is_win(): # pragma: no cover
"""Is Windows."""
return _PLATFORM == "windows"
def is_linux(): # pragma: no cover
"""Is Linux."""
return _PLATFORM == "linux"
def is_mac(): # pragma: no cover
"""Is macOS."""
return _PLATFORM == "osx"
def url2path(path):
    """Convert a percent-encoded URL path to an OS path (was mislabeled "Path to URL")."""
    return url2pathname(path)
def path2url(url):
    """Convert an OS path to a percent-encoded URL path (was mislabeled "URL to path").

    Note: despite the parameter name, ``url`` is the input *path*.
    """
    path = pathname2url(url)
    # If on windows, replace the notation to use a default protocol `///` with nothing.
    if is_win() and RE_WIN_DEFAULT_PROTOCOL.match(path):
        path = path.replace('///', '', 1)
    return path
def get_code_points(s):
    """Return the Unicode code points of *s* as a list of single-character strings."""
    # Iterating a str already yields one character per code point;
    # list(s) is the idiomatic form of the former identity comprehension.
    return list(s)


def get_ord(c):
    """Return the Unicode ordinal of the single character *c*."""
    return ord(c)


def get_char(value):
    """Return the character for the Unicode ordinal *value*."""
    return chr(value)
def escape_chars(md, echrs):
    """
    Add chars to the escape list.

    Works on a copy of ``md.ESCAPED_CHARS`` so the shared global list is
    never mutated in place; only this Markdown instance is affected.
    """
    escaped = copy.copy(md.ESCAPED_CHARS)
    # De-duplicate the requested characters, then keep only the new ones.
    additions = [ch for ch in dict.fromkeys(echrs) if ch not in escaped]
    escaped.extend(additions)
    md.ESCAPED_CHARS = escaped
def parse_url(url):
    """
    Parse the URL.

    Try to determine if the following is a file path or
    (as we will call anything else) a URL.

    We return it slightly modified and combine the path parts.

    We also assume if we see something like c:/ it is a Windows path.
    We don't bother checking if this **is** a Windows system, but
    'nix users really shouldn't be creating weird names like c: for their folder.

    Returns an 8-tuple
    ``(scheme, netloc, path, params, query, fragment, is_url, is_absolute)``
    where the first six entries mirror ``urlparse`` (possibly rewritten for
    Windows-style file paths) and the two booleans classify the input.
    """
    is_url = False
    is_absolute = False
    # HTML-unescape first so entity-encoded URLs parse the same as plain ones.
    scheme, netloc, path, params, query, fragment = urlparse(html.unescape(url))
    if RE_URL.match(scheme):
        # Clearly a URL
        is_url = True
    elif scheme == '' and netloc == '' and path == '':
        # Maybe just a URL fragment
        is_url = True
    elif scheme == 'file' and (RE_WIN_DRIVE_PATH.match(netloc)):
        # file://c:/path or file://c:\path
        path = '/' + (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file' and netloc.startswith('\\'):
        # file://\c:\path or file://\\path
        path = (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file':
        # file:///path
        is_absolute = True
    elif RE_WIN_DRIVE_LETTER.match(scheme):
        # c:/path — a bare drive letter parses as the URL "scheme".
        path = '/%s:%s' % (scheme, path.replace('\\', '/'))
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme == '' and netloc != '' and url.startswith('//'):
        # //file/path — UNC-style share, folded back into the path.
        path = '//' + netloc + path
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme != '' and netloc != '':
        # A non-file path or strange URL
        is_url = True
    elif path.startswith(('/', '\\')):
        # /root path
        is_absolute = True
    return (scheme, netloc, path, params, query, fragment, is_url, is_absolute)
class PatSeqItem(namedtuple('PatSeqItem', ['pattern', 'builder', 'tags'])):
    """Pattern sequence item: a compiled pattern, its builder kind, and its tag spec."""
class PatternSequenceProcessor(InlineProcessor):
"""Processor for handling complex nested patterns such as strong and em matches."""
PATTERNS = []
def build_single(self, m, tag, idx):
"""Return single tag."""
el1 = etree.Element(tag)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
return el1
def build_double(self, m, tags, idx):
"""Return double tag."""
tag1, tag2 = tags.split(",")
el1 = etree.Element(tag1)
el2 = etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el2, None, idx)
el1.append(el2)
if len(m.groups()) == 3:
text = m.group(3)
self.parse_sub_patterns(text, el1, el2, idx)
return el1
def build_double2(self, m, tags, idx):
"""Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""
tag1, tag2 = tags.split(",")
el1 = etree.Element(tag1)
el2 = etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
text = m.group(3)
el1.append(el2)
self.parse_sub_patterns(text, el2, None, idx)
return el1
def parse_sub_patterns(self, data, parent, last, idx):
"""
Parses sub patterns.
`data` (`str`):
text to evaluate.
`parent` (`etree.Element`):
Parent to attach text and sub elements to.
`last` (`etree.Element`):
Last appended child to parent. Can also be None if parent has no children.
`idx` (`int`):
Current pattern index that was used to evaluate the parent.
"""
offset = 0
pos = 0
length = len(data)
while pos < length:
# Find the start of potential emphasis or strong tokens
if self.compiled_re.match(data, pos):
matched = False
# See if the we can match an emphasis/strong pattern
for index, item in enumerate(self.PATTERNS):
# Only evaluate patterns that are after what was used on the parent
if index <= idx:
continue
m = item.pattern.match(data, pos)
if m:
# Append child nodes to parent
# Text nodes should be appended to the last
# child if present, and if not, it should
# be added as the parent's text node.
text = data[offset:m.start(0)]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
el = self.build_element(m, item.builder, item.tags, index)
parent.append(el)
last = el
# Move our position past the matched hunk
offset = pos = m.end(0)
matched = True
if not matched:
# We matched nothing, move on to the next character
pos += 1
else:
# Increment position as no potential emphasis start was found.
pos += 1
# Append any leftover text as a text node.
text = data[offset:]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
def build_element(self, m, builder, tags, index):
"""Element builder."""
if builder == 'double2':
return self.build_double2(m, tags, index)
elif builder == 'double':
return self.build_double(m, tags, index)
else:
return self.build_single(m, tags, index)
def handleMatch(self, m, data):
"""Parse patterns."""
el = None
start = None
end = None
for index, item in enumerate(self.PATTERNS):
m1 = item.pattern.match(data, m.start(0))
if m1:
start = m1.start(0)
end = m1.end(0)
el = self.build_element(m1, item.builder, item.tags, index)
break
return el, start, end
class PymdownxDeprecationWarning(UserWarning): # pragma: no cover
"""Deprecation warning for Pymdownx that is not hidden."""
| StarcoderdataPython |
1607924 | <reponame>int-brain-lab/iblscripts
import numpy as np
import random
import shutil
from ibllib.ephys.np2_converter import NP2Converter
from ibllib.io import spikeglx
from ci.tests import base
class TestNeuropixel2ConverterNP24(base.IntegrationTest):
"""
Check NP2 converter with NP2.4 type probes
"""
def setUp(self) -> None:
self.file_path = self.data_path.joinpath('ephys', 'ephys_np2', 'raw_ephys_data', 'probe00',
'_spikeglx_ephysData_g0_t0.imec0.ap.bin')
meta_file = self.file_path.parent.joinpath('NP24_meta',
'_spikeglx_ephysData_g0_t0.imec0.ap.meta')
self.meta_file = self.file_path.parent.joinpath('_spikeglx_ephysData_g0_t0.imec0.ap.meta')
shutil.copy(meta_file, self.meta_file)
self.sglx_instances = []
self.temp_directories = []
def tearDown(self):
_ = [sglx.close() for sglx in self.sglx_instances]
# here should look for any directories with test in it and delete
test_dir = list(self.file_path.parent.parent.glob('*test*'))
_ = [shutil.rmtree(test) for test in test_dir]
self.meta_file.unlink()
def testDecimate(self):
"""
Check integrity of windowing and downsampling by comparing results when using different
window lengths for iterating through data
:return:
"""
FS = 30000
np_a = NP2Converter(self.file_path, post_check=False)
np_a.init_params(nwindow=0.5 * FS, extra='_0_5s_test', nshank=[0])
np_a.process()
np_b = NP2Converter(self.file_path, post_check=False)
np_b.init_params(nwindow=1 * FS, extra='_1s_test', nshank=[0])
np_b.process()
np_c = NP2Converter(self.file_path, post_check=False)
np_c.init_params(nwindow=3 * FS, extra='_2s_test', nshank=[0])
np_c.process()
sr = spikeglx.Reader(self.file_path)
self.sglx_instances.append(sr)
sr_a_ap = spikeglx.Reader(np_a.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_a_ap)
sr_b_ap = spikeglx.Reader(np_b.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_b_ap)
sr_c_ap = spikeglx.Reader(np_c.shank_info['shank0']['ap_file'])
self.sglx_instances.append(sr_c_ap)
# Make sure all the aps are the same regardless of window size we used
assert np.array_equal(sr_a_ap[:, :], sr_b_ap[:, :])
assert np.array_equal(sr_a_ap[:, :], sr_c_ap[:, :])
assert np.array_equal(sr_b_ap[:, :], sr_c_ap[:, :])
# For AP also check that all values are the same as the original file
assert np.array_equal(sr_a_ap[:, :], sr[:, np_a.shank_info['shank0']['chns']])
assert np.array_equal(sr_b_ap[:, :], sr[:, np_b.shank_info['shank0']['chns']])
assert np.array_equal(sr_c_ap[:, :], sr[:, np_c.shank_info['shank0']['chns']])
sr_a_lf = spikeglx.Reader(np_a.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_a_lf)
sr_b_lf = spikeglx.Reader(np_b.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_b_lf)
sr_c_lf = spikeglx.Reader(np_c.shank_info['shank0']['lf_file'])
self.sglx_instances.append(sr_c_lf)
# Make sure all the lfps are the same regardless of window size we used
assert np.array_equal(sr_a_lf[:, :], sr_b_lf[:, :])
assert np.array_equal(sr_a_lf[:, :], sr_c_lf[:, :])
assert np.array_equal(sr_b_lf[:, :], sr_c_lf[:, :])
def testProcessNP24(self):
"""
Check normal workflow of splittig data into individual shanks
:return:
"""
# Make sure normal workflow runs without problems
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process()
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# Test a random ap metadata file and make sure it all makes sense
shank_n = random.randint(0, 3)
sr_ap = spikeglx.Reader(np_conv.shank_info[f'shank{shank_n}']['ap_file'])
assert np.array_equal(sr_ap.meta['acqApLfSy'], [96, 0, 1])
assert np.array_equal(sr_ap.meta['snsApLfSy'], [96, 0, 1])
assert np.equal(sr_ap.meta['nSavedChans'], 97)
assert (sr_ap.meta['snsSaveChanSubset'] == '0:96')
assert np.equal(sr_ap.meta['NP2.4_shank'], shank_n)
assert (sr_ap.meta['original_meta'] == 'False')
sr_ap.close()
# Test a random lf metadata file and make sure it all makes sense
shank_n = random.randint(0, 3)
sr_lf = spikeglx.Reader(np_conv.shank_info[f'shank{shank_n}']['lf_file'])
assert np.array_equal(sr_lf.meta['acqApLfSy'], [0, 96, 1])
assert np.array_equal(sr_lf.meta['snsApLfSy'], [0, 96, 1])
assert np.equal(sr_lf.meta['nSavedChans'], 97)
assert (sr_lf.meta['snsSaveChanSubset'] == '0:96')
assert np.equal(sr_lf.meta['NP2.4_shank'], shank_n)
assert (sr_lf.meta['original_meta'] == 'False')
assert np.equal(sr_lf.meta['imSampRate'], 2500)
sr_lf.close()
# Rerun again and make sure that nothing happens because folders already exists
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process()
self.assertTrue(np_conv.already_exists)
self.assertFalse(status)
# But if we set the overwrite flag to True we force rerunning
np_conv = NP2Converter(self.file_path)
np_conv.init_params(extra='_test')
status = np_conv.process(overwrite=True)
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# Change some of the data and make sure the checking function is working as expected
shank_n = random.randint(0, 3)
ap_file = np_conv.shank_info[f'shank{shank_n}']['ap_file']
with open(ap_file, "r+b") as f:
f.write((chr(10) + chr(20) + chr(30) + chr(40)).encode())
# Now that we have changed the file we expect an assertion error when we do the check
with self.assertRaises(AssertionError) as context:
np_conv.check_NP24()
self.assertTrue('data in original file and split files do no match'
in str(context.exception))
# Finally test that we cannot process a file that has already been split
np_conv = NP2Converter(ap_file)
status = np_conv.process()
self.assertTrue(np_conv.already_processed)
self.assertFalse(status)
class TestNeuropixel2ConverterNP21(base.IntegrationTest):
"""
Check NP2 converter with NP2.1 type probes
"""
def setUp(self) -> None:
self.file_path = self.data_path.joinpath('ephys', 'ephys_np2', 'raw_ephys_data', 'probe00',
'_spikeglx_ephysData_g0_t0.imec0.ap.bin')
meta_file = self.file_path.parent.joinpath('NP21_meta',
'_spikeglx_ephysData_g0_t0.imec0.ap.meta')
self.meta_file = self.file_path.parent.joinpath('_spikeglx_ephysData_g0_t0.imec0.ap.meta')
shutil.copy(meta_file, self.meta_file)
self.sglx_instances = []
def tearDown(self):
_ = [sglx.close() for sglx in self.sglx_instances]
# here should look for anything with test in it and delete
lf_file = self.file_path.parent.joinpath(self.file_path.name.replace('ap', 'lf'))
lf_meta = self.meta_file.parent.joinpath(self.meta_file.name.replace('ap', 'lf'))
lf_file.unlink()
lf_meta.unlink()
self.meta_file.unlink()
def testProcessNP21(self):
"""
Check normal workflow of getting LFP data out and storing in main probe folder
:return:
"""
# make sure it runs smoothly
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
# test the meta file
sr_ap = spikeglx.Reader(np_conv.shank_info[f'shank0']['lf_file'])
assert np.array_equal(sr_ap.meta['acqApLfSy'], [0, 384, 1])
assert np.array_equal(sr_ap.meta['snsApLfSy'], [0, 384, 1])
assert np.equal(sr_ap.meta['nSavedChans'], 385)
assert (sr_ap.meta['snsSaveChanSubset'] == '0:384')
assert np.equal(sr_ap.meta['NP2.1_shank'], 0)
assert (sr_ap.meta['original_meta'] == 'False')
sr_ap.close()
# now run again and make sure that it doesn't run
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertTrue(np_conv.already_exists)
self.assertFalse(status)
# Now try with the overwrite flag and make sure it runs
np_conv = NP2Converter(self.file_path)
status = np_conv.process(overwrite=True)
self.assertFalse(np_conv.already_exists)
self.assertTrue(status)
class TestNeuropixel2ConverterNP1(base.IntegrationTest):
"""
Check NP2 converter with NP1 type probes
"""
def setUp(self) -> None:
self.file_path = self.data_path.joinpath('ephys', 'ephys_np2', 'raw_ephys_data', 'probe00',
'_spikeglx_ephysData_g0_t0.imec0.ap.bin')
meta_file = self.file_path.parent.joinpath('NP1_meta',
'_spikeglx_ephysData_g0_t0.imec0.ap.meta')
self.meta_file = self.file_path.parent.joinpath('_spikeglx_ephysData_g0_t0.imec0.ap.meta')
shutil.copy(meta_file, self.meta_file)
self.sglx_instances = []
self.temp_directories = []
def tearDown(self):
# here should look for anything with test in it and delete
self.meta_file.unlink()
def testProcessNP1(self):
"""
Check normal workflow -> nothing should happen!
"""
np_conv = NP2Converter(self.file_path)
status = np_conv.process()
self.assertFalse(status)
if __name__ == "__main__":
import unittest
unittest.main(exit=False)
| StarcoderdataPython |
3374293 | import useeioapi.data as data
import useeioapi.calc as calc
from flask import Flask, jsonify, request, abort
app = Flask(__name__)
data_dir = 'data'
# no caching -> just for dev ...
@app.after_request
def add_header(r):
    """Disable client-side caching on every response (dev-only convenience).

    Sends ``Cache-Control: no-cache, no-store, must-revalidate`` plus the
    legacy ``Pragma``/``Expires`` headers so browsers always re-fetch.
    """
    # Bug fix: the original set Cache-Control twice, and the second
    # assignment ('public, max-age=0') silently overwrote the no-cache
    # directive this hook exists to send (see the "no caching" note above).
    r.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    r.headers['Pragma'] = 'no-cache'
    r.headers['Expires'] = '0'
    return r
@app.route('/api/models')
def get_models():
    """List the metadata of all models found in the data folder."""
    return jsonify(data.read_model_infos(data_dir))
@app.route('/api/<model>/demands')
def get_demands(model: str):
    """List the demand vectors available for the given model."""
    return jsonify(data.read_demand_infos(data_dir, model))
@app.route('/api/<model_id>/demands/<demand_id>')
def get_demand(model_id: str, demand_id: str):
    """Return a single demand vector, or 404 when it does not exist."""
    vector = data.read_demand(data_dir, model_id, demand_id)
    if vector is None:
        abort(404)
    return jsonify(vector)
@app.route('/api/<model>/sectors')
def get_sectors(model: str):
    """List all sectors of the given model."""
    return jsonify(data.read_sectors(data_dir, model))
@app.route('/api/<model>/sectors/<path:sector_id>')
def get_sector(model: str, sector_id: str):
    """Return the sector with the given ID, or 404 when there is none."""
    sector = next(
        (s for s in data.read_sectors(data_dir, model)
         if s.get('id') == sector_id),
        None)
    if sector is None:
        abort(404)
    return jsonify(sector)
@app.route('/api/<model>/flows')
def get_flows(model: str):
    """List all elementary flows of the given model."""
    return jsonify(data.read_flows(data_dir, model))
@app.route('/api/<model>/flows/<path:flow_id>')
def get_flow(model: str, flow_id: str):
    """Return the flow matching the given ID or UUID; 404 otherwise."""
    hit = next(
        (f for f in data.read_flows(data_dir, model)
         if flow_id in (f.get('id'), f.get('uuid'))),
        None)
    if hit is None:
        abort(404)
    return jsonify(hit)
@app.route('/api/<model>/indicators')
def get_indicators(model: str):
    """List all impact indicators of the given model."""
    return jsonify(data.read_indicators(data_dir, model))
@app.route('/api/<model>/indicators/<path:indicator_id>')
def get_indicator(model: str, indicator_id: str):
    """Return the indicator with the given ID, or 404 when there is none."""
    hit = next(
        (ind for ind in data.read_indicators(data_dir, model)
         if ind.get('id') == indicator_id),
        None)
    if hit is None:
        abort(404)
    return jsonify(hit)
@app.route('/api/<model_id>/calculate', methods=['POST'])
def calculate(model_id: str):
    """Run a calculation for the posted demand and return the result.

    force=True parses the body as JSON even when the client did not send
    the `Content-Type: application/json` header; without it `get_json`
    would return None (see https://stackoverflow.com/a/20001283).
    """
    demand = request.get_json(force=True)
    return jsonify(calc.calculate(data_dir, model_id, demand))
@app.route('/api/<model>/matrix/<name>')
def get_matrix(model: str, name: str):
    """Serve a model matrix; numeric and DQI matrices are stored differently."""
    if name in ('A', 'B', 'C', 'D', 'L', 'U'):
        return __get_numeric_matrix(model, name)
    if name in ('B_dqi', 'D_dqi', 'U_dqi'):
        return __get_dqi_matrix(model, name)
    abort(404)
def __get_numeric_matrix(model: str, name: str):
    """Return a numeric matrix as JSON.

    A `col` or `row` query parameter selects a single slice; the column
    parameter takes precedence, matching the original behavior.
    """
    matrix = data.read_matrix(data_dir, model, name)
    if matrix is None:
        abort(404)
    col_idx = __get_index_param('col', matrix.shape[1])
    if col_idx >= 0:
        return jsonify(matrix[:, col_idx].tolist())
    row_idx = __get_index_param('row', matrix.shape[0])
    if row_idx >= 0:
        return jsonify(matrix[row_idx, :].tolist())
    return jsonify(matrix.tolist())
def __get_dqi_matrix(model: str, name: str):
    """Return a DQI matrix (a list of rows) as JSON.

    A `col` or `row` query parameter selects a single slice; the column
    parameter is checked first, matching the original behavior.
    """
    matrix = data.read_dqi_matrix(data_dir, model, name)
    if not matrix:  # covers both "not found" (None) and "empty" ([])
        abort(404)
    col_idx = __get_index_param('col', len(matrix[0]))
    if col_idx >= 0:
        return jsonify([row[col_idx] for row in matrix])
    row_idx = __get_index_param('row', len(matrix))
    if row_idx >= 0:
        return jsonify(matrix[row_idx])
    return jsonify(matrix)
def __get_index_param(name: str, size: int) -> int:
    """Read an integer index from the query string.

    Returns -1 when the parameter is absent or empty; aborts with 400 when
    it is not an integer or is >= `size`.

    Fix: the original used a bare `except:` around the whole body, which
    also swallowed the HTTPException raised by the in-`try` `abort(400)`
    (re-raising a fresh one) and would hide any unrelated error. The `try`
    now covers only the conversion and catches only ValueError.
    """
    val = request.args.get(name)
    if val is None or len(val) == 0:
        return -1
    try:
        idx = int(val)
    except ValueError:
        abort(400)
    if idx >= size:
        abort(400)
    return idx
def serve(data_folder: str, port='5000'):
    """Start the API server on all interfaces.

    :param data_folder: folder that contains the model data; stored in the
        module-level `data_dir` that all handlers read.
    :param port: port to listen on.

    Fix: the original also declared `global app`, but `app` is never
    reassigned here, so that declaration was a misleading no-op.
    """
    global data_dir
    data_dir = data_folder
    app.run('0.0.0.0', port)
| StarcoderdataPython |
4819305 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# This signature was contributed by RedSocks - http://redsocks.nl
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Dibik(Signature):
    """Flags known Dibik/Shark backdoor mutexes and dropped files."""

    name = "rat_dibik"
    description = "Creates known Dibik/Shark Backdoor files, registry keys and/or mutexes"
    severity = 3
    categories = ["rat"]
    families = ["dibik"]
    authors = ["RedSocks"]
    minimum = "2.0"

    mutexes_re = [
        ".*\\$OK",
        ".*\\$AZ",
        ".*\\$ZR",
    ]

    files_re = [
        "C:\\\\Windows\\\\(system32|syswow64)\\\\eeS.EXE",
    ]

    def on_complete(self):
        # Record every known Dibik mutex name observed during the analysis.
        for pattern in self.mutexes_re:
            mutex = self.check_mutex(pattern=pattern, regex=True)
            if mutex:
                self.mark_ioc("mutex", mutex)
        # Record every known Dibik dropped-file path.
        for pattern in self.files_re:
            dropped = self.check_file(pattern=pattern, regex=True)
            if dropped:
                self.mark_ioc("file", dropped)
        return self.has_marks()
| StarcoderdataPython |
1633606 | from mp4box.utils.stream_reader import StreamReader
from mp4box.box_parser import BoxParser
# This class represents both mp4 and m4s files
class ISOFile:
    """Represents an ISO-BMFF container — both .mp4 and .m4s (segment) files."""

    class Track:
        """Base record for a single `trak` box and its extraction state."""

        def __init__(self, id, user, trak):
            self.id = id        # track ID
            self.user = user    # opaque user data attached by the caller
            self.trak = trak    # the parsed `trak` box
            self.segmentStream = None
            self.nb_samples = 1000  # batch size for sample processing — TODO confirm
            self.samples = []

    class FragmentedTrack(Track):
        """Track used when producing fragmented (segmented) output."""

        def __init__(self, id, user, trak):
            super().__init__(id, user, trak)
            self.rap_alignment = True  # presumably: align segments on random-access points — verify

    class ExtractedTrack(Track):
        """Track used when extracting samples from a progressive file."""

        def __init__(self, id, user, trak):
            super().__init__(id, user, trak)

    def __init__(self, file):
        """Set up parsing state for `file`; nothing is read until `parse()`."""
        self.boxes = []
        self.mdats = []
        self.moofs = []
        self.is_progressive = False
        # moov handling / user callbacks
        self.moov_start_found = False
        self.on_moov_start = None
        self.on_ready = None
        self.ready_sent = False
        self.on_segment = None
        self.on_samples = None
        self.on_error = None
        self.sample_list_built = False
        # fragmentation / extraction state
        self.fragmented_tracks = []
        self.extracted_tracks = []
        self.is_fragmentation_initialized = False
        self.sample_process_started = False
        self.next_moof_number = 0
        self.item_list_built = False
        self.on_sidx = None
        self.sidx_sent = False
        self.box_parser = BoxParser(file)
        # caches filled lazily by get_all_info()
        self.info = None
        self.video_trak = None
        self.audio_trak = None

    def __enter__(self):
        # Fix: the original defined `__entry__`, a name Python never calls,
        # so the context-manager protocol (the paired `__exit__` exists)
        # silently did not work.
        return self

    # Backwards-compatible alias for the original (misspelled) method name.
    __entry__ = __enter__

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release; returning None propagates any exception.
        return None

    def parse(self):
        """Parse the boxes of the file via the underlying BoxParser."""
        self.box_parser.parse()

    def get_all_info(self):
        """Return the parsed file info, computing and caching it on first call.

        Also resolves `self.audio_trak` / `self.video_trak` from the first
        two tracks; only A/V tracks are handled at the moment.
        """
        if self.info is None:
            self.info = self.box_parser.get_all_info()
            tracks = self.info["tracks"]
            if tracks[0].is_audio:
                self.audio_trak = tracks[0]
                if len(tracks) > 1:
                    self.video_trak = tracks[1]
            else:
                self.video_trak = tracks[0]
                if len(tracks) > 1:
                    self.audio_trak = tracks[1]
        return self.info

    def get_video_nalu_gen(self):
        """Return a generator over the NAL units of the video track."""
        # get_all_info() must run first: it is what resolves
        # self.video_trak, which get_nalu_gen() needs.
        self.get_all_info()
        return self.box_parser.get_nalu_gen(self.video_trak)

    def frames(self, media_type):
        """Return the frames of the given media type."""
        return self.box_parser.get_frames(media_type)
| StarcoderdataPython |
4814694 | <reponame>walidpiano/Udacity_Learning<gh_stars>0
from flask import Flask
app = Flask(__name__)
@app.route("/index")
def home():
    """Landing page: return a plain-text greeting."""
    return "Hi there"


@app.route("/SayHello/<name>")
def say_hello(name):
    """Greet the caller using the name captured from the URL."""
    return f"Hello {name}"


if __name__ == "__main__":
    # Fix: the original called app.run() unconditionally at module level,
    # which would start (and block on) the dev server even on import.
    app.run()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.