id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3325615 | import json
import sys
import timeit
from collections import Counter
from dxtbx.model.experiment_list import ExperimentListFactory
from libtbx import easy_mp
from scitbx.array_family import flex
def build_hist(nproc=1):
    """Histogram pixel counts over all images and write them to overload.json.

    Reads experiments (or raw image paths) from the command line, bins every
    pixel of every image into a histogram spanning ``binfactor`` times the
    detector's trusted range, and dumps the non-empty bins as JSON.

    An optional leading ``nproc=N`` argument fans the work out over N
    processes, each handling every N-th image.
    """
    # FIXME use proper optionparser here. This works for now
    if len(sys.argv) >= 2 and sys.argv[1].startswith("nproc="):
        nproc = int(sys.argv[1][6:])
        sys.argv = sys.argv[1:]
    experiments = ExperimentListFactory.from_args(sys.argv[1:])
    if len(experiments) == 0:
        # Arguments were plain image filenames rather than experiment files.
        experiments = ExperimentListFactory.from_filenames(sys.argv[1:])
    for experiment in experiments:
        # NOTE(review): when several experiments are given, only the values
        # from the last one survive this loop — presumably a single
        # experiment is expected; confirm with callers.
        imageset = experiment.imageset
        limit = experiment.detector[0].get_trusted_range()[1]
        n0, n1 = experiment.scan.get_image_range()
        image_count = n1 - n0 + 1
    binfactor = 5  # register up to 500% counts
    histmax = (limit * binfactor) + 0.0
    histbins = int(limit * binfactor) + 1
    # Very large histograms are cheaper as sparse Counters than dense flex arrays.
    use_python_counter = histbins > 90000000  # empirically determined
    print(
        "Processing %d images in %d processes using %s\n"
        % (
            image_count,
            nproc,
            "python Counter" if use_python_counter else "flex arrays",
        )
    )

    def process_image(process):
        # Worker: histogram every nproc-th image starting at index ``process``.
        last_update = start = timeit.default_timer()
        i = process
        if use_python_counter:
            local_hist = Counter()
        else:
            local_hist = flex.histogram(
                flex.double(), data_min=0.0, data_max=histmax, n_slots=histbins
            )
        # NOTE(review): max_images is computed but never used below; the
        # remainder test also looks inverted for its apparent intent.
        max_images = image_count // nproc
        if process >= image_count % nproc:
            max_images += 1
        while i < image_count:
            data = imageset.get_raw_data(i)[0]
            if not use_python_counter:
                # Pre-bin this image so local_hist.update() merges histograms.
                data = flex.histogram(
                    data.as_double().as_1d(),
                    data_min=0.0,
                    data_max=histmax,
                    n_slots=histbins,
                )
            # Counter.update() tallies raw pixel values; flex histogram
            # update() accumulates the per-image histogram built above.
            local_hist.update(data)
            i = i + nproc
            if process == 0:
                # Only the first worker reports progress, at most every 3 s.
                if timeit.default_timer() > (last_update + 3):
                    last_update = timeit.default_timer()
                    if sys.stdout.isatty():
                        # Move the cursor up one line so progress overwrites itself.
                        sys.stdout.write("\033[A")
                    print(
                        "Processed %d%% (%d seconds remain) "
                        % (
                            100 * i // image_count,
                            round((image_count - i) * (last_update - start) / (i + 1)),
                        )
                    )
        return local_hist

    results = easy_mp.parallel_map(
        func=process_image,
        iterable=range(nproc),
        processes=nproc,
        preserve_exception_message=True,
    )
    print("Merging results")
    result_hist = None
    for hist in results:
        if result_hist is None:
            result_hist = hist
        else:
            result_hist.update(hist)
    if not use_python_counter:
        # reformat histogram into dictionary
        result = list(result_hist.slots())
        result_hist = {b: count for b, count in enumerate(result) if count > 0}
    results = {
        "scale_factor": 1 / limit,
        "overload_limit": limit,
        "counts": result_hist,
    }
    print("Writing results to overload.json")
    with open("overload.json", "w") as fh:
        json.dump(results, fh, indent=1, sort_keys=True)


if __name__ == "__main__":
    build_hist()
| StarcoderdataPython |
122598 | <gh_stars>1-10
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .utils import can_object_page_be_shown_to_pubilc
# Choices for the gatekeeper ``publish_status`` field:
#   -1 -- hard-off: never shown, regardless of live_as_of
#    0 -- gated: shown once the "Live as of" date/time has passed
#    1 -- hard-on: always shown
PUBLISH_STATUS_LIST = (
    (-1, 'NEVER Available'),
    (0, 'USE "Live as of Date"'),
    (1, 'ALWAYS Available')
)
"""
There are two different types of models that use gatekeeping:
1. GatekeeperAbstractModel:
Models that have individual objects, where each one is handled separately.
e.g., the objects in an Article model may be live, might be waiting for their
live_as_of date to happen, might be in preparation, or might be turned off.
2. GatekeeperSerialAbstractModel:
Models where only ONE object is meant to be live at any time.
e.g., a Homepage model might have several "queued up" to go live at any given time.
For these models there is an extra field for default_live
which is used if the logic doesn't return any particular object.
"""
class GatekeeperAbstractModel(models.Model):
    """
    Abstract base for models whose objects are gated individually: each
    object may be live, scheduled (waiting for live_as_of), in preparation,
    or turned off, depending on publish_status and live_as_of.
    """
    # See PUBLISH_STATUS_LIST: -1 never, 0 gated by live_as_of, 1 always.
    publish_status = models.IntegerField (
        _('Publish Status'),
        default = 0, null = False,
        choices = PUBLISH_STATUS_LIST
    )
    ###
    ### live_as_of starts out as NULL
    ### meaning "I am still being worked on" (if publish_status == 0)
    ### OR "I have deliberately been pushed live (if publish_status == 1)"
    ###
    ### if set, then after that date/time the object is "live".
    ###
    ### This allows content producers to 'set it and forget it'.
    ###
    live_as_of = models.DateTimeField (
        _('Live As Of'),
        null = True, blank = True,
        help_text = 'You can Set this to a future date/time to schedule availability.'
    )
    ### This sets up the ability for gatekeeping hierarchies.
    #parental_model_field = None

    def __available_to_public(self):
        """
        THIS IS ONLY TO BE USED IN TEMPLATES.
        It RELIES on the gatekeeper - so using it in front of the gatekeeper is counter-productive.
        (I made this mistake, thinking I was taking a shortcut. It cost me a day's work. Don't be like me.)
        RAD 4 Oct 2018
        """
        ### This needs to be integrated if standalone object support is put back in the package.
        #try:
        #    if self.treat_as_standalone == 0:
        #        return can_object_page_be_shown(None, self, including_parents = True)
        #except:
        #    pass
        # NOTE(review): "pubilc" typo originates in .utils; renaming requires
        # a coordinated change there, so it is preserved here.
        return can_object_page_be_shown_to_pubilc(self)
    # Template-friendly read-only property wrapping the name-mangled method.
    available_to_public = property(__available_to_public)

    class Meta:
        abstract = True
class GatekeeperSerialAbstractModel(GatekeeperAbstractModel):
    """
    This builds on the previous abstract model.
    It added the "default_live" field to allow one object to be a "fall back" in case all the other objects fail to pass the gate.
    """
    # Marks this record as the fallback instance when no other object passes
    # the gate (per the module docstring, only ONE object is live at a time).
    default_live = models.BooleanField (
        _('Default as Live'), default = False,
        help_text = "If everything else fails, then return this as the live instance"
    )

    class Meta:
        abstract = True
3366606 | import os
from crossasr.utils import make_dir
class ASR:
    """Base class for an Automatic Speech Recognition engine.

    Subclasses implement :meth:`recognizeAudio`; this base class carries the
    engine name, the last transcription, and helpers to persist/load
    transcriptions under ``<transcription_dir>/<engine name>/<filename>.txt``.
    """

    def __init__(self, name):
        self.name = name
        self.transcription = ""

    def getName(self):
        """Return the engine name (used as the per-engine directory name)."""
        return self.name

    def setName(self, name: str):
        self.name = name

    def getTranscription(self):
        """Return the most recently set transcription (empty string if none)."""
        return self.transcription

    def setTranscription(self, transcription: str):
        self.transcription = transcription

    def recognizeAudio(self, audio_fpath: str) -> str:
        # abstract function need to be implemented by the child class
        raise NotImplementedError()

    def saveTranscription(self, transcription_dir: str, filename: str):
        """Write the current transcription to ``<dir>/<name>/<filename>.txt``."""
        transcription_dir = os.path.join(transcription_dir, self.getName())
        make_dir(transcription_dir)
        transcription_path = os.path.join(transcription_dir, filename + ".txt")
        # Context manager guarantees the handle is closed even on write errors.
        with open(transcription_path, "w+") as f:
            f.write(self.getTranscription())

    def loadTranscription(self, transcription_dir: str, filename: str):
        """Read back a transcription saved by :meth:`saveTranscription`.

        Returns the first line of the file, or "" for an empty file.
        """
        transcription_dir = os.path.join(transcription_dir, self.getName())
        transcription_path = os.path.join(transcription_dir, filename + ".txt")
        # BUGFIX: the original leaked the file handle when the file was empty
        # (early return before f.close()); ``with`` always releases it.
        with open(transcription_path, "r") as f:
            lines = f.readlines()
        if len(lines) == 0:
            return ""
        return lines[0]
def update_from_ref(self, LUT_ref):
    """Update the equivalent circuit according to the LUT

    Parameters
    ----------
    self : EEC_SCIM
        an EEC_SCIM object
    LUT_ref : LUTslip
        reference LUTslip object
    """
    # Update skin effect to the current operating point / temperature
    self.comp_skin_effect()
    self.comp_K21()

    eec_ref = LUT_ref.get_eec()
    Tsta_ref, Trot_ref = eec_ref.Tsta, eec_ref.Trot
    Xkr_skinS_ref, Xke_skinS_ref = eec_ref.Xkr_skinS, eec_ref.Xke_skinS
    # BUGFIX: the rotor reactance skin factor previously read the *stator*
    # value (eec_ref.Xke_skinS); the variable name and the L2 rescaling below
    # both call for the rotor value.
    Xkr_skinR_ref, Xke_skinR_ref = eec_ref.Xkr_skinR, eec_ref.Xke_skinR

    # Compute stator winding resistance (reference value rescaled to the
    # reference temperature, with the reference skin factor divided out)
    if eec_ref.R1 is None:
        self.comp_R1()
    else:
        self.comp_R1(R1_ref=eec_ref.R1 / Xkr_skinS_ref, T_ref=Tsta_ref)
    # Compute stator winding inductance
    if eec_ref.L1 is None:
        self.comp_L1()
    else:
        self.comp_L1(L1_ref=eec_ref.L1 / Xke_skinS_ref)
    # Iron loss resistance
    if self.Rfe is None:
        self.Rfe = 1e12  # TODO calculate (or estimate at least)
    # Compute rotor winding resistance
    if eec_ref.R2 is None:
        self.comp_R2()
    else:
        self.comp_R2(R2_ref=eec_ref.R2 / Xkr_skinR_ref, T_ref=Trot_ref)
    # Compute rotor winding inductance
    if eec_ref.L2 is None:
        self.comp_L2()
    else:
        self.comp_L2(L2_ref=eec_ref.L2 / Xke_skinR_ref)
    # Get Im/Lm tables from LUT (magnetizing inductance is rescaled from the
    # reference stator skin factor to the current one)
    if eec_ref.Lm_table is None or eec_ref.Im_table is None:
        raise Exception("LUT must contains Lm/Im tables to solve EEC_SCIM")
    self.Lm_table = eec_ref.Lm_table / Xke_skinS_ref * self.Xke_skinS
    self.Im_table = eec_ref.Im_table
| StarcoderdataPython |
3212910 | <filename>Clustering/K-means.py
"""K-means clustering demo: synthetic blobs, then customer segmentation."""
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
import random
import os
from sklearn.cluster import KMeans
# FIX: sklearn.datasets.samples_generator was removed in scikit-learn 0.24;
# make_blobs lives directly in sklearn.datasets.
from sklearn.datasets import make_blobs

np.random.seed(0)

# ---- Part 1: K-means on synthetic blobs ----
X, y = make_blobs(n_samples=5000, centers=[[4, 4], [-2, -1], [2, -3], [1, 1]], cluster_std=0.9)
plt.scatter(X[:, 0], X[:, 1], marker='.')

k_means = KMeans(init="k-means++", n_clusters=4, n_init=12)
k_means.fit(X)
k_means_labels = k_means.labels_
print(k_means_labels)
k_means_cluster_centers = k_means.cluster_centers_
print(k_means_cluster_centers)

# Initialize the plot with the specified dimensions.
fig = plt.figure(figsize=(6, 4))
# Colors uses a color map, which will produce an array of colors based on
# the number of labels there are. We use set(k_means_labels) to get the
# unique labels.
colors = plt.cm.Spectral(np.linspace(0, 1, len(set(k_means_labels))))
# Create a plot
ax = fig.add_subplot(1, 1, 1)
# For loop that plots the data points and centroids.
# k will range from 0-3, which will match the possible clusters that each
# data point is in.
for k, col in zip(range(len([[4, 4], [-2, -1], [2, -3], [1, 1]])), colors):
    # Create a list of all data points, where the data poitns that are
    # in the cluster (ex. cluster 0) are labeled as true, else they are
    # labeled as false.
    my_members = (k_means_labels == k)
    # Define the centroid, or cluster center.
    cluster_center = k_means_cluster_centers[k]
    # Plots the datapoints with color col.
    ax.plot(X[my_members, 0], X[my_members, 1], 'w', markerfacecolor=col, marker='.')
    # Plots the centroids with specified color, but with a darker outline
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6)
# Title of the plot
ax.set_title('KMeans')
# Remove x-axis ticks
ax.set_xticks(())
# Remove y-axis ticks
ax.set_yticks(())
# Show the plot
plt.show()

# ---- Part 2: customer segmentation from a CSV dataset ----
DATASET_PATH = os.path.join("../", "datasets")
csv_file_path = os.path.join(DATASET_PATH, "Cust_Segmentation.csv")
cust_df = pd.read_csv(csv_file_path)
print(cust_df.head())
# Address is categorical text and not useful for Euclidean distance.
df = cust_df.drop('Address', axis=1)
print(df.head())

from sklearn.preprocessing import StandardScaler
X = df.values[:, 1:]
X = np.nan_to_num(X)
Clus_dataSet = StandardScaler().fit_transform(X)

clusterNum = 3
k_means = KMeans(init="k-means++", n_clusters=clusterNum, n_init=12)
k_means.fit(X)
labels = k_means.labels_
print(labels)
df["Clus_km"] = labels
print(df.head(5))
df.groupby('Clus_km').mean()

area = np.pi * (X[:, 1]) ** 2
# FIX: the np.float alias was removed in NumPy 1.24; builtin float is equivalent.
plt.scatter(X[:, 0], X[:, 3], s=area, c=labels.astype(float), alpha=0.5)
plt.xlabel('Age', fontsize=18)
plt.ylabel('Income', fontsize=16)
plt.show()

from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1, figsize=(8, 6))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
ax.set_xlabel('Education')
ax.set_ylabel('Age')
ax.set_zlabel('Income')
ax.scatter(X[:, 1], X[:, 0], X[:, 3], c=labels.astype(float))
plt.show()
| StarcoderdataPython |
1755679 | import logging
from spacel.model.aws import VALID_REGIONS, INSTANCE_TYPES
logger = logging.getLogger('spacel.model.orbit')
# Valid values for OrbitRegion.nat (checked in OrbitRegion._valid_spacel).
NAT_CONFIGURATIONS = {
    'disabled',  # No NAT, don't even try
    'enabled',  # Single NAT gateway for every AZ (default)
    'per-az'  # NAT gateway per AZ (suggested for production)
}
class Orbit(object):
    """A named deployment environment spanning one or more AWS regions.

    Unknown regions are dropped with a warning rather than raising, so a
    configuration typo degrades to an invalid orbit instead of a crash.
    """

    def __init__(self, name=None, regions=(), **kwargs):
        self.name = name
        self.regions = {}
        for region_name in regions:
            if region_name not in VALID_REGIONS:
                logger.warning(
                    'Orbit "%s" has invalid region "%s". Valid regions: %s',
                    name, region_name, ', '.join(VALID_REGIONS))
                continue
            self.regions[region_name] = OrbitRegion(self, region_name, **kwargs)

    @property
    def valid(self):
        """True when the orbit has a name and every region validates."""
        if not self.name or not self.regions:
            return False
        return all(region.valid for region in self.regions.values())
class OrbitRegion(object):
    """Configuration and provisioned state for one AWS region of an Orbit.

    Constructor arguments capture user configuration; attributes that start
    out empty/None (vpc_id, subnet groups, EIPs, ...) are filled in as the
    VPC infrastructure is provisioned.
    """

    def __init__(self,
                 orbit,
                 region,
                 bastion_instance_count=1,
                 bastion_instance_type='t2.nano',
                 bastion_sources=('0.0.0.0/0',),
                 deploy_stack=None,
                 domain=None,
                 parent_stack=None,
                 nat='enabled',
                 private_network='192.168',
                 provider='spacel'):
        self.orbit = orbit
        self.region = region
        self.provider = provider
        # Settings consumed by the "spacel" provider:
        self.bastion_instance_count = bastion_instance_count
        self.bastion_instance_type = bastion_instance_type
        self.bastion_sources = bastion_sources
        self.domain = domain
        self.nat = nat
        self.private_network = private_network
        # Settings consumed by the "gdh" provider:
        self.deploy_stack = deploy_stack
        self.parent_stack = parent_stack
        # Availability zones, queried from EC2 later:
        self._azs = {}
        # Outputs from VPC provisioning:
        self.vpc_id = None
        self.nat_eips = []
        self.private_cache_subnet_group = None
        self.private_rds_subnet_group = None
        self.public_rds_subnet_group = None
        self.spot_fleet_role = None
        self.bastion_eips = []
        self.bastion_sg = None

    @property
    def azs(self):
        return self._azs

    @property
    def az_keys(self):
        return sorted(self._azs)

    @az_keys.setter
    def az_keys(self, value):
        self._azs = dict((az, OrbitRegionAz()) for az in value)

    def _az_subnets(self, attr_name):
        # Collect one subnet attribute from every AZ, in sorted-AZ order.
        return [getattr(self._azs[az], attr_name) for az in self.az_keys]

    @property
    def private_elb_subnets(self):
        return self._az_subnets('private_elb_subnet')

    @property
    def private_instance_subnets(self):
        return self._az_subnets('private_instance_subnet')

    @property
    def public_elb_subnets(self):
        return self._az_subnets('public_elb_subnet')

    @property
    def public_instance_subnets(self):
        return self._az_subnets('public_instance_subnet')

    @property
    def private_nat_gateway(self):
        # Any mode other than "disabled" implies at least one NAT gateway.
        return self.nat != 'disabled'

    @property
    def nat_per_az(self):
        return self.nat == 'per-az'

    @property
    def valid(self):
        orbit_name = (self.orbit and self.orbit.name) or '(no name)'
        validators = {
            'spacel': self._valid_spacel,
            'gdh': self._valid_gdh,
        }
        validator = validators.get(self.provider)
        if validator is None:
            logger.error('App "%s" has invalid "provider": %s', orbit_name,
                         self.provider)
            return False
        return validator(orbit_name)

    def _valid_spacel(self, name):
        # NOTE(review): these messages log self.provider rather than the
        # offending value — preserved as-is from the original.
        ok = True
        if self.bastion_instance_type not in INSTANCE_TYPES:
            logger.error('App "%s" has invalid "bastion_instance_type": %s',
                         name, self.provider)
            ok = False
        if self.nat not in NAT_CONFIGURATIONS:
            logger.error('App "%s" has invalid "nat": %s',
                         name, self.provider)
            ok = False
        return ok

    def _valid_gdh(self, name):
        ok = True
        if not self.deploy_stack:
            logger.error('App "%s" is missing "deploy_stack".', name)
            ok = False
        if not self.parent_stack:
            logger.error('App "%s" is missing "parent_stack".', name)
            ok = False
        return ok
class OrbitRegionAz(object):
    """Subnet ids for one availability zone; filled in during provisioning."""

    def __init__(self,
                 private_elb_subnet=None,
                 private_instance_subnet=None,
                 public_elb_subnet=None,
                 public_instance_subnet=None):
        # Store all four subnet ids with a single unpacking assignment.
        (self.private_elb_subnet,
         self.private_instance_subnet,
         self.public_elb_subnet,
         self.public_instance_subnet) = (private_elb_subnet,
                                         private_instance_subnet,
                                         public_elb_subnet,
                                         public_instance_subnet)
| StarcoderdataPython |
190414 | #! /usr/bin/python
from distutils.dir_util import copy_tree
from pathlib import Path
import os
from .common import order_version, log_normal, log_info, mpk_lang
# Maps the majsoul-plus language code to the numeric asset-directory id used
# under the cached static path (see main(): cached_static_path / str(id)).
lang_map = {
    "en": 2,
    "jp": 1,
}
def main(original_assets_path, cached_static_path):
    """Merge majsoul-plus cached static asset versions into one directory.

    Version directories are applied in ascending version order, so files
    from newer versions overwrite older ones.
    """
    log_normal("Copy cached static data from majsoul-plus...")
    version_root = Path(cached_static_path) / str(lang_map[mpk_lang])
    version_dirs = sorted(next(os.walk(version_root))[1], key=order_version)
    Path(original_assets_path).mkdir(parents=True, exist_ok=True)
    for version_dir in version_dirs:
        source_dir = version_root / version_dir
        log_info(f"Merge {source_dir}")
        copy_tree(str(source_dir), original_assets_path)
    log_info("Copy complete")


if __name__ == "__main__":
    main(str(Path("./assets-original")), str(Path("../../static")))
| StarcoderdataPython |
3366374 | <gh_stars>10-100
from __future__ import absolute_import
from collections import defaultdict
from datetime import datetime
from pytz import UTC
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_noop
from django_countries.fields import CountryField
from course_modes.models import CourseMode
from openedx.core.djangoapps.content.course_overviews.models import (
CourseOverview,
)
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from six.moves import range
class UserProfile(models.Model):
    '''
    Mock of the production model student.models.UserProfile.

    Carries the demographic/profile fields needed by analytics code.
    '''
    user = models.OneToOneField(User, unique=True, db_index=True, related_name='profile')
    name = models.CharField(blank=True, max_length=255, db_index=True)
    # NOTE: the original declared ``country`` twice (here and again after
    # ``city``); only the last binding ever took effect as a Django field,
    # so the duplicate declaration was removed.

    # Optional demographic data we started capturing from Fall 2012
    this_year = datetime.now(UTC).year
    VALID_YEARS = list(range(this_year, this_year - 120, -1))
    year_of_birth = models.IntegerField(blank=True, null=True, db_index=True)
    GENDER_CHOICES = (
        ('m', ugettext_noop('Male')),
        ('f', ugettext_noop('Female')),
        # Translators: 'Other' refers to the student's gender
        ('o', ugettext_noop('Other/Prefer Not to Say'))
    )
    gender = models.CharField(
        blank=True, null=True, max_length=6, db_index=True, choices=GENDER_CHOICES
    )
    # [03/21/2013] removed these, but leaving comment since there'll still be
    # p_se and p_oth in the existing data in db.
    # ('p_se', 'Doctorate in science or engineering'),
    # ('p_oth', 'Doctorate in another field'),
    LEVEL_OF_EDUCATION_CHOICES = (
        ('p', ugettext_noop('Doctorate')),
        ('m', ugettext_noop("Master's or professional degree")),
        ('b', ugettext_noop("Bachelor's degree")),
        ('a', ugettext_noop("Associate degree")),
        ('hs', ugettext_noop("Secondary/high school")),
        ('jhs', ugettext_noop("Junior secondary/junior high/middle school")),
        ('el', ugettext_noop("Elementary/primary school")),
        # Translators: 'None' refers to the student's level of education
        ('none', ugettext_noop("No formal education")),
        # Translators: 'Other' refers to the student's level of education
        ('other', ugettext_noop("Other education"))
    )
    level_of_education = models.CharField(
        blank=True, null=True, max_length=6, db_index=True,
        choices=LEVEL_OF_EDUCATION_CHOICES
    )
    mailing_address = models.TextField(blank=True, null=True)
    city = models.TextField(blank=True, null=True)
    country = CountryField(blank=True, null=True)
    goals = models.TextField(blank=True, null=True)
    allow_certificate = models.BooleanField(default=1)
    bio = models.CharField(blank=True, null=True, max_length=3000, db_index=False)
    profile_image_uploaded_at = models.DateTimeField(null=True, blank=True)

    @property
    def has_profile_image(self):
        """
        Convenience method that returns a boolean indicating whether or not
        this user has uploaded a profile image.
        """
        return self.profile_image_uploaded_at is not None
class CourseEnrollmentManager(models.Manager):
    """Manager adding aggregate queries over course enrollments."""

    def enrollment_counts(self, course_id):
        """Return {mode: count, ..., 'total': n} for active enrollments."""
        rows = super(CourseEnrollmentManager, self).get_queryset().filter(
            course_id=course_id, is_active=True).values(
            'mode').order_by().annotate(models.Count('mode'))
        counts = defaultdict(int)
        for row in rows:
            counts[row['mode']] = row['mode__count']
        # Modes are unique per group-by row, so the grand total is just the sum.
        counts['total'] = sum(counts.values())
        return counts
class CourseEnrollment(models.Model):
    '''
    The production model is student.models.CourseEnrollment
    The purpose of this mock is to provide the model needed to
    retrieve:
    * The learners enrolled in a course
    * When a learner enrolled
    * If the learner is active
    '''
    user = models.ForeignKey(User)
    course_id = CourseKeyField(max_length=255, db_index=True)
    created = models.DateTimeField(null=True)
    # If is_active is False, then the student is not considered to be enrolled
    # in the course (is_enrolled() will return False)
    is_active = models.BooleanField(default=True)
    mode = models.CharField(default=CourseMode.DEFAULT_MODE_SLUG, max_length=100)
    # Custom manager providing aggregate queries (see CourseEnrollmentManager).
    objects = CourseEnrollmentManager()

    class Meta(object):
        unique_together = (('user', 'course_id'),)
        ordering = ('user', 'course_id')

    def __init__(self, *args, **kwargs):
        super(CourseEnrollment, self).__init__(*args, **kwargs)
        # Private variable for storing course_overview to minimize calls to the database.
        # When the property .course_overview is accessed for the first time, this variable will be set.
        self._course_overview = None

    @property
    def course_overview(self):
        # Lazily fetch and memoize the CourseOverview; a failed lookup leaves
        # the cache as None so a later access retries.
        if not self._course_overview:
            try:
                self._course_overview = CourseOverview.get_from_id(self.course_id)
            except (CourseOverview.DoesNotExist, IOError):
                self._course_overview = None
        return self._course_overview
class CourseAccessRole(models.Model):
    """
    Mock of student.models.CourseAccessRole: maps a user to a role,
    optionally scoped to an organization and/or a course.
    """
    user = models.ForeignKey(User)
    # blank org is for global group based roles such as course creator (may be deprecated)
    org = models.CharField(max_length=64, db_index=True, blank=True)
    # blank course_id implies org wide role
    course_id = CourseKeyField(max_length=255, db_index=True, blank=True)
    role = models.CharField(max_length=64, db_index=True)

    class Meta(object):
        unique_together = ('user', 'org', 'course_id', 'role')

    @property
    def _key(self):
        """
        convenience function to make eq overrides easier and clearer. arbitrary decision
        that role is primary, followed by org, course, and then user
        """
        return (self.role, self.org, self.course_id, self.user_id)

    def __eq__(self, other):
        """
        Overriding eq b/c the django impl relies on the primary key which requires fetch. sometimes we
        just want to compare roles w/o doing another fetch.
        """
        return type(self) == type(other) and self._key == other._key  # pylint: disable=protected-access

    def __hash__(self):
        # Hash on the same key tuple as __eq__, keeping the two consistent.
        return hash(self._key)

    def __lt__(self, other):
        """
        Lexigraphic sort
        """
        return self._key < other._key  # pylint: disable=protected-access

    def __unicode__(self):
        # Python 2 string representation (the file imports from __future__).
        return "[CourseAccessRole] user: {} role: {} org: {} course: {}".format(self.user.username, self.role, self.org, self.course_id)
| StarcoderdataPython |
3374931 | """
@Filename: test.py.py
@Author: dulanj
@Time: 2021-08-23 16.12
"""
import torch
from data_loader import get_test_loader
from params import configs, DEVICE
from pretrained_models import initialize_model
def test():
    """Evaluate a saved EfficientNet checkpoint on the test set, printing
    per-batch predictions and the final accuracy."""
    model_name = "efficientnet"
    num_classes = 2
    feature_extract = False
    model, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=False)
    # BUGFIX: the model was never moved to DEVICE (the original .to(DEVICE)
    # was dropped in a refactor), so inference failed whenever DEVICE is a
    # GPU while the input batches are moved there.
    model = model.to(DEVICE)

    model_file_name = configs["model"]["model_load_path"]
    checkpoint = torch.load(model_file_name)
    model.load_state_dict(checkpoint['model_state_dict'])

    test_loader = get_test_loader()

    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(DEVICE)
            labels = labels.to(DEVICE)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            # Targets appear one-hot encoded; recover the class index.
            _, labels = torch.max(labels.data, 1)
            print("Actual: ", labels.data.cpu().numpy())
            print("Predicted: ", predicted.cpu().numpy())
            total += labels.size(0)
            # .item() keeps the running count a plain int instead of a tensor.
            correct += (predicted == labels).sum().item()
            print("Accuracy: ", 100 * correct / total)

    print('Accuracy of the network on the {} test images: {}%'.format(total, 100 * correct / total))


if __name__ == '__main__':
    test()
| StarcoderdataPython |
1700091 | <reponame>bellaz89/pyFEL
'''
Implementation of the Hammersley low discrepancy sequence
'''
import numba
from numba import int64, objmode
from numba.experimental import jitclass
import numpy as np
from scipy.special import erfcinv
# numba jitclass member type specification: both fields are 64-bit integers.
spec = [
    ('base', int64),
    ('idx', int64)
]


@jitclass(spec)
class Hammersley(object):
    '''
    Hammersley low discrepancy sequence generator
    '''

    def __init__(self, prime_idx):
        '''
        Initialize the sequence with the nth prime number
        '''
        self.base = int64(PRIME_VECTOR[prime_idx])
        self.idx = int64(0)

    def set_idx(self, new_idx):
        '''
        Skip the sequence to 'new_idx'
        '''
        # Negative indices are clamped to the start of the sequence.
        if new_idx < 0:
            self.idx = 0
        else:
            self.idx = new_idx

    def get_value(self):
        '''
        Get an element from the sequence
        '''
        # Radical inverse of the (pre-incremented) index in base self.base:
        # reverse the base-b digits of the index behind the radix point.
        xs = 0.0
        xsi = 1.0
        i1 = int64(0)
        self.idx += 1
        i2 = int64(self.idx)
        while True:
            xsi /= self.base
            i1 = int64(i2/self.base)
            xs += (i2-self.base*i1)*xsi
            i2 = i1
            if i2 <= 0:
                return xs

    def get_array(self, n):
        '''
        Get a numpy 1D array with length 'n'
        '''
        sequence = np.empty(n, dtype=np.float64)
        for i in range(n):
            sequence[i] = self.get_value()
        return sequence

    def get_normal_array(self, n):
        '''
        Get a normally distributed 1D array with length 'n'
        '''
        # Map the uniform samples through the inverse complementary error
        # function to obtain normally distributed values.
        x = self.get_array(n)*2.0
        y = np.zeros_like(x)
        # The JIT has to be disabled because erfcinv is not recognized
        # by Numba
        with objmode(y='double[:]'):
            y = erfcinv(x)
        return y
def _sieve_primes(limit):
    """Return all primes <= limit, ascending, as an int64 array.

    Classic Sieve of Eratosthenes; cheap at import time for this limit.
    """
    is_prime = np.ones(limit + 1, dtype=np.bool_)
    is_prime[:2] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            is_prime[p * p::p] = False
    return np.flatnonzero(is_prime).astype(np.int64)


# All primes up to and including 13499 (the range of the original
# hand-maintained ~160-line literal), generated at import time instead:
# same values, same int64 dtype for the jitclass above, and no risk of
# transcription errors in the table.
PRIME_VECTOR = _sieve_primes(13499)
| StarcoderdataPython |
3244361 | <gh_stars>0
# Generated by Django 4.0 on 2021-12-13 09:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0 (makemigrations); avoid hand-editing.
    # Depends on the initial schema of the 'src' app.
    dependencies = [
        ('src', '0001_initial'),
    ]
    # Narrows/updates Product.name: CharField with a test default and
    # max_length=100.
    operations = [
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(default='Test Product - 16 ', max_length=100),
        ),
    ]
| StarcoderdataPython |
168772 | from django.conf import settings
# Base URL for DataTables CDN assets; projects may override it by defining
# SITETABLES_URL_CDN_BASE in Django settings (protocol-relative by default).
URL_CDN_BASE = getattr(settings, 'SITETABLES_URL_CDN_BASE', '//cdn.datatables.net/')
| StarcoderdataPython |
1612591 | <reponame>joshfriend/sqlalchemy-utils
import sqlalchemy as sa
from datetime import datetime, date, time
import pytest
from pytest import mark
# Optional dependency: when `cryptography` is missing it stays None and the
# @mark.skipif guard below disables the whole encrypted-type test suite.
cryptography = None
try:
    import cryptography
except ImportError:
    pass
from tests import TestCase
from sqlalchemy_utils import EncryptedType, PhoneNumberType, ColorType
from sqlalchemy_utils.types.encrypted import AesEngine, FernetEngine
@mark.skipif('cryptography is None')
class EncryptedTypeTestCase(TestCase):
    """Engine-agnostic round-trip tests for ``EncryptedType`` columns.
    Concrete subclasses select the cipher through the ``encryption_engine``
    class attribute (see the AES / Fernet subclasses below).  Each test
    writes a value through an encrypted column and asserts it reads back
    unchanged.
    """
    @pytest.fixture(scope='function')
    def user(self, request):
        """Persist one fully populated User and return it re-fetched from the DB."""
        # set the values to the user object
        self.user = self.User()
        self.user.username = self.user_name
        self.user.phone = self.user_phone
        self.user.color = self.user_color
        self.user.date = self.user_date
        self.user.time = self.user_time
        self.user.enum = self.user_enum
        self.user.datetime = self.user_datetime
        self.user.access_token = self.test_token
        self.user.is_active = self.active
        self.user.accounts_num = self.accounts_num
        self.session.add(self.user)
        self.session.commit()
        # register a finalizer to cleanup
        def finalize():
            del self.user_name
            del self.test_token
            del self.active
            del self.accounts_num
            del self.test_key
            del self.searched_user
        request.addfinalizer(finalize)
        # Re-query so assertions exercise the decrypt path, not the cached object.
        return self.session.query(self.User).get(self.user.id)
    def generate_test_token(self):
        """Build a random 60-character alphanumeric token for the access_token column."""
        import string
        import random
        token = ''
        characters = string.ascii_letters + string.digits
        for i in range(60):
            token += ''.join(random.choice(characters))
        return token
    def create_models(self):
        """Declare User/Team models whose columns all use the engine under test."""
        # set some test values
        self.test_key = 'secretkey1234'
        self.user_name = u'someone'
        self.user_phone = u'(555) 555-5555'
        self.user_color = u'#fff'
        self.user_enum = 'One'
        self.user_date = date(2010, 10, 2)
        self.user_time = time(10, 12)
        self.user_datetime = datetime(2010, 10, 2, 10, 12)
        self.test_token = self.generate_test_token()
        self.active = True
        self.accounts_num = 2
        self.searched_user = None
        class User(self.Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            username = sa.Column(EncryptedType(
                sa.Unicode,
                self.test_key,
                self.__class__.encryption_engine)
            )
            access_token = sa.Column(EncryptedType(
                sa.String,
                self.test_key,
                self.__class__.encryption_engine)
            )
            is_active = sa.Column(EncryptedType(
                sa.Boolean,
                self.test_key,
                self.__class__.encryption_engine)
            )
            accounts_num = sa.Column(EncryptedType(
                sa.Integer,
                self.test_key,
                self.__class__.encryption_engine)
            )
            phone = sa.Column(EncryptedType(
                PhoneNumberType,
                self.test_key,
                self.__class__.encryption_engine)
            )
            color = sa.Column(EncryptedType(
                ColorType,
                self.test_key,
                self.__class__.encryption_engine)
            )
            date = sa.Column(EncryptedType(
                sa.Date,
                self.test_key,
                self.__class__.encryption_engine)
            )
            time = sa.Column(EncryptedType(
                sa.Time,
                self.test_key,
                self.__class__.encryption_engine)
            )
            datetime = sa.Column(EncryptedType(
                sa.DateTime,
                self.test_key,
                self.__class__.encryption_engine)
            )
            enum = sa.Column(EncryptedType(
                sa.Enum('One', name='user_enum_t'),
                self.test_key,
                self.__class__.encryption_engine)
            )
        self.User = User
        # Team's key is a callable, exercising late/lookup key resolution.
        class Team(self.Base):
            __tablename__ = 'team'
            id = sa.Column(sa.Integer, primary_key=True)
            key = sa.Column(sa.String(50))
            name = sa.Column(EncryptedType(
                sa.Unicode,
                lambda: self._team_key,
                self.__class__.encryption_engine)
            )
        self.Team = Team
    def test_unicode(self, user):
        assert user.username == self.user_name
    def test_string(self, user):
        assert user.access_token == self.test_token
    def test_boolean(self, user):
        assert user.is_active == self.active
    def test_integer(self, user):
        assert user.accounts_num == self.accounts_num
    def test_phone_number(self, user):
        assert str(user.phone) == self.user_phone
    def test_color(self, user):
        assert user.color.hex == self.user_color
    def test_date(self, user):
        assert user.date == self.user_date
    def test_datetime(self, user):
        assert user.datetime == self.user_datetime
    def test_time(self, user):
        assert user.time == self.user_time
    def test_enum(self, user):
        assert user.enum == self.user_enum
    def test_lookup_key(self):
        """Rows encrypted under one key must be unreadable under another."""
        # Add teams
        self._team_key = 'one'
        team = self.Team(key=self._team_key, name=u'One')
        self.session.add(team)
        self.session.commit()
        team_1_id = team.id
        self._team_key = 'two'
        team = self.Team(key=self._team_key)
        team.name = u'Two'
        self.session.add(team)
        self.session.commit()
        team_2_id = team.id
        # Lookup teams
        self._team_key = self.session.query(self.Team.key).filter_by(
            id=team_1_id
        ).one()[0]
        team = self.session.query(self.Team).get(team_1_id)
        assert team.name == u'One'
        # Wrong key for team 2 -> decryption must fail.
        with pytest.raises(Exception):
            self.session.query(self.Team).get(team_2_id)
        self.session.expunge_all()
        self._team_key = self.session.query(self.Team.key).filter_by(
            id=team_2_id
        ).one()[0]
        team = self.session.query(self.Team).get(team_2_id)
        assert team.name == u'Two'
        with pytest.raises(Exception):
            self.session.query(self.Team).get(team_1_id)
        self.session.expunge_all()
        # Remove teams
        self.session.query(self.Team).delete()
        self.session.commit()
class TestAesEncryptedTypeTestcase(EncryptedTypeTestCase):
    """Runs the shared suite with the AES engine."""
    encryption_engine = AesEngine
    def test_lookup_by_encrypted_string(self, user):
        # AES (as configured here) is deterministic, so an equality filter on
        # the encrypted column can match a plaintext value.
        test = self.session.query(self.User).filter(
            self.User.username == self.user_name
        ).first()
        assert test.username == user.username
class TestFernetEncryptedTypeTestCase(EncryptedTypeTestCase):
    """Runs the shared suite with the (non-deterministic) Fernet engine."""
    encryption_engine = FernetEngine
| StarcoderdataPython |
25461 | from typing import Optional
import requests
from mstrio.connection import Connection
from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(err_msg='Error while creating the package holder')
def create_package_holder(connection: Connection, project_id: Optional[str] = None,
                          error_msg: Optional[str] = None) -> requests.Response:
    """Create a new in-memory metadata package holder.

    Args:
        connection (Connection): Active connection to the MSTR server.
        project_id (Optional[str]): Project ID; when omitted, the
            connection's current project is used.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    endpoint = f'{connection.base_url}/api/packages'
    return connection.post(url=endpoint, headers={'X-MSTR-ProjectID': project_id})
@ErrorHandler(err_msg='Error while updating the package holder with id: {id}')
def update_package_holder(connection: Connection, body: dict, id: str,
                          project_id: Optional[str] = None, prefer: str = "respond-async",
                          error_msg: Optional[str] = None) -> requests.Response:
    """Fill an (empty) in-memory package holder from a specification.

    Args:
        connection (Connection): Active connection to the MSTR server.
        body (dict): Package specification (``PackageConfig.to_dict()``).
        id (str): ID of the package holder to fill.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        prefer (str): Must stay ``"respond-async"`` — the REST API only
            supports asynchronous mode for this call.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    headers = {
        'X-MSTR-ProjectID': project_id,
        'Prefer': prefer,
    }
    return connection.put(
        url=f'{connection.base_url}/api/packages/{id}',
        headers=headers,
        json=body,
    )
@ErrorHandler(err_msg='Error while downloading the package with id: {id}')
def download_package(connection: Connection, id: str, project_id: Optional[str] = None,
                     error_msg: Optional[str] = None) -> requests.Response:
    """Download the binary payload of a package.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): ID of the package to download.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    endpoint = f'{connection.base_url}/api/packages/{id}/binary'
    return connection.get(url=endpoint, headers={'X-MSTR-ProjectID': project_id})
@ErrorHandler(err_msg='Error while uploading the package with id: {id}')
def upload_package(connection: Connection, id: str, file: bytes, project_id: Optional[str] = None,
                   error_msg: Optional[str] = None) -> requests.Response:
    """Upload a package binary straight into the sandbox.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): ID of the package being uploaded.
        file (bytes): Package contents as a binary string.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    return connection.put(
        url=f'{connection.base_url}/api/packages/{id}/binary',
        headers={'X-MSTR-ProjectID': project_id},
        files={'file': file},
    )
@ErrorHandler(err_msg='Error while getting the package holder with id: {id}')
def get_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
                       show_content: bool = True,
                       error_msg: Optional[str] = None) -> requests.Response:
    """Fetch a package definition, including status and (optionally) content.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): ID of the package to retrieve.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        show_content (bool): Include the package's detailed content.
            Defaults to True (the original docstring incorrectly said False).
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    return connection.get(
        url=f'{connection.base_url}/api/packages/{id}',
        headers={'X-MSTR-ProjectID': project_id},
        params={'showContent': show_content},
    )
@ErrorHandler(err_msg='Error while deleting the package holder with id: {id}')
def delete_package_holder(connection: Connection, id: str, project_id: Optional[str] = None,
                          prefer: str = 'respond-async',
                          error_msg: Optional[str] = None) -> requests.Response:
    """Delete an in-memory package holder, freeing I-Server resources.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): ID of the package to delete.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        prefer (str): Must stay ``"respond-async"`` — the REST API only
            supports asynchronous mode for this call.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    headers = {
        'X-MSTR-ProjectID': project_id,
        'Prefer': prefer,
    }
    return connection.delete(url=f'{connection.base_url}/api/packages/{id}', headers=headers)
@ErrorHandler(err_msg='Error while creating the import for package holder with id: {id}')
def create_import(connection: Connection, id: str, project_id: Optional[str] = None,
                  generate_undo: bool = False,
                  error_msg: Optional[str] = None) -> requests.Response:
    """Start an import process for a package.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): ID of the package to import.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        generate_undo (bool): Also produce an undo package. Defaults to False.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    # TODO: Change to a parameter when any other values are supported
    prefer = 'respond-async'
    if project_id is None:
        project_id = connection.project_id
    headers = {
        'X-MSTR-ProjectID': project_id,
        'Prefer': prefer,
    }
    query = {
        'packageId': id,
        'generateUndo': generate_undo,
    }
    return connection.post(
        url=f'{connection.base_url}/api/packages/imports',
        headers=headers,
        params=query,
    )
@ErrorHandler(err_msg='Error while getting the import with id: {id}')
def get_import(connection: Connection, id: str, project_id: Optional[str] = None,
               error_msg: Optional[str] = None) -> requests.Response:
    """Fetch the result of a package import process.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): Import process ID.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    endpoint = f'{connection.base_url}/api/packages/imports/{id}'
    return connection.get(url=endpoint, headers={'X-MSTR-ProjectID': project_id})
@ErrorHandler(err_msg='Error while deleting the import with id: {id}')
def delete_import(connection: Connection, id: str, project_id: Optional[str] = None,
                  error_msg: Optional[str] = None) -> requests.Response:
    """Close a previously created import process.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): Import process ID.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    # TODO: Change to a parameter when any other values are supported
    prefer = 'respond-async'
    if project_id is None:
        project_id = connection.project_id
    headers = {
        'X-MSTR-ProjectID': project_id,
        'Prefer': prefer,
    }
    return connection.delete(
        url=f'{connection.base_url}/api/packages/imports/{id}',
        headers=headers,
    )
@ErrorHandler(err_msg='Error while creating the undo for import with id: {id}')
def create_undo(connection: Connection, id: str, project_id: Optional[str] = None,
                error_msg: Optional[str] = None) -> requests.Response:
    """Download the undo-package binary for an import process.

    Args:
        connection (Connection): Active connection to the MSTR server.
        id (str): Import process ID.
        project_id (Optional[str]): Project ID; defaults to the
            connection's current project.
        error_msg (Optional[str]): Custom message for the error handler.

    Returns:
        requests.Response: Raw response returned by the server.
    """
    if project_id is None:
        project_id = connection.project_id
    endpoint = f'{connection.base_url}/api/packages/imports/{id}/undoPackage/binary'
    return connection.get(url=endpoint, headers={'X-MSTR-ProjectID': project_id})
| StarcoderdataPython |
3363553 | #abc209_d
import sys, math, collections, itertools, heapq, functools, bisect
# Rebind input to raw stdin reads (much faster for bulk competitive I/O).
input = sys.stdin.readline
sys.setrecursionlimit(10**7)
m = 10**9 + 7  # standard modulus constant (unused by this particular solution)
def intm1(num):
    """Parse *num* as an int and shift it to zero-based."""
    return int(num) - 1

# Parser table: index 0 keeps values 1-based, index 1 converts to 0-based.
intfunc = [int, intm1]

def II(i=0):
    """Read one integer line from stdin (i=1 -> zero-based)."""
    parse = intfunc[i]
    return parse(input())

def LII(i=0):
    """Read a whitespace-separated line of integers as a list."""
    parse = intfunc[i]
    return [parse(tok) for tok in input().split()]

def SI():
    """Read one line with trailing whitespace stripped."""
    return input().rstrip()

def LSI():
    """Read one line and split it into string tokens."""
    line = input().rstrip()
    return line.split()

inf = float("inf")

def yes():
    """Print the affirmative answer."""
    return print("Yes")

def no():
    """Print the negative answer."""
    return print("No")
#abc209_d
# Read tree size N and query count Q, then the N-1 undirected edges;
# LII(1) converts the 1-based vertex labels in the input to 0-based indices.
N, Q = LII()
edge = [[] for _ in range(N)]
for _ in range(N - 1):
    a, b = LII(1)
    edge[a].append(b)
    edge[b].append(a)
# BFS from vertex 0: point[v] becomes the depth (edge distance) of v.
point = [inf] * N
point[0] = 0
q = collections.deque()
q.append(0)
while q:
    now = q.popleft()
    for ed in edge[now]:
        if point[ed] > point[now] + 1:
            q.append(ed)
            point[ed] = point[now] + 1
# In a tree the path length between a and b has the same parity as
# depth[a] - depth[b]: even -> the walkers meet at a vertex ("Town"),
# odd -> they meet in the middle of an edge ("Road").
for _ in range(Q):
    a, b = LII(1)
    c = abs(point[a] - point[b])
    if c % 2 == 0:
        print("Town")
    else:
        print("Road")
| StarcoderdataPython |
3212319 | <reponame>pablobsanchez/eq1fa2<gh_stars>0
"""
Created on Tue Mar 1 09:55:26 2022
@author: po-po
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
import pandas as pd
from simpleim2D import *
# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
# Path to the input video; change to process a different capture.
filename = r'C:\Users\po-po\Desktop\DOC\Fibras\validation_cases\videos\e05dr2rt120.avi'
cap = cv2.VideoCapture(filename)

if not cap.isOpened():
    print("Error opening video stream or file")

# Wall-clock reference for the timestamps stored with each diameter sample.
t0 = time.time()
# Next free row of diam_arr.  BUGFIX: the original reused the name ``i``
# for the per-pixel loop below, clobbering this counter so every frame
# overwrote the same row; the pixel loop now uses its own variable.
sample_idx = 0
# Pre-allocated (timestamp, diameter) buffer; untouched rows stay zero
# and are dropped before saving.
diam_arr = np.zeros((1000000, 2))

cv2.namedWindow('Lined Feed')

def nothing(x):
    """No-op callback required by cv2.createTrackbar."""
    pass

# Trackbar used as an on/off switch for recording diameter samples.
cv2.createTrackbar('Save Array', 'Lined Feed', 0, 1, nothing)

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        img = frame
        # BUGFIX: the original used np.size((img, 1)) — a tuple argument —
        # which errors on modern NumPy (and did not yield the image width);
        # the intent is the centre column of the frame.
        center_col = img.shape[1] // 2
        # Green channel down the centre column.
        slice_center = img[:, center_col, 1]
        # The most common brightness in the slice is the background level.
        hist = np.histogram(slice_center, 255, [0, 255])[0]
        bg_ii0 = np.argmax(hist)
        if bg_ii0 == 0:
            bg_ii0 = 1
        # Normalised deviation of each pixel from the background level.
        change = np.empty(len(slice_center))
        for px in range(len(change)):
            change[px] = np.abs(slice_center[px] - bg_ii0) / 255
        # Strongest deviation is the reference for the 33 % edge threshold.
        fb_change = np.max(change)
        # Top-down scan for the first pixel above threshold (upper edge) ...
        y3 = np.argmax(change > (33 / 100 * fb_change))
        # ... and bottom-up scan (via a flipped copy) for the lower edge.
        change = np.flip(change)
        y4 = len(change) - np.argmax(change > (33 / 100 * fb_change))
        # Draw the measured segment on the centre column.
        x3 = int(len(img[0]) / 2)
        cv2.line(img, (x3, y3), (x3, y4), (0, 0, 255), 2)
        # Pixel diameter -> microns using the 203 um / 464 px calibration.
        fiber_diam_pixels = y4 - y3
        fiber_diam_um = np.round(203 / 464 * fiber_diam_pixels, decimals=0)
        fiber_diam_micras = str(fiber_diam_um)
        cv2.putText(img, 'Fiber Diameter = %s micron' % fiber_diam_micras, (50, 1000),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
        # Record (timestamp, diameter) while the trackbar switch is on.
        save_flag = cv2.getTrackbarPos('Save Array', 'Lined Feed')
        if save_flag == 1:
            diam_arr[sample_idx, 0] = time.time() - t0
            # Store the numeric value, not its string form.
            diam_arr[sample_idx, 1] = fiber_diam_um
            sample_idx += 1
            if sample_idx == len(diam_arr):
                sample_idx = 0  # wrap around rather than run off the buffer
        # Show the annotated frame at 50 % scale.
        scale = 50
        rszx = int(img.shape[1] * scale / 100)
        rszy = int(img.shape[0] * scale / 100)
        imgrsz = cv2.resize(img, (rszx, rszy))
        cv2.imshow('Lined Feed', imgrsz)
        # Q quits; P pauses for 5 seconds.
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
        if cv2.waitKey(100) & 0xFF == ord('p'):
            cv2.waitKey(5000)
    else:
        break

cap.release()
cv2.destroyAllWindows()

# Drop unused (all-zero) rows and build a tidy DataFrame.
mod_diam_arr = diam_arr[diam_arr[:, 1] != 0]
clean_arr = pd.DataFrame(data=mod_diam_arr, columns=['Time', 'Diameter'])
# Rolling mean/std split: samples within +-1 std of the rolling mean are
# 'Clean', the rest 'Dirty'.
interval = 25
clean_arr['Average'] = clean_arr['Diameter'].rolling(window=interval, center=True, min_periods=1).mean()
clean_arr['Std'] = clean_arr['Diameter'].rolling(window=interval, center=True, min_periods=1).std()
clean_arr['Clean'] = clean_arr.Diameter[(clean_arr['Diameter'] >= clean_arr['Average'] - clean_arr['Std']) & (clean_arr['Diameter'] <= clean_arr['Average'] + clean_arr['Std'])]
clean_arr['Dirty'] = clean_arr.Diameter[(clean_arr['Diameter'] <= clean_arr['Average'] - clean_arr['Std']) | (clean_arr['Diameter'] >= clean_arr['Average'] + clean_arr['Std'])]
# Plot outliers (red x), accepted samples (black x), rolling mean and band.
plt.plot(clean_arr['Time'], clean_arr['Dirty'], 'rx')
plt.plot(clean_arr['Time'], clean_arr['Clean'], 'kx')
plt.plot(clean_arr['Time'], clean_arr['Average'], 'b-')
plt.plot(clean_arr['Time'], clean_arr['Average'] - clean_arr['Std'], 'r--')
plt.plot(clean_arr['Time'], clean_arr['Average'] + clean_arr['Std'], 'r--')
plt.show()
# Persist the table and the plot under a user-chosen name.
filename = input('Input filename: ')
clean_arr.to_csv('%s.csv' % filename)
plt.savefig(filename + '_plot.jpg')
| StarcoderdataPython |
92252 | from .detector3d_template import Detector3DTemplate
class PVSECONDFPNUNet(Detector3DTemplate):
    """PV-SECOND detector variant with FPN/UNet heads.

    Runs the module list built by ``Detector3DTemplate`` and adds
    multi-scale (FPN) post-processing on top of the standard outputs.
    """

    def __init__(self, model_cfg, num_class, dataset, nusc=False):
        super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset, nusc=nusc)
        self.module_list = self.build_networks()
        self.model_cfg = model_cfg

    def forward(self, batch_dict, t_mode='det', l=1):
        """Run every sub-module over *batch_dict*.

        Args:
            batch_dict: per-batch data dict; mutated in place by each module.
            t_mode: training-mode tag ('det', 'pseudo', or a 'dom' variant).
            l: weighting factor forwarded to the modules via batch_dict.

        Returns:
            Training: (ret_dict, tb_dict, disp_dict).
            Inference: detections and recalls, plus the FPN variants when
            FPN layers are configured.
        """
        batch_dict['t_mode'] = t_mode
        batch_dict['l'] = l
        for cur_module in self.module_list:
            batch_dict = cur_module(batch_dict)

        if self.training:
            loss, tb_dict, disp_dict = self.get_training_loss(t_mode=t_mode)
            ret_dict = {
                'loss': loss
            }
            return ret_dict, tb_dict, disp_dict

        pred_dicts, recall_dicts = self.post_processing(batch_dict)
        # Derive the set of active FPN layer names from the model config.
        num_fpn_up = self.model_cfg.get('FPN_UP_LAYERS', 0)
        num_fpn_down = self.model_cfg.get('FPN_DOWN_LAYERS', 0)
        num_fpn_downup = self.model_cfg.get('FPN_DOWNUP_LAYERS', 0)
        fpn_layers = [str(3 - lvl) for lvl in range(num_fpn_up)]
        fpn_layers += [str(4 + lvl) for lvl in range(num_fpn_downup + 1) if num_fpn_downup > 0]
        fpn_layers += [str(4 + 1 + lvl) for lvl in range(num_fpn_down) if num_fpn_down > 0]
        if num_fpn_up + num_fpn_downup > 0:
            pred_dicts_fpn, recall_dicts_fpn = self.post_processing_FPN(batch_dict, fpn_layers)
            return pred_dicts, recall_dicts, pred_dicts_fpn, recall_dicts_fpn
        return pred_dicts, recall_dicts

    def get_training_loss(self, t_mode='det'):
        """Aggregate point / RPN (or domain) losses according to *t_mode*.

        BUGFIX: the original referenced ``loss_rpn`` / ``loss_dom``
        unconditionally even though they were only assigned when
        ``self.fpn_only`` was False, raising NameError for fpn-only models.
        """
        disp_dict = {}

        if 'det' in t_mode or 'pseudo' in t_mode:
            loss_point, tb_dict = self.point_head.get_loss()
            loss_rpn = None
            if not self.fpn_only:
                loss_rpn, tb_dict = self.dense_head.get_loss(tb_dict)
            loss_rpn_fpn, tb_dict = self.dense_head.get_fpn_loss(tb_dict)

            disp_dict.update({
                'loss_point': loss_point.item(),
                'loss_rpn_fpn': loss_rpn_fpn.item(),
            })
            if loss_rpn is not None:
                disp_dict['loss_rpn'] = loss_rpn.item()
                loss = loss_rpn + loss_rpn_fpn + loss_point
            else:
                loss = loss_rpn_fpn + loss_point
            return loss, tb_dict, disp_dict

        elif 'dom' in t_mode:
            loss_dom_point, tb_dict = self.point_head.get_dom_loss()
            loss_dom = None
            if not self.fpn_only:
                loss_dom, tb_dict = self.dense_head.get_dom_loss(tb_dict)
            loss_dom_fpn, tb_dict = self.dense_head.get_fpn_dom_loss(tb_dict)

            tb_dict = {
                'loss_dom_fpn': loss_dom_fpn.item(),
                **tb_dict
            }
            if loss_dom is not None:
                tb_dict['loss_dann'] = loss_dom.item()
                loss = loss_dom_point + loss_dom + loss_dom_fpn
            else:
                loss = loss_dom_point + loss_dom_fpn
            return loss, tb_dict, disp_dict
| StarcoderdataPython |
1722893 | import numpy as np
def ask_sumxy(level):
    """Build a sum/difference word problem for *level*; None when impossible."""
    data = level_sumxy(level)
    if not data:
        return None
    x, y, total, diff = data
    question = '''Halle dos números enteros tales que la suma sea {} y la diferencia sea {}".'''.format(total, diff)
    return [question, [x, y]]
def level_sumxy(level):
    """Map *level* to (min, max) sum bounds and delegate to _sumxy."""
    return _sumxy(2 * level + 3, 5 * level + 15)
def _sumxy(min, max):
if min < 3 or max - min < 5:
return None
sum = np.random.randint(min, max)
y = np.random.randint(1, min - 1)
x = sum - y
return [x, y, sum, abs(x - y)]
## 1:E
## 2:S
def ask_multiplesxy(level):
    """Build a 'larger is k times the smaller' word problem for *level*."""
    data = level_multiplesxy(level)
    if not data:
        return None
    smaller, larger, total, factor = data
    question = '''La suma de 2 números es {}. Si el mayor es {} veces el menor. Halle el número menor".'''.format(total, factor)
    return [question, [smaller, larger]]
def level_multiplesxy(level):
    """Map *level* to (min, max) sum bounds and delegate to _multiplesxy."""
    return _multiplesxy(level + 30, 3 * level + 30)
# x + y = z
# x = k * y
# y = z / (k + 1)
def _multiplesxy(min, max):
if min < 30 or min > max:
return None
z = np.random.randint(min, max)
k = np.random.randint(2, 10)
y = z // (k + 1)
x = k * y
# lowest, highest, sum, factor
return [y, x, x + y, k]
## 2:E
# Smoke-test both generators across a range of difficulty levels.
print('level_sumxy')
for i in range(2, 30):
    print(ask_sumxy(i))
print('level_multiplesxy')
for i in range(2, 30):
    print(ask_multiplesxy(i))
| StarcoderdataPython |
def test_layout(layout_from_family2):
    """The layout fixture materializes at all."""
    assert layout_from_family2 is not None
def test_layout__intervals(
        layout_from_family2, individuals_intervals_from_family2):
    """Internal interval list matches the reference fixture."""
    assert layout_from_family2[0]._intervals == \
        individuals_intervals_from_family2
def test_layout_lines(layout_from_family2):
    """Connector lines (couple bar plus two verticals) have expected geometry."""
    layout = layout_from_family2[0]
    assert len(layout.lines) == 3
    assert layout.lines[0].x1 == 31.0
    assert layout.lines[0].y1 == 60.5
    assert layout.lines[0].x2 == 53.5
    assert layout.lines[0].y2 == 60.5
    assert layout.lines[0].curved is False
    assert layout.lines[0].curve_base_height is None
    assert layout.lines[1].x1 == 42.25
    assert layout.lines[1].y1 == 60.5
    assert layout.lines[1].x2 == 42.25
    assert layout.lines[1].y2 == 75.5
    assert layout.lines[1].curved is False
    assert layout.lines[1].curve_base_height is None
    assert layout.lines[2].x1 == 42.25
    assert layout.lines[2].y1 == 80.0
    assert layout.lines[2].x2 == 42.25
    assert layout.lines[2].y2 == 75.5
    assert layout.lines[2].curved is False
    assert layout.lines[2].curve_base_height is None
def test_layout_positions(layout_from_family2):
    """Per-rank positions: parents on rank 0, the child on rank 1."""
    layout = layout_from_family2[0]
    assert len(layout.positions) == 2
    assert len(layout.positions[0]) == 2
    assert (
        layout.positions[0][0].individual.member.person_id
        == "mom2"
    )
    assert layout.positions[0][0].x == 10.0
    assert layout.positions[0][0].y == 50.0
    assert layout.positions[0][0].size == 21.0
    assert layout.positions[0][0].scale == 1.0
    assert (
        layout.positions[0][1].individual.member.person_id
        == "dad2"
    )
    assert layout.positions[0][1].x == 53.5
    assert layout.positions[0][1].y == 50.0
    assert layout.positions[0][1].size == 21.0
    assert layout.positions[0][1].scale == 1.0
    assert len(layout.positions[1]) == 1
    assert (
        layout.positions[1][0].individual.member.person_id
        == "id2"
    )
    assert layout.positions[1][0].x == 31.75
    assert layout.positions[1][0].y == 80.0
    assert layout.positions[1][0].size == 21.0
    assert layout.positions[1][0].scale == 1.0
def test_layout__individuals_by_rank(layout_from_family2):
    """Internal rank buckets hold parents then the child, in order."""
    layout = layout_from_family2[0]
    assert len(layout._individuals_by_rank) == 2
    assert len(layout._individuals_by_rank[0]) == 2
    assert (
        layout._individuals_by_rank[0][0].member.person_id
        == "mom2"
    )
    assert (
        layout._individuals_by_rank[0][1].member.person_id
        == "dad2"
    )
    assert len(layout._individuals_by_rank[1]) == 1
    assert (
        layout._individuals_by_rank[1][0].member.person_id
        == "id2"
    )
def test_layout__id_to_position(layout_from_family2):
    """Internal map keyed by individual objects resolves to their positions."""
    layout = layout_from_family2[0]
    assert len(layout._id_to_position) == 3
    assert (
        layout._id_to_position[
            layout._individuals_by_rank[0][0]
        ].individual.member.person_id
        == "mom2"
    )
    assert (
        layout._id_to_position[
            layout._individuals_by_rank[0][1]
        ].individual.member.person_id
        == "dad2"
    )
    assert (
        layout._id_to_position[
            layout._individuals_by_rank[1][0]
        ].individual.member.person_id
        == "id2"
    )
def test_layout_id_to_position(layout_from_family2):
    """Public map keyed by person_id strings resolves to the same positions."""
    layout = layout_from_family2[0]
    assert len(layout.id_to_position) == 3
    assert (
        layout.id_to_position["mom2"].individual.member.person_id
        == "mom2"
    )
    assert (
        layout.id_to_position["dad2"].individual.member.person_id
        == "dad2"
    )
    assert (
        layout.id_to_position["id2"].individual.member.person_id
        == "id2"
    )
def test_layout_individuals_by_rank(layout_from_family2):
    """Public per-person rank numbers are 1-based: parents 1, child 2."""
    layout = layout_from_family2[0]
    assert len(layout.individuals_by_rank) == 3
    assert layout.individuals_by_rank["mom2"] == 1
    assert layout.individuals_by_rank["dad2"] == 1
    assert layout.individuals_by_rank["id2"] == 2
| StarcoderdataPython |
1794129 | # Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Repository rule for providing system libraries for Bazel build."""
def _system_repo_impl(ctx):
    """Create the requested symlinks and materialize WORKSPACE/BUILD files."""
    for link, target in ctx.attr.symlinks.items():
        ctx.symlink(target, link)
    ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name = ctx.name))
    ctx.file("BUILD.bazel", ctx.read(ctx.attr.build_file))
# Public rule: wraps _system_repo_impl; `symlinks` maps repo-relative link
# paths to absolute system paths, and `build_file` supplies the BUILD file.
system_repo = repository_rule(
    implementation = _system_repo_impl,
    attrs = {
        "symlinks": attr.string_dict(
            doc = """
            Symlinks to create for this system repo. The key is the link path under this repo,
            the value should be an absolute target path on the system that we want to link.
            """,
        ),
        "build_file": attr.label(
            allow_single_file = True,
            mandatory = True,
            doc = "The file to use as the BUILD file for this repository.",
        ),
    },
    doc = "A repository rule for providing system libraries for Bazel build",
)
| StarcoderdataPython |
143605 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Initialization module for artellapipe-dccs-maya
"""
import os
import logging.config
# =================================================================================
PACKAGE = 'artellapipe.dccs.maya'
# =================================================================================
def init(dev=False):
    """Initialize the artellapipe Maya DCC package.

    Args:
        dev (bool): When True, the package logger is switched to DEBUG.
    """
    # Local imports keep module import side-effect free.
    from tpDcc.libs.python import importer
    from artellapipe.dccs.maya import register

    package_logger = create_logger(dev=dev)
    register.register_class('logger', package_logger)
    importer.init_importer(package=PACKAGE)
def create_logger(dev=False):
    """Create and configure the package logger.

    Ensures the per-user log directory exists, loads the bundled
    ``__logging__.ini`` configuration, and optionally raises the logger
    (and its handlers) to DEBUG level.

    Args:
        dev (bool): When True, force DEBUG verbosity.

    Returns:
        logging.Logger: Configured logger named after the package.
    """
    log_dir = os.path.normpath(os.path.join(os.path.expanduser('~'), 'artellapipe', 'logs'))
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    config_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '__logging__.ini'))
    logging.config.fileConfig(config_path, disable_existing_loggers=False)
    logger = logging.getLogger(PACKAGE.replace('.', '-'))
    if dev:
        logger.setLevel(logging.DEBUG)
        for handler in logger.handlers:
            handler.setLevel(logging.DEBUG)

    return logger
| StarcoderdataPython |
3386399 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
from lxml import html
from config import getConfig
from util import isWindow
import os
import codecs
login_url = 'https://monocloud.net/login'
# Pick the per-OS launcher template used to render one script per server.
if isWindow():
    template = codecs.open('shadowsocks_win_tpl.bat', encoding='utf-8').read()
else:
    template = codecs.open('shadowsocks_tpl.sh', encoding='utf-8').read()
def sendRequest(form):
    """Log in to monocloud with *form* credentials and export each service.

    NOTE: Python 2 code; relies on hard-coded XPaths into the site's HTML,
    so it breaks whenever the page layout changes.
    """
    session = requests.Session()
    # Scrape the CSRF token from the login page and attach it to the form.
    response = session.get(login_url)
    token = html.fromstring(response.content).xpath('/html/body/div[3]/div[1]/div[2]/form/input')[0].value
    form['_token'] = token
    response = session.post(login_url, data=form)
    # response = session.get('https://monocloud.net/home')
    tree = html.fromstring(response.content)
    print("login as:", form['email'])
    # import ipdb; ipdb.set_trace()
    # Collect the service links from the sidebar menu and process every
    # non-"Classic" entry via its shadowsocks detail page.
    myService = tree.xpath(u'//*[@id="sidebar-menu"]/ul')[0]
    shadowsocksPage = myService.xpath('//a[starts-with(@href, "https://monocloud.net/service/")]/@href')
    shadowsocksPageName = myService.xpath('//a[starts-with(@href, "https://monocloud.net/service/")]/text()')
    for url, pageName in zip(shadowsocksPage, shadowsocksPageName):
        if 'Classic' not in pageName:
            getShadowsocksInfo(session, url.replace('service', 'shadowsocks'))
def getShadowsocksInfo(session, url):
    # Scrape one service's shadowsocks page and write one launcher script per
    # server table into a folder named after the service. (Python 2 syntax.)
    print("get url: ", url)
    response = session.get(url)
    tree = html.fromstring(response.content)
    entries = tree.xpath('//table[@class="table"]')
    serviceName = tree.xpath('//*[@class="page-title"]/text()')[0]
    print serviceName
    print "================="
    if not os.path.isdir(serviceName):
        os.mkdir(serviceName)
    # Windows gets .bat launchers; everything else gets .sh scripts.
    if isWindow():
        for entry in entries:
            generateFileWin(serviceName, entry)
    else:
        for entry in entries:
            generateFile(serviceName, entry)
def generateFileWin(folder, entry):
    # Render the Windows .bat template for one server row of the table.
    texts = entry.xpath('.//td/text()')
    # Table columns, in order: extra info, server host, traffic ratio,
    # port, password, encryption method.
    extra, server, ratio, port, password, method = texts[:6]
    command = template.format(server=server, method=method, password=password, port=port, ratio=ratio, extra=extra)
    with codecs.open(folder + '/' + server + '.bat', 'w', encoding='utf-8') as file:
        file.write(command)
    print server
def generateFile(folder, entry):
    # Render the Unix .sh template for one server row and make it executable.
    texts = entry.xpath('.//td/text()')
    # Table columns, in order: extra info, server host, traffic ratio,
    # port, password, encryption method.
    extra, server, ratio, port, password, method = texts[:6]
    command = template.format(server=server, method=method, password=password, port=port, ratio=ratio, extra=extra)
    with codecs.open(folder + '/' + server + '.sh', 'w', encoding='utf-8') as file:
        file.write(command)
    # 0700 = owner-only rwx (Python 2 octal literal).
    os.chmod(folder + '/' + server + '.sh', 0700)
    print server
def main():
    # Entry point: read credentials from config.py and run the export.
    sendRequest(getConfig())

# Runs unconditionally on import — there is no `if __name__ == "__main__"` guard.
main()
| StarcoderdataPython |
3324454 | import sched
import time
import pickle
import socket
def send_udp(s, packet):
    """Pickle *packet* and push the bytes through socket *s*.

    A refused connection (no listener on the other end) is deliberately
    ignored so a transient peer outage does not kill the send loop.
    """
    payload = pickle.dumps(packet)
    try:
        s.send(payload)
    except ConnectionRefusedError:
        pass
def send_packets(channel, *,
                 send_rate=30,
                 packet=lambda: None,
                 send_func=lambda *args: None,
                 stop_condition=lambda: False):
    """Emit packets on *channel* at *send_rate* Hz until *stop_condition*.

    Blocks the caller while the scheduler runs. Each tick re-arms the timer
    (unless the stop condition holds) and then sends one packet — so one
    final packet is still sent on the tick that observes the stop condition.
    """
    interval = 1 / send_rate

    def _tick():
        # Re-arm first so the cadence is unaffected by the send duration.
        if not stop_condition():
            scheduler.enter(interval, 3, _tick)
        send_func(channel, packet())

    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(interval, 3, _tick)
    scheduler.run()
def get_socket(address):
    """Return a UDP socket connected to *address*, with SO_REUSEADDR set."""
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    udp_sock.connect(address)
    return udp_sock
| StarcoderdataPython |
105284 | <reponame>ferrerinicolas/python_samples<filename>Variables/3.2.5.py
# Simple variable demo: a string and an integer, printed one per line.
string_variable = "Nicolas"
int_variable = 24
for value in (string_variable, int_variable):
    print(value)
| StarcoderdataPython |
1729760 | from collections import defaultdict
from typing import Tuple
class PoolConnections:
    """Tracks temporary disconnections between unordered pairs of nodes.

    Each pair is stored under a canonical (sorted) key; a pair counts as
    connected again once the queried timestamp reaches the recorded
    reconnection time (0 by default, i.e. never disconnected).
    """

    def __init__(self):
        # Canonical (a, b) pair -> first timestamp at which the pair is
        # considered connected again.
        self._disconnected_till = defaultdict(int)

    def are_connected(self, ts: int, nodes: Tuple[str, str]):
        """Return True if *nodes* can talk to each other at time *ts*."""
        if nodes[0] == nodes[1]:
            # A node is always connected to itself.
            return True
        return ts >= self._disconnected_till[self._sort_nodes(nodes)]

    def disconnect_till(self, ts: int, nodes: Tuple[str, str]):
        """Keep *nodes* apart until at least *ts* (the window never shrinks)."""
        key = self._sort_nodes(nodes)
        self._disconnected_till[key] = max(ts, self._disconnected_till[key])

    @staticmethod
    def _sort_nodes(nodes: Tuple[str, str]):
        """Canonical ordering for an undirected node pair."""
        a, b = nodes
        return (b, a) if a > b else (a, b)
| StarcoderdataPython |
3323109 | #!/usr/bin/env python3
from troposphere import Template, Sub, GetAtt, Ref
from troposphere.s3 import Bucket
from troposphere.codepipeline import Pipeline, ArtifactStore
from awslambdacontinuousdelivery.tools.iam import createCodepipelineRole
from awslambdacontinuousdelivery.source.codecommit import getCodeCommit
from awslambdacontinuousdelivery.python.test.unittest import getUnittest
from awslambdacontinuousdelivery.python.build import getBuild
from awslambdacontinuousdelivery.deploy import getDeploy
from awslambdacontinuousdelivery.python.test import getTest
from awslambdacontinuousdelivery.notifications.sns import getEmailTopic
from awslambdacontinuousdelivery.notifications import addFailureNotifications
import argparse
def create_template(stages=None) -> str:
    """Build the CloudFormation template for a Lambda delivery pipeline.

    Pipeline order: CodeCommit source -> unit tests -> build -> one
    deploy+test stage per pre-production environment -> PROD deploy.

    Args:
        stages: iterable of pre-production stage names. Defaults to the
            module-level ``STAGES`` to stay compatible with the original
            no-argument call.

    Returns:
        The troposphere template serialized as indented JSON.
    """
    if stages is None:
        # The original read the global STAGES directly (NameError if it was
        # never defined); keep that as the no-argument fallback.
        stages = STAGES
    template = Template()

    # Named pipeline artifacts handed from one stage to the next.
    source_code = "SourceCode"
    deploy_pkg_artifact = "FunctionDeployPackage"
    cf_artifact = "CfOutputTemplate"

    pipeline_stages = []  # list holding all stages, order matters!

    # Private S3 bucket where CodePipeline stores the artifacts above.
    s3 = template.add_resource(
        Bucket("ArtifactStoreS3Location", AccessControl="Private")
    )

    # IAM role assumed by the pipeline itself.
    pipeline_role = template.add_resource(
        createCodepipelineRole("PipelineRole"))

    source = getCodeCommit(template, source_code)
    pipeline_stages.append(source)

    unit_tests = getUnittest(template, source_code)
    pipeline_stages.append(unit_tests)

    build_stage = getBuild(
        template, source_code, deploy_pkg_artifact, cf_artifact, stages)
    pipeline_stages.append(build_stage)

    # One deploy + post-deploy test stage per pre-production environment.
    for s in stages:
        pipeline_stages.append(
            getDeploy(template, cf_artifact, s.capitalize(),
                      deploy_pkg_artifact, source_code, getTest)
        )

    # Final production deployment (no post-deploy test step).
    prod = getDeploy(template, cf_artifact, "PROD", deploy_pkg_artifact)
    pipeline_stages.append(prod)

    artifact_storage = ArtifactStore(Type="S3", Location=Ref(s3))
    pipeline = Pipeline("FunctionsPipeline",
                        Name=Sub("${AWS::StackName}-Pipeline"),
                        RoleArn=GetAtt(pipeline_role, "Arn"),
                        Stages=pipeline_stages,
                        ArtifactStore=artifact_storage)
    template.add_resource(pipeline)
    return template.to_json(indent=2)
if __name__ == "__main__":
    # Print the rendered CloudFormation JSON to stdout.
    print(create_template())
| StarcoderdataPython |
1691676 | import json
import pytest
from verity_sdk.utils import unpack_forward_message
from verity_sdk.utils.Context import Context
from verity_sdk.protocols.Protocol import Protocol
from ..test_utils import get_test_config, cleanup
@pytest.mark.asyncio
async def test_get_message():
    """Round-trip a message through Protocol packing and forward-unpacking,
    asserting the JSON payload survives unchanged."""
    message = {'hello': 'world'}
    context = await Context.create_with_config(await get_test_config())
    packed_message = await Protocol('test-family', '0.1').get_message_bytes(context, message)
    unpacked_message = json.dumps(await unpack_forward_message(context, packed_message))
    assert json.dumps(message) == unpacked_message
    # Release any resources the test context holds.
    await cleanup(context)
| StarcoderdataPython |
class ARGB:
    """An ARGB color value with four 8-bit channels (alpha, red, green, blue)."""

    _alpha: int
    _red: int
    _green: int
    _blue: int

    @property
    def alpha(self) -> int:
        """Alpha (opacity) channel."""
        return self._alpha

    @property
    def red(self) -> int:
        """Red channel."""
        return self._red

    @property
    def green(self) -> int:
        """Green channel."""
        return self._green

    @property
    def blue(self) -> int:
        """Blue channel."""
        return self._blue

    def __init__(self, alpha: int, red: int, green: int, blue: int) -> None:
        """Initialize an ``ARGB`` instance.

        Args:
            alpha: alpha channel value, 0-255
            red: red channel value, 0-255
            green: green channel value, 0-255
            blue: blue channel value, 0-255

        Raises:
            ValueError: if any channel lies outside 0-255.
        """
        for channel in (alpha, red, green, blue):
            if not 0 <= channel < 256:
                raise ValueError("值超出范围。")
        self._alpha = alpha
        self._red = red
        self._green = green
        self._blue = blue

    def to_hex(self) -> int:
        """Pack the four channels into one 0xAARRGGBB integer."""
        return (self.alpha << 24) | (self.red << 16) | (self.green << 8) | self.blue

    @staticmethod
    def from_hex(number: int) -> "ARGB":
        """Build an ``ARGB`` from a 0xAARRGGBB integer.

        Args:
            number: HEX color value as an ``int``

        Returns:
            The corresponding ``ARGB`` instance.
        """
        return ARGB(
            (number >> 24) & 0xFF,
            (number >> 16) & 0xFF,
            (number >> 8) & 0xFF,
            number & 0xFF,
        )

    def __str__(self) -> str:
        return f"({self.alpha}, {self.red}, {self.green}, {self.blue})"
| StarcoderdataPython |
3224933 | <gh_stars>10-100
#coding:utf-8
#################################
#Copyright(c) 2014 dtysky
#################################
import G2R
class MovieSp(G2R.SpSyntax):
    """Syntax handler that renders movie-playback tags into Ren'Py script."""
    def Show(self,Flag,Attrs,US,UT,Tmp,FS):
        # Build and return the Ren'Py statements for this movie tag.
        sw=''
        name,Attrs=self.Check(Flag,Attrs,UT,FS)
        if name=='movie_StopMovie':
            # Stop request: fade the movie out, then halt playback.
            sw+=' hide movie with Dissolve(0.6)\n'
            sw+=' stop movie\n'
            return sw
        if Attrs['k']=='special':
            # One-shot fullscreen playback.
            sw+=' image movie = Movie(size=(1920,1080),xcenter=0.5,ycenter=0.5)\n'
            sw+=' show movie\n'
            sw+=' play movie '+name+'\n'
        elif Attrs['k']=='back':
            # Looping background movie, faded in.
            sw+=' image movie = Movie(size=(1920,1080),xcenter=0.5,ycenter=0.5)\n'
            sw+=' play movie '+name+' loop\n'
            sw+=' show movie with Dissolve(1.0)\n'
        return sw
4840544 | from infi import unittest
from infi.eventlog import LocalEventLog
class FacadeTestCase(unittest.TestCase):
    """Smoke tests for the LocalEventLog facade."""
    def test__open_system_channel(self):
        # Opening the "System" channel as a context manager should not raise.
        eventlog = LocalEventLog()
        with eventlog.open_channel_context("System"):
            pass
    def test__get_available_channels(self):
        # "System" is expected to be among the available channels.
        eventlog = LocalEventLog()
        channels = list(eventlog.get_available_channels())
        self.assertIn("System", channels)
    def test_get_query_generator(self):
        # Consuming the "System" query generator should not raise.
        eventlog = LocalEventLog()
        list(eventlog.event_query("System"))
1682859 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
from datadog_checks.utils.common import get_docker_hostname
# Shared fixtures/constants for the couch integration-check tests.
CHECK_NAME = "couch"
PORT = "5984"
HOST = get_docker_hostname()
URL = "http://{}:{}".format(HOST, PORT)
USER = "dduser"
PASSWORD = "<PASSWORD>"
HERE = os.path.dirname(os.path.abspath(__file__))
# Major version of the CouchDB under test, e.g. "2.3.1" -> 2.
# NOTE(review): assumes the COUCH_VERSION env var is set — os.getenv returns
# None otherwise and re.split would raise. Confirm the test harness sets it.
COUCH_MAJOR_VERSION = int(re.split(r'\D+', os.getenv('COUCH_VERSION'))[0])
# Publicly readable databases
DB_NAMES = ["_replicator", "_users", "kennel"]
# Server-wide metric names the check is expected to report.
GLOBAL_GAUGES = [
    "couchdb.couchdb.auth_cache_hits",
    "couchdb.couchdb.auth_cache_misses",
    "couchdb.httpd.requests",
    "couchdb.httpd_request_methods.GET",
    "couchdb.httpd_request_methods.PUT",
    "couchdb.couchdb.request_time",
    "couchdb.couchdb.open_os_files",
    "couchdb.couchdb.open_databases",
    "couchdb.httpd_status_codes.200",
    "couchdb.httpd_status_codes.201",
    "couchdb.httpd_status_codes.400",
    "couchdb.httpd_status_codes.401",
    "couchdb.httpd_status_codes.404",
]
# Per-database metric names.
CHECK_GAUGES = ["couchdb.by_db.disk_size", "couchdb.by_db.doc_count"]
BASIC_CONFIG = {"server": URL}
BASIC_CONFIG_V2 = {"server": URL, "user": "dduser", "password": "<PASSWORD>"}
BASIC_CONFIG_TAGS = ["instance:{}".format(URL)]
# Deliberately unreachable server, used to exercise failure paths.
BAD_CONFIG = {"server": "http://localhost:11111"}
BAD_CONFIG_TAGS = ["instance:http://localhost:11111"]
# Per-node configs for the three-node cluster setup.
NODE1 = {"server": URL, "user": USER, "password": PASSWORD, "name": "node1@127.0.0.1"}
NODE2 = {"server": URL, "user": USER, "password": PASSWORD, "name": "node2@127.0.0.1"}
NODE3 = {"server": URL, "user": USER, "password": PASSWORD, "name": "node3@127.0.0.1"}
| StarcoderdataPython |
3351750 | #!/usr/bin/env python
from setuptools import setup, find_packages
from ssdpy.version import VERSION
# The PyPI long description is the README, verbatim.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="ssdpy",
    version=VERSION,
    long_description=long_description,
    long_description_content_type="text/markdown",
    description="Python SSDP library",
    license="MIT",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/MoshiBin/ssdpy",
    packages=find_packages(exclude=["tests"]),
    # Supports 2.7 and 3.5+ (excludes 3.0-3.4), capped below 4.
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,<4",
    # Console entry points for the bundled server/client CLIs.
    entry_points={
        "console_scripts": [
            "ssdpy-server = ssdpy.cli.server:main",
            "ssdpy-discover = ssdpy.cli.client:main",
        ]
    },
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: System :: Networking",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| StarcoderdataPython |
3367050 | <gh_stars>1-10
import abc
from ... import utils
class Loss(abc.ABC):
    """Abstract base interface every loss function must implement."""
    @abc.abstractmethod
    def __call__(self, y_true, y_pred) -> float:
        """Returns the loss."""
    @abc.abstractmethod
    def gradient(self, y_true, y_pred) -> float:
        """Returns the gradient with respect to ``y_pred``."""
class ClassificationLoss(Loss):
    """Base class for classification losses."""
    @staticmethod
    def clamp_proba(p):
        # Keep probabilities strictly inside (0, 1) so downstream logs stay finite.
        return utils.clamp(p, minimum=1e-15, maximum=1 - 1e-15)
class BinaryClassificationLoss(ClassificationLoss):
    """A loss appropriate for binary classification tasks."""
class MultiClassificationLoss(ClassificationLoss):
    """A loss appropriate for multi-class classification tasks."""
class RegressionLoss(Loss):
    """A loss appropriate for regression tasks."""
| StarcoderdataPython |
3312489 | <gh_stars>0
from django.shortcuts import render,redirect
from products.models import Product
from django.http import JsonResponse
# Create your views here.
from addresses.forms import AddressForm
from addresses.models import Address
from accounts.models import GuestEmail
from billing.models import BillingProfile
from accounts.forms import LoginForm,GuestForm
from orders.models import Order
from .models import Cart
def cart_home(request):
    """Render the cart page for the current session's cart."""
    cart_obj,new = Cart.objects.new_or_get(request)
    return render(request,"carts/home.html",{'cart':cart_obj})
def cart_update(request):
    """Toggle a product in/out of the session cart; AJAX calls get JSON back."""
    product_id=request.POST.get('product_id')
    if product_id is not None:
        try:
            product_obj=Product.objects.get(id=product_id)
        except Product.DoesNotExist:
            print("show message to user,product is gone?")
            return redirect("carts:home")
        cart_obj,new = Cart.objects.new_or_get(request)
        # Toggle membership: remove if already in the cart, add otherwise.
        if product_obj in cart_obj.products.all():
            cart_obj.products.remove(product_obj)
            added=False
        else:
            cart_obj.products.add(product_obj)
            added=True
        # Keep the navbar cart-count badge in sync via the session.
        request.session['cart_item']=cart_obj.products.count()
        # return redirect(obj.get_absolute_url())
        if request.is_ajax():
            print("Ajax Request")
            json_data={
                "added":added,
                "removed": not added,
                "cartItemCount":cart_obj.products.count()
            }
            return JsonResponse(json_data)
    return redirect("carts:home")
def checkout_home(request):
    """Checkout page: assemble billing profile, order and addresses; a POST
    finalizes the order when it is complete."""
    cart_obj,cart_created = Cart.objects.new_or_get(request)
    order_obj=None
    # A freshly created or empty cart has nothing to check out.
    if cart_created or cart_obj.products.count() == 0:
        return redirect("carts:home")
    login_form=LoginForm()
    guest_form=GuestForm()
    address_form=AddressForm()
    # Address ids previously selected and stashed in the session.
    billing_address_id=request.session.get("billing_address_id",None)
    shipping_address_id=request.session.get("shipping_address_id",None)
    billing_profile,billing_profile_created=BillingProfile.objects.new_or_get(request)
    address_qs=None
    if billing_profile is not None:
        if request.user.is_authenticated:
            address_qs=Address.objects.filter(billing_profile=billing_profile)
        order_obj,order_obj_created=Order.objects.new_or_get(billing_profile,cart_obj)
        # Attach any session-selected addresses to the order, consuming them.
        if shipping_address_id:
            order_obj.shipping_address=Address.objects.get(id=shipping_address_id)
            del request.session['shipping_address_id']
        if billing_address_id:
            order_obj.billing_address=Address.objects.get(id=billing_address_id)
            del request.session['billing_address_id']
        if billing_address_id or shipping_address_id:
            order_obj.save()
    # NOTE(review): if billing_profile is None, order_obj stays None and a
    # POST would raise AttributeError below — confirm whether that path is
    # reachable in practice.
    if request.method=="POST":
        is_done=order_obj.check_done()
        if is_done:
            order_obj.mark_paid()
            # Empty the cart once the order is paid.
            request.session['cart_item']=0
            del request.session['cart_id']
            return redirect("carts:success")
    context={
        "login_form":login_form,
        "object":order_obj,
        "billing_profile":billing_profile,
        "guest_form":guest_form,
        "address_form":address_form,
        "address_qs":address_qs
    }
    return render(request,"carts/checkout.html",context)
def checkout_done_view(request):
    """Render the static checkout-complete page."""
    return render(request,"carts/checkout-done.html",{})
| StarcoderdataPython |
3269209 | <reponame>NimbleStorage/container-examples
import falcon
import json
import imprint
import requests
import mysql.connector
import sys
from os import environ
from time import time
class PingResource:
    """GET /api/_ping: report DB round-trip latency, serving host and version."""
    def __init__(self):
        # App version is read once at startup.
        self.v = version()
    def on_get(self, req, resp, **kwargs):
        db = Ping()
        ping = {
            'response_time_ms': db.response_time,
            'served_by': environ['HOSTNAME'],
            'version': self.v
        }
        resp.body = json.dumps(ping)
class ImprintResource:
    """GET /api/_imprint: return the image produced by imprint.main() as PNG."""
    def on_get(self, req, resp):
        resp.set_header('Content-Type', 'image/png')
        resp.body = imprint.main()
class PopulousResource:
    """POST /api/populous: request generated rows from DATAGEN and bulk-insert them."""
    def on_post(self, req, resp):
        # template.json describes the fake data to request from the generator.
        f = open('template.json', 'r')
        r = requests.post('%s/api/v1/data' % \
            (environ['DATAGEN']), data=json.dumps(json.load(f)))
        db = Populate()
        for row in r.json():
            db.insert(row['guid'], row['name'], row['zip'], row['city'], \
                row['pid'], row['street'], row['email'], imprint.main())
        db.commit()
        resp.body = json.dumps({ 'status': 200 })
class DbConnect:
    """Opens a MySQL connection from POPULOUS_* env vars; 503s on failure."""
    def __init__(self):
        try:
            # NOTE(review): '<PASSWORD>' looks like a redaction artifact from
            # the dataset — confirm the real env var name in the original repo.
            self.cnx = mysql.connector.connect(user=environ['POPULOUS_USER'], \
                password=environ['<PASSWORD>'], \
                host=environ['POPULOUS_HOST'], \
                db=environ['POPULOUS_DB'])
        except Exception:
            raise falcon.HTTPServiceUnavailable()
class Populate:
    """Batched inserts into the `main` table via a parameterized statement."""
    def __init__(self):
        self.cnx = DbConnect().cnx
        self.cursor = self.cnx.cursor()
        # Parameterized statement: values are bound by the driver, so no
        # string-built SQL.
        self.stmt = ("INSERT INTO main "
                 "(guid, name, zip, city, pid, street, email, imprint) "
                 "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
    def insert(self, guid, name, zip, city, pid, street, email, imprint):
        # Queues one row; nothing is persisted until commit().
        self.cursor.execute(self.stmt, (guid, name, zip, city, pid, street, email, imprint))
    def commit(self):
        try:
            self.cnx.commit()
        except mysql.connector.Error as err:
            # Best-effort: log the failure but still close the connection.
            sys.stderr.write("Commit error: {}".format(err))
        self.cursor.close()
        self.cnx.close()
class CensusResource:
    """GET /api/census: return the approximate row count of the main table."""
    def on_get(self, req, resp):
        db = Census()
        census = {
            'census': db.census
        }
        resp.body = json.dumps(census)
class Census:
    """Reads the table row estimate from SHOW TABLE STATUS on construction."""
    def __init__(self):
        self.census = 0
        cnx = DbConnect().cnx
        cursor = cnx.cursor()
        query = "SHOW TABLE STATUS"
        cursor.execute(query)
        rows = cursor.fetchall()
        # Column 4 of SHOW TABLE STATUS is the Rows estimate for the first table.
        self.census = rows[0][4]
        cursor.close()
        cnx.close()
class Ping:
    """Times a random-row SELECT against the DB; result in response_time (ms)."""
    def __init__(self):
        self.response_time = 0
        t1 = time()
        cnx = DbConnect().cnx
        cursor = cnx.cursor()
        # Pick a pseudo-random row by joining against a random id <= MAX(id).
        query = ("SELECT * "
                 " FROM main AS r1 JOIN "
                 " (SELECT CEIL(RAND() * "
                 " (SELECT MAX(id) "
                 " FROM main)) AS id) "
                 " AS r2 "
                 "WHERE r1.id >= r2.id "
                 "ORDER BY r1.id ASC "
                 "LIMIT 1")
        cursor.execute(query)
        rows = cursor.fetchall()
        cursor.close()
        cnx.close()
        t2 = time()
        # Total wall-clock time including connect, in milliseconds.
        self.response_time = (t2 - t1) * 1000
def error_serializer(req, resp, exception):
    # Serialize falcon errors as JSON bodies instead of the default format.
    resp.body = exception.to_json()
def version():
    """Return the contents of the local ``VERSION`` file, or ``'unknown'``.

    The original left the file handle open on the success path; a context
    manager now guarantees it is closed. Any failure (missing file, bad
    permissions, ...) degrades gracefully to the sentinel value, matching
    the original broad-except behavior.
    """
    try:
        with open('VERSION') as f:
            return f.read()
    except Exception:
        return 'unknown'
# Wire up the WSGI app: custom JSON error serializer plus the four routes.
api = falcon.API()
api.set_error_serializer(error_serializer)
api.add_route('/api/populous', PopulousResource())
api.add_route('/api/census', CensusResource())
api.add_route('/api/_ping', PingResource())
api.add_route('/api/_imprint', ImprintResource())
| StarcoderdataPython |
1773484 | # (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
from ecmwfapi import ECMWFService
from pyaviso.notification_manager import NotificationManager
# define function to be called
def retrieve_from_mars(notification):
    """Aviso trigger callback: fetch the notified field from MARS to a GRIB file."""
    print(f"Notification for step {notification['request']['step']} received")
    # now do a MARS request with this notification...
    mars_server = ECMWFService("mars")
    request = notification["request"]
    # extend the notification with the attributes needed for a full MARS request
    request.update({"type": "fc", "levtype": "sfc", "param": 167.128, "area": "75/-20/10/60"})
    mars_server.execute(request, "my_data.grib")
# define the trigger that invokes the function above
trigger = {"type": "function", "function": retrieve_from_mars}
# create an event listener request that uses that trigger
request = {"class": "od", "stream": "oper", "expver": 1, "domain": "g", "step": 1}
listener = {"event": "mars", "request": request, "triggers": [trigger]}
listeners = {"listeners": [listener]}
# run it (listen() keeps the process subscribed to notifications)
aviso = NotificationManager()
aviso.listen(listeners=listeners)
| StarcoderdataPython |
40257 | import torch
import numpy as np
import cv2
def tonumpyimg(img):
    """
    Convert a normalized tensor image to unnormalized uint8 numpy image
    For single channel image, no unnormalization is done.
    :param img: torch, normalized, (3, H, W), (H, W)
    :return: numpy: (H, W, 3), (H, W). uint8
    """
    # Pipeline: undo normalization -> CHW->HWC numpy -> scale (0,1) to uint8.
    return touint8(tonumpy(unnormalize_torch(img)))
def tonumpy(img):
    """Convert a torch image map to a numpy image map (values unchanged).

    (C, H, W) tensors come back as (H, W, C); (H, W) tensors keep their shape.
    """
    if img.dim() != 2:
        img = img.permute(1, 2, 0)
    return img.detach().cpu().numpy()
def touint8(img):
    """Scale a float image in (0, 1) to a uint8 image in (0, 255).

    Values are truncated (not rounded) by the cast.
    """
    return (img * 255).astype(np.uint8)
def normalize_torch(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Channel-wise normalize a (3, H, W) torch image in the (0, 1) range.

    Works on a clone, so the caller's tensor is left untouched.
    :param img: (3, H, W), in range (0, 1)
    :return: normalized copy of *img*
    """
    mean_t = torch.tensor(mean).view(3, 1, 1)
    std_t = torch.tensor(std).view(3, 1, 1)
    out = img.clone()
    out -= mean_t
    out /= std_t
    return out
def unnormalize_torch(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Convert a normalized (3, H, W) tensor image back to unnormalized form.

    For single channel images, no unnormalization is done and the input is
    returned as-is.

    Fix vs original: the in-place ``*=``/``+=`` mutated the caller's tensor;
    we now clone first, matching normalize_torch's copy semantics.
    :param img: (C, H, W) or (H, W)
    :return: unnormalized tensor (a copy for 3-channel inputs)
    """
    if img.size()[0] == 3:
        img = img.clone()
        img *= torch.tensor(std).view(3, 1, 1)
        img += torch.tensor(mean).view(3, 1, 1)
    return img
def gray2RGB(img_raw):
    """Coerce an image to exactly three channels.

    (H, W) gray images are replicated to (H, W, 3); images with more than
    three channels (e.g. RGBA) are truncated to the first three.
    :param img_raw: (H, W, C) or (H, W), uint8, numpy
    :return: (H, W, 3)
    """
    if img_raw.ndim == 2:
        img_raw = np.repeat(img_raw[:, :, None], 3, axis=2)
    return img_raw[:, :, :3] if img_raw.shape[2] > 3 else img_raw
def color_scale(attention):
    """Render a softmaxed attention map as a color image.

    Each pixel takes the palette color of its arg-max channel
    (0=red, 1=green, 2=blue, 3=black).
    :param attention: (C, H, W) attention map, softmaxed
    :return: (3, H, W) colored version
    """
    palette = torch.Tensor([
        [1, 0, 0],  # red
        [0, 1, 0],  # green
        [0, 0, 1],  # blue
        [0, 0, 0],  # black
    ]).float()
    # (H, W) map of winning channel indices.
    winners = torch.argmax(attention, dim=0)
    # Index the palette per pixel -> (H, W, 3), then move channels first.
    return palette[winners].permute(2, 0, 1)
def warp_torch(map, H):
    """Apply a perspective warp to a torch image.

    :param map: image tensor, either (C, H, W) or (H, W)
    :param H: (3, 3) homography matrix
    :return: warped image tensor with the same layout as the input
    """
    map = tonumpy(map)
    # After tonumpy a (C, H, W) tensor is laid out (H, W, C), so the spatial
    # size is always the FIRST two axes. The original read shape[-2:], which
    # for 3-channel images picked up (W, C) instead of (H, W).
    h, w = map.shape[:2]
    map = cv2.warpPerspective(map, H, dsize=(w, h))
    return totensor(map)
def torange(array, low, high):
    """Affinely rescale *array* so its minimum maps to *low* and maximum to *high*.

    :param array: any array
    :param low, high: target range bounds
    :return: rescaled array
    """
    lo, hi = array.min(), array.max()
    # First squash into [0, 1] ...
    unit = (array - lo) / (hi - lo)
    # ... then stretch/shift into [low, high].
    return unit * (high - low) + low
def tofloat(img):
    """Convert a uint8 image to a float64 image in [0, 1].

    Fix vs original: ``np.float`` is a long-deprecated alias removed in
    NumPy 1.24, so the original raises AttributeError on modern NumPy;
    ``np.float64`` is the equivalent concrete dtype.
    :param img: numpy image, uint8
    :return: float64 image
    """
    return img.astype(np.float64) / 255
def tonumpy_batch(imgs):
    """Convert a batch of torch images to numpy layout (range unchanged).

    :param imgs: (B, C, H, W)
    :return: (B, H, W, C)
    """
    return imgs.detach().cpu().permute(0, 2, 3, 1).numpy()
def totensor(img, device=torch.device('cpu')):
    """Inverse of ``tonumpy``: (H, W, C) numpy -> (C, H, W) float tensor.

    2-D (H, W) inputs keep their shape.
    """
    tensor = torch.from_numpy(img).to(device).float()
    if tensor.dim() != 2:
        tensor = tensor.permute(2, 0, 1)
    return tensor
def totensor_batch(imgs, device=torch.device('cpu')):
    """Inverse of ``tonumpy_batch``: (B, H, W, C) numpy -> (B, C, H, W) float tensor."""
    batch = torch.from_numpy(imgs)
    return batch.permute(0, 3, 1, 2).to(device).float()
def RGB2BGR(*imgs):
    # Channel-swap each numpy image; always returns a list, even for one input.
    return [cv2.cvtColor(x, cv2.COLOR_RGB2BGR) for x in imgs]
def unnormalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Convert a normalized batch of tensor images to unnormalized form.

    The (3, 1, 1) mean/std views broadcast over the batch dimension.

    Fix vs original: ``.detach().cpu()`` can return a tensor sharing the
    input's storage (when the input is already on CPU), so the in-place
    ops mutated the caller's tensor; an explicit ``clone()`` prevents that.
    :param img: (B, 3, H, W)
    :return: detached CPU copy with normalization undone
    """
    img = img.detach().cpu().clone()
    img *= torch.tensor(std).view(3, 1, 1)
    img += torch.tensor(mean).view(3, 1, 1)
    return img
def toUint8RGB(img):
    # Undo normalization, move to (H, W, C) numpy layout, scale to 0-255 uint8.
    return (tonumpy(unnormalize(img)) * 255.).astype(np.uint8)
| StarcoderdataPython |
4825947 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import numpy as np
import os
import re
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from itertools import product
from scipy.interpolate import griddata
from serial import Serial
from time import sleep, time
class Probe():
    """Height-maps a PCB surface by probing a grid of points with a GRBL 1.1f CNC.

    Talks to the controller over a serial port. The probed Z offsets are
    collected as JSON and later consumed by correct_gcode() to bend milling
    gcode onto the board's real surface.
    """
    def __init__(self, device, input_gcode, grid_spacing, feed_rate, overscan, min_z, max_z):
        # Serial handle; opened lazily on the first send() (see init_grbl).
        self.ser = None
        self.device = device
        self.input_gcode = input_gcode
        self.grid_spacing = grid_spacing
        self.feed_rate = feed_rate
        self.overscan = overscan
        self.min_z = min_z
        self.max_z = max_z
        # Serial read timeout (seconds); generous, probe moves can be slow.
        self.ser_timeout = 120
        # Z feed rates (mm/min) for the two-pass zeroing in probe_origin().
        self.fine_feed_probe = 1
        self.coarse_feed_probe = 40
        # Maximum downward Z travel while searching for first contact (mm).
        self.z_max_travel = 40
        # Gcode X/Y word extractors and GRBL status/probe report parsers.
        self.x_coords_re = re.compile(r'X\s*(-?[0-9]+(?:\.[0-9]+)?)')
        self.y_coords_re = re.compile(r'Y\s*(-?[0-9]+(?:\.[0-9]+)?)')
        self.mpos_re = re.compile(r'\|MPos:(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+)')
        self.probe_re = re.compile(r'\[PRB:(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+):([0-1])\]')
    def init_grbl(self):
        """Open the serial link, unlock GRBL if needed and zero the work coords."""
        # open serial port and wait for welcome msg
        self.ser = Serial(self.device, 115200, timeout=self.ser_timeout)
        data = ''
        while "Grbl 1.1f ['$' for help]" != data:
            data = self.ser.readline().strip()
        self.ser.timeout = 1
        # If the controller booted locked/alarmed, issue the $X unlock.
        if '''[MSG:'$H'|'$X' to unlock]''' in self.ser.readline().strip():
            self.send('$X', wait_for_idle=False)
            self.ser.reset_input_buffer()
        self.ser.timeout = self.ser_timeout
        # set millimeter mode
        self.send('G21')
        # set absolute coords
        self.send('G90')
        # reset work coords
        self.send('G92X0Y0Z0')
        # set local relative offset
        self.zero_wpos = self.get_abs_pos()
    def send(self, data, newline=True, wait_for_idle=True):
        """Send one command to GRBL and parse its reply.

        Returns True for 'ok', the collected text for '[...]' reports, the raw
        line otherwise; raises on 'error:'/'ALARM:' responses.
        """
        # open serial only on first send
        if self.ser is None:
            self.init_grbl()
        # wait for machine to be idle (not moving)
        if wait_for_idle:
            while True:
                self.ser.write('?')
                if '<Idle|' in self.ser.readline():
                    break
                sleep(.25)
        # send data and wait for answer
        self.ser.write(data + ('\n' if newline else ''))
        resp = self.ser.readline().strip()
        # parse and return responses
        if resp == 'ok':
            return True
        elif 'error:' in resp or 'ALARM:' in resp:
            raise Exception(resp)
        elif resp.startswith('['):
            # Multi-line '[...]' reports are collected until the closing 'ok'.
            out = [resp]
            while True:
                resp = self.ser.readline().strip()
                if resp.startswith('['):
                    out.append(resp)
                elif resp == 'ok':
                    return '\n'.join(out)
        return resp
    def get_rel_coord(self, coords):
        """Translate absolute machine coords into work (zero-relative) coords."""
        resp = {}
        for coord in 'xyz':
            if coord in coords:
                resp[coord] = -self.zero_wpos[coord] + coords[coord]
        return resp
    def get_abs_pos(self):
        """Return the absolute machine position once the machine reports Idle."""
        # wait for machine to be idle
        while True:
            mpos = self.send('?', newline=False)
            if '<Idle|' in mpos:
                break
            sleep(.25)
        mpos = tuple(map(float, self.mpos_re.findall(mpos)[0]))
        return {'x': mpos[0], 'y': mpos[1], 'z': mpos[2]}
    def get_pos(self):
        # get current position in relative coords
        return self.get_rel_coord(self.get_abs_pos())
    def probe(self, min_z, feed_rate, retract=None, zero_coords=False):
        """Probe straight down (G38.3) until contact or *min_z* is reached.

        :param min_z: lowest Z to try (work coords, must be negative)
        :param feed_rate: probing feed rate (mm/min)
        :param retract: optional Z height to retract to afterwards
        :param zero_coords: if True, re-zero the work Z at the contact point
        :return: (contact point in work coords, success flag)
        """
        assert (min_z < 0)
        assert (retract is None or retract >= 0)
        resp = self.send('G38.3 Z{:.5f} F{:.0f}'.format(min_z, feed_rate))
        resp = self.probe_re.findall(resp)[0]
        # NOTE(review): bool(resp[-1]) is True for both '0' and '1' because
        # resp[-1] is a non-empty string — a failed probe may be reported as
        # success. Likely should be `resp[-1] == '1'`; confirm on hardware.
        probe_point, probe_success = tuple(map(float, resp[:3])), bool(resp[-1])
        # zero out work coords
        if probe_success and zero_coords:
            # zero out work offset
            self.send('G92Z{:.5f}'.format(self.get_abs_pos()['z'] - probe_point[2]))
            # go to effective zero since probe might have stopped after
            # the probe touchdown (due to deceleration)
            self.send('G01Z0F1')
            # set new local relative offset
            self.zero_wpos = self.get_abs_pos()
        if retract is not None:
            self.send('G0Z{:.5f}'.format(retract))
        probe_point = {'x': probe_point[0], 'y': probe_point[1], 'z': 0. if zero_coords else probe_point[2]}
        return self.get_rel_coord(probe_point), probe_success
    def probe_origin(self):
        """Zero Z at the current X/Y: fast coarse touch, then slow fine touch."""
        sys.stdout.write('\n[I] Zeroing Z in origin using coarse mode (F{:.0f})... '.format(self.coarse_feed_probe))
        sys.stdout.flush()
        # raise Z axis a bit to avoid potential alarm
        self.send('G0Z1')
        if not self.probe(-self.z_max_travel, self.coarse_feed_probe, zero_coords=True)[1]:
            print('\n\n[E] Probe error!')
            sys.exit(1)
        self.send('G1Z.1F1')
        sys.stdout.write('Done.\n[I] Zeroing Z in origin using fine mode (F{:.0f})... '.format(self.fine_feed_probe))
        sys.stdout.flush()
        if not self.probe(-.4, self.fine_feed_probe, zero_coords=True)[1]:
            print('\n\n[E] Probe error!')
            sys.exit(1)
        print('Done.')
    def return_home(self):
        """Move back above the origin and park the tool at Z0.2."""
        print('\n[I] Returning home. X0 Y0 Z0.2')
        self.send('G0Z5')
        self.send('G0X0Y0')
        self.send('G0Z.5')
        self.send('G1Z.2F10')
    def get_workspace_size(self):
        """Return the gcode's XY bounding box as (minx, maxx, miny, maxy)."""
        # get all X and Y coords in the gcode file
        X = np.asarray(self.x_coords_re.findall(self.input_gcode), np.double)
        Y = np.asarray(self.y_coords_re.findall(self.input_gcode), np.double)
        # find boundaries
        return min(X), max(X), min(Y), max(Y)
    def get_probe_coords(self):
        """Compute the zig-zag-ordered grid of (x, y) probe points."""
        minx, maxx, miny, maxy = self.get_workspace_size()
        print('\n[I] Gcode area (WxH): {:.2f}mm x {:.2f}mm'.format(abs(maxx - minx), abs(maxy - miny)))
        if self.overscan != 0:
            # Grow the probe area by the overscan margin on every edge.
            minx, maxx = minx - self.overscan, maxx + self.overscan
            miny, maxy = miny - self.overscan, maxy + self.overscan
            print('[I] Probe area with overscan (WxH): {:.2f}mm x {:.2f}mm'.format(abs(maxx - minx), abs(maxy - miny)))
        # At least 2 points per axis, spaced as close to grid_spacing as possible.
        x_steps = max(2, int(round(abs(maxx - minx) / self.grid_spacing)) + 1)
        x_spacing = abs(maxx - minx) / (x_steps - 1)
        X = np.linspace(minx, maxx, x_steps)
        y_steps = max(2, int(round(abs(maxy - miny) / self.grid_spacing)) + 1)
        y_spacing = abs(maxy - miny) / (y_steps - 1)
        Y = np.linspace(miny, maxy, y_steps)
        coords = tuple(product(X, Y))
        # sort probing coords in zig-zag to minimize path length
        sorted_coords = []
        for x in sorted(X):
            tmp = [point for point in coords if point[0] == x]
            sorted_coords.append(sorted(tmp, key=lambda point: point[1], reverse=len(sorted_coords) % 2 == 1))
        sorted_coords = [item for sublist in sorted_coords for item in sublist]
        self.probe_coords = sorted_coords
        self.X, self.Y = X, Y
        print('[I] Probing {:d} points, {:.5f}mm x-grid, {:.5f}mm y-grid:'.format(
            len(sorted_coords), x_spacing, y_spacing))
        # return the probing grid
        return sorted_coords
    def probe_grid(self):
        """Probe every grid point, accumulating results in self.probe_result."""
        # probe the surface using the calculated grid
        self.probe_result = []
        start_t = time()
        for i, (x, y) in enumerate(self.probe_coords):
            sys.stdout.write('[{:03d}] Probing x: {:.1f} y: {:.1f} '.format(i + 1, x, y))
            sys.stdout.flush()
            # skip probing point X0 Y0 if exists (it is the zero reference)
            if x == y == 0.:
                probe_point, probe_success = {'z': 0.}, True
            else:
                # raising probe Z to max_z
                self.send('G0Z{:.5f}'.format(self.max_z))
                # moving to next probe point
                self.send('G0X{:.5f}Y{:.5f}'.format(x, y))
                # do probe
                probe_point, probe_success = self.probe(self.min_z, self.feed_rate, retract=self.max_z)
            if not probe_success:
                print('\n[E] Unable to probe point!')
                self.return_home()
                sys.exit(1)
            now = datetime.fromtimestamp(int(time())).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            # Record the probed point together with its grid indices.
            result = {
                "sent": True,
                "done": True,
                "x": float(x),
                "y": float(y),
                "z": float(probe_point['z']),
                "ts": now,
                "xindx": int(np.where(self.X == x)[0][0]),
                "yindx": int(np.where(self.Y == y)[0][0]),
            }
            self.probe_result.append(result)
            # Progress estimate based on the average time per point so far.
            elapsed_t = time() - start_t
            eta_t = (elapsed_t / (i + 1)) * (len(self.probe_coords) - (i + 1))
            print('z: {:.5f}\t\tETA: {}'.format(result['z'], timedelta(seconds=int(eta_t))))
        print('')
    def get_json(self):
        # return a json string with the probe result
        return json.dumps(self.probe_result)
def correct_gcode(input_gcode, probe_json):
    """Height-correct gcode using a probed Z map.

    Every Z word is offset by the surface height interpolated at its (X, Y)
    position (linear interpolation inside the probe grid, nearest-point
    fallback outside its convex hull); XY moves without a Z word get one
    appended. X/Y/Z are modal: missing words keep their last seen value.

    Fixes vs original: ``np.vstack(zip(...))`` is rejected by modern NumPy
    (vstack requires a proper sequence, not a generator), and an input with
    no coordinate moves crashed instead of passing through unchanged.

    :param input_gcode: gcode text to correct
    :param probe_json: JSON list of {"x", "y", "z"} probe points
    :return: corrected gcode as ascii-encoded bytes
    """
    probe_json = json.loads(probe_json)
    X = np.asarray([point['x'] for point in probe_json], np.double)
    Y = np.asarray([point['y'] for point in probe_json], np.double)
    points = np.vstack((X, Y)).T
    values = np.asarray([point['z'] for point in probe_json], np.double)
    regexps = {
        'x': re.compile(r'x\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
        'y': re.compile(r'y\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
        'z': re.compile(r'z\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
    }
    # split input gcode by line, filtering empty lines
    input_gcode = list(filter(lambda x: x, map(lambda x: x.strip(), input_gcode.split('\n'))))
    result = []
    cur_coords = [0] * 3
    for i, line in enumerate(input_gcode):
        # skip comments
        if line.startswith(';') or line.startswith('('):
            continue
        cur_line = ''
        # update the modal coordinate state from this line's words
        for j, coord in enumerate(('x', 'y', 'z')):
            match = regexps[coord].search(line)
            if match:
                cur_coords[j] = float(match.group(1))
                # keep track of which coordinate we have found in this gcode line
                cur_line += coord
        # a line with a Z word gets its Z replaced in place
        if 'z' in cur_line:
            result.append((i, 'sub', cur_coords[:]))
        # an XY-only move gets a Z word appended
        elif 'x' in cur_line or 'y' in cur_line:
            result.append((i, 'append', cur_coords[:]))
    if not result:
        # nothing to correct (e.g. comments only) — pass through unchanged
        return '\n'.join(input_gcode).encode('ascii')
    # (n, 3) array of the (x, y, z) positions that need adjusting
    gcode_points = np.asarray([item[2] for item in result], np.double)
    # interpolate the probed surface at each point and add the original Z back
    newZval_lin = griddata(points, values, gcode_points[:, :2], method='linear') + gcode_points[:, 2]
    newZval_near = griddata(points, values, gcode_points[:, :2], method='nearest') + gcode_points[:, 2]
    for i, newZval in enumerate(newZval_lin):
        j, action = result[i][:2]
        # nan means the point fell outside the probing grid's convex hull;
        # fall back to the nearest probed value
        if np.isnan(newZval):
            newZval = newZval_near[i]
        # replace or add the new Z value
        if action == 'sub':
            input_gcode[j] = regexps['z'].sub('Z{:.5f}'.format(newZval), input_gcode[j])
        else:
            input_gcode[j] += ' Z{:.5f}'.format(newZval)
    return '\n'.join(input_gcode).encode('ascii')
def parse_args(argv=None):
    """Parse and validate command line arguments for the autoprober.

    Parameters
    ----------
    argv : list of str, optional
        Argument strings to parse; defaults to ``sys.argv[1:]``, so the
        original zero-argument call keeps working.

    Returns
    -------
    argparse.Namespace
        Parsed arguments; ``args.which`` is ``'probe'`` or ``'correct'``.
    """
    # parse command line arguments
    parser = ArgumentParser(description='pcb surface autoprober')
    # dest='which' + required=True makes argparse emit a proper usage error when
    # no sub-command is given, instead of the old AttributeError on args.which.
    subparsers = parser.add_subparsers(title='actions', dest='which')
    subparsers.required = True
    probe_parsers = subparsers.add_parser('probe', help='probe the surface and generate JSON report')
    probe_parsers.set_defaults(which='probe')
    probe_parsers.add_argument(
        '-i',
        metavar='INPUT_GCODE',
        dest='input_gcode',
        help='input gcode for automatic surface probing',
        required=True)
    probe_parsers.add_argument('-l', dest='output', help='output JSON file containing probe points', required=True)
    probe_parsers.add_argument(
        '-g', '--grid', metavar='mm', type=float, dest='grid_spacing', help='probe grid spacing (mm)', required=True)
    probe_parsers.add_argument(
        '-d', '--device', metavar='serial_device', dest='device', default='/dev/ttyUSB0', help='GRBL device')
    probe_parsers.add_argument(
        '-f',
        '--feed',
        metavar='mm/min',
        type=int,
        dest='feed_rate',
        default=5,
        help='probing feed rate on Z axis (default 5 mm/min)')
    probe_parsers.add_argument(
        '--maxz',
        metavar='mm',
        type=float,
        dest='max_z',
        default=.5,
        help='start probing at this Z axis value (default 0.5 mm)')
    probe_parsers.add_argument(
        '--minz',
        metavar='mm',
        type=float,
        dest='min_z',
        default=-.5,
        help='stop probing if Z axis reaches this value (default -0.5 mm)')
    probe_parsers.add_argument(
        '--overscan',
        metavar='mm',
        type=float,
        default=1.0,
        dest='overscan',
        help='probe grid overscan. the probe grid will be this value larger on every edge (mm)')
    correct_parsers = subparsers.add_parser('correct', help='correct the input gcode with the probing result')
    correct_parsers.set_defaults(which='correct')
    correct_parsers.add_argument(
        metavar='INPUT_GCODE', dest='input_gcode', help='input gcode file to be corrected', nargs='+')
    # correct_parsers.add_argument('-o', metavar='OUTPUT_GCODE', dest='output',
    #                              help='corrected output gcode file (default to lvl_<input_gcode_name>)')
    correct_parsers.add_argument(
        '-l', dest='input_json', help='input JSON file containing probe points', required=True)
    args = parser.parse_args(argv)
    if args.which == 'probe':
        # Explicit validation instead of assert: asserts are stripped under
        # `python -O`, which would silently disable these sanity checks.
        if args.max_z <= args.min_z:
            parser.error('--maxz must be greater than --minz')
        if args.feed_rate <= 0:
            parser.error('--feed must be positive')
        if args.grid_spacing <= 0:
            parser.error('--grid must be positive')
    return args
if __name__ == '__main__':
    args = parse_args()
    if args.which == 'probe':
        # Read the probing gcode once. The original opened and validated the
        # very same files twice, in two consecutive duplicated blocks.
        try:
            with open(args.input_gcode, 'rb') as input_f:
                input_gcode = input_f.read().decode('utf-8')
        except IOError:
            print('[E] Unable to open input file.')
            sys.exit(1)
        if not args.output:
            # Fallback name next to the input file (kept for safety even
            # though -l is required, so args.output is normally always set).
            dirname = os.path.dirname(args.input_gcode)
            filename = os.path.basename(args.input_gcode)
            args.output = os.path.join(dirname, 'lvl_{}'.format(filename))
        # Verify the output file is writable before starting a long probe run.
        try:
            with open(args.output, 'ab') as output_f:
                pass
        except IOError:
            print('[E] Unable to write to output file.')
            sys.exit(1)
        prober = Probe(args.device, input_gcode, args.grid_spacing, args.feed_rate, args.overscan, args.min_z,
                       args.max_z)
        prober.get_probe_coords()
        # python 2/3 compatibility
        _input = getattr(__builtins__, 'raw_input', input)
        if _input('[?] Do you want to probe the surface? [y/N] ') != 'y':
            sys.exit(0)
        prober.probe_origin()
        try:
            prober.probe_grid()
            with open(args.output, 'wb') as output_f:
                output_f.write(prober.get_json())
            print('\n[I] All done.')
        except KeyboardInterrupt:
            # Leave the machine in a known state if the user aborts mid-probe.
            prober.get_pos()
            prober.return_home()
    elif args.which == 'correct':
        try:
            with open(args.input_json, 'rb') as input_f:
                input_json = input_f.read().decode('utf-8')
        except IOError:
            print('[E] Unable to open JSON file.')
            sys.exit(1)
        for fname in args.input_gcode:
            try:
                with open(fname, 'rb') as input_f:
                    input_gcode = input_f.read().decode('utf-8')
            except IOError:
                print('[E] Unable to open input file.')
                sys.exit(1)
            output_gcode = correct_gcode(input_gcode, input_json)
            dirname = os.path.dirname(fname)
            filename = os.path.basename(fname)
            try:
                output = os.path.join(dirname, 'lvl_{}'.format(filename))
                with open(output, 'wb') as output_f:
                    output_f.write(output_gcode)
            except IOError:
                print('[E] Unable to write to output file.')
                sys.exit(1)
            print('[I] All done.')
| StarcoderdataPython |
85688 | <reponame>Moggs/Workbench
from tkinter import *
from tkinter.ttk import *
from Moggs import *
import win32print
import os
import sys
class Printer(Toplevel):
    """Simple print dialog: pick a printer and send raw spool files to it."""

    def __init__(self, parent, filelist=None, printer=None):
        """Build the dialog.

        parent   -- Tk parent window
        filelist -- paths of temporary files to print (deleted on cancel)
        printer  -- optional printer name overriding the system default
        """
        Toplevel.__init__(self, parent)
        # Avoid the mutable-default-argument trap: each instance gets its own list.
        self.filelist = filelist if filelist is not None else []
        self.parent = parent
        self.printer_list = []
        ## build the print list
        ## network printers first I think...?
        try:
            for p in win32print.EnumPrinters(win32print.PRINTER_ENUM_CONNECTIONS):
                self.printer_list.append(repr(p[2]).replace("'", ""))
        except Exception:
            # Enumerating network printers can fail (no network, no domain);
            # local printers below are still usable, so this is best-effort.
            pass
        ## local printers?
        for p in win32print.EnumPrinters(win32print.PRINTER_ENUM_LOCAL):
            self.printer_list.append(str(p[2]))
        ## get and set the default printer
        self.default_printer = win32print.GetDefaultPrinter()
        if printer:
            self.default_printer = printer
        self.last_printer = self.default_printer
        lf = LabelFrame(self, text="Print")
        lf.pack(fill="both", expand="yes")
        self.printer = LabelCombobox(lf, text="Printer")
        self.printer.set(self.default_printer)
        self.printer.setlist(self.printer_list)
        self.printer.pack(fill="x")
        bb = Buttonbox(self)
        bb.add("Ok", command=self.print_file)
        bb.add("Close", command=self.cancel)
        bb.pack(fill="both")

    def cancel(self):
        """Delete the temporary spool files and hide the dialog."""
        for file in self.filelist:
            os.remove(file)
        self.withdraw()

    def print_file(self):
        """Send every file in self.filelist to the selected printer as RAW data."""
        printer = win32print.OpenPrinter(self.printer.get())
        try:
            for file in self.filelist:
                win32print.StartDocPrinter(printer, 1, ('FamilyTree', None, 'RAW'))
                # Context manager closes the file handle promptly; the original
                # leaked it until garbage collection.
                with open(file, 'rb') as fh:
                    win32print.WritePrinter(printer, fh.read())
                win32print.EndDocPrinter(printer)
        finally:
            # Always release the printer handle, even if a write fails.
            win32print.ClosePrinter(printer)
        self.withdraw()
# Manual test entry point: show the dialog standalone with no files and an
# empty printer-name override (falls back to the system default printer).
if __name__=="__main__":
    Printer(None, "", "").mainloop()
| StarcoderdataPython |
3214387 | <filename>Kivy/Kivy/Bk_Interractive/sample/Chapter_06_code/10 - Search - query the TED Developer API/loaddialog.py
# File name: loaddialog.py
import kivy
kivy.require('1.9.0')
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_file('loaddialog.kv')
class LoadDialog(FloatLayout):
    # Callbacks injected by the caller (widget layout lives in loaddialog.kv):
    # `load` is invoked with the chosen selection, `cancel` dismisses the dialog.
    load = ObjectProperty(None)
    cancel = ObjectProperty(None)
| StarcoderdataPython |
44093 | <reponame>wax911/anime-meta
from .type_registries import CoreEntityCodec, CacheLogEntityCodec, SigningPolicyEntityCodec, \
IndexEntityCodec, AdBreakEntityCodec, EpisodeEntityCodec, SeasonEntityCodec, SeriesEntityCodec, \
ImageContainerEntityCodec, MoviePanelEntityCodec, SearchMetaEntityCodec, SeriesPanelEntityCodec, \
ImageEntityCodec, MovieEntityCodec, PanelEntityCodec
| StarcoderdataPython |
3333498 | import os
import sys
from os import listdir, makedirs
from os.path import isfile, join
import hashlib
from typing import Counter
from PIL import Image
from send2trash import send2trash
# Base directory prepended when deleting files with send2trash().
# NOTE(review): main() below assigns a *local* `directory`, so this
# module-level value stays "" and os.path.join("", relpath) degrades to the
# relative path; deletion then works relative to the CWD -- confirm before
# refactoring.
directory = ""

def main():
    """Find and trash duplicate images: first inside every subfolder of the
    current working directory, then across each pair of subfolders."""
    # delete (send to trach) dublicates inside every subfolder of the current directory the .py file placed in
    # then delete the dublicates over subfolders:
    directory = os.getcwd()  # NOTE(review): local variable, shadows the global above
    subs = []
    print('current directory:',directory)
    print("Started deleting (sending to trach) inside subfolders..")
    for folderfile in os.listdir(directory):
        if not folderfile.endswith(".py"):  # skip this script itself
            print(os.fsdecode(folderfile),":")
            subs.append(os.fsdecode(folderfile))
            folder = getAllImageHashes(folderfile)
            # second argument None selects the in-folder duplicate pass
            printDifferences(folder, None)
    print("Started deleting (sending to trach) over subfolders....")
    # compare every unordered pair of subfolders exactly once
    for index in range(len(subs)):
        print(os.fsdecode(subs[index]),":")
        for sub_fold in (subs[index+1:]):
            print(os.fsdecode(subs[index]),'-',os.fsdecode(sub_fold),":")
            firstFolder = getAllImageHashes(subs[index])
            secondFolder = getAllImageHashes(sub_fold)
            printDifferences(firstFolder, secondFolder)
    print("Done. All dublicates sent to trach.")
def printDifferences(folder1, folder2):
    """Trash duplicate images and print how many matches were handled.

    folder1/folder2 are lists of (relative_path, hash) tuples from
    getAllImageHashes(). When folder2 is None, folder1 is compared against
    itself (skipping identical filenames); otherwise matching files from
    folder2 are trashed via processMatchedImages().
    """
    matchFound = False
    matchCount = 0
    del_count = 0
    # Only dissalow matching filenames if in the same directory
    if folder2 == None:
        for f1 in folder1:
            for f2 in folder1:
                # same hash, different file -> duplicate; each pair is visited
                # twice, as (a, b) and (b, a)
                if f1[1] == f2[1] and f1[0] != f2[0]:
                    try:
                        matchCount += 1
                        matchFound = True
                        # print("{ Match found: ")
                        # print("\t" + f1[0])
                        # print("\t" + f2[0])
                        # print("}")
                        #delete dublicated file:
                        #os.remove(os.path.join(os.getcwd(),f2[0]))
                        # NOTE(review): because the symmetric (b, a) visit also
                        # trashes its second element, *both* members of a pair
                        # may end up in the trash -- confirm this is intended.
                        send2trash(os.path.join(directory,f2[0]))
                        del_count +=1
                    except:
                        # NOTE(review): bare except silently swallows any
                        # failure (e.g. file already trashed) -- confirm.
                        continue
        # del_count is halved because every pair was visited twice above
        print('found and deleted ', str(int(del_count/2)),' matches.')
    else:
        for f1 in folder1:
            for f2 in folder2:
                if f1[1] == f2[1]:
                    matchCount += 1
                    matchFound = True
                    processMatchedImages(f1, f2)
                    del_count +=1
        print('found and deleted ', str(del_count),' matches.')
    if not matchFound:
        print("No matches found!")
def processMatchedImages(img1, img2):
    """Trash *img2*, the duplicate found in the second folder.

    img1/img2 are (relative_path, hash) tuples; img1 is kept.
    """
    # print("{ Match #" + str(matchCount) + " found: ")
    # print("  " + img1[0])
    # print("  " + img2[0])
    # print("}")
    #delete dublicated file:
    #os.remove(os.path.join(os.getcwd(),f2[0]))
    # `directory` is the module-level global (empty string -> relative path)
    send2trash(os.path.join(directory,img2[0]))
def getOnlyFilename(fullpath):
    """Return the last path component of *fullpath*.

    Generalized to accept both Windows ("\\") and POSIX ("/") separators;
    the original handled only backslashes. Backslash-only paths behave
    exactly as before.
    """
    return fullpath.replace("/", "\\").split("\\")[-1]
def getAllImageHashes(folder):
    """Hash every image file in *folder*.

    Returns a list of (relative_path, dhash_hex) tuples, skipping Windows
    metadata files (.ini, .db).
    """
    onlyfiles = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f)) and not f.endswith(".ini") and not f.endswith(".db")]
    hashedFiles = []
    for f in onlyfiles:
        # Context manager closes each image file handle promptly; the original
        # left them open until garbage collection (a leak with many files).
        with Image.open(f) as img:
            hashedFiles.append((f, dhash(img)))
    #print("Hashed all files from folder: "+ folder)
    return hashedFiles
def dhash(image, hash_size = 8):
    """Compute a difference hash (dHash) of a PIL image.

    The image is converted to greyscale and shrunk to
    (hash_size + 1) x hash_size pixels; each bit records whether a pixel is
    brighter than its right-hand neighbour. The hash_size*hash_size bits are
    packed LSB-first per byte and returned as a lowercase hex string.
    """
    # Grayscale and shrink the image in one step.
    # NOTE: Image.ANTIALIAS was removed in Pillow 10; use Image.LANCZOS there.
    image = image.convert('L').resize(
        (hash_size + 1, hash_size),
        Image.ANTIALIAS,
    )
    # Compare adjacent pixels. (The original also built an unused
    # `pixels = list(image.getdata())` list here; removed.)
    difference = []
    for row in range(hash_size):
        for col in range(hash_size):
            pixel_left = image.getpixel((col, row))
            pixel_right = image.getpixel((col + 1, row))
            difference.append(pixel_left > pixel_right)
    # Convert the binary array to a hexadecimal string.
    decimal_value = 0
    hex_string = []
    for index, value in enumerate(difference):
        if value:
            decimal_value += 2**(index % 8)
        if (index % 8) == 7:
            hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
            decimal_value = 0
    return ''.join(hex_string)
# Script entry point: run the duplicate-image cleanup over the CWD.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1786948 | <reponame>uktrade/directory-forms-api<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-08 11:32
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.15 (see the generator header above):
    # creates the ``Sender`` model used to black-/whitelist email addresses.
    # Generated migrations should normally not be edited by hand.

    dependencies = [
        ('submission', '0006_auto_20190103_1604'),
    ]

    operations = [
        migrations.CreateModel(
            name='Sender',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, null=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, null=True, verbose_name='modified')),
                ('email_address', models.EmailField(max_length=254)),
                ('is_blacklisted', models.BooleanField()),
                ('is_whitelisted', models.BooleanField()),
            ],
            options={
                'ordering': ('-modified', '-created'),
                'get_latest_by': 'modified',
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
1738057 | <reponame>MakerAsia/lvgl<filename>examples/libs/rlottie/lv_example_rlottie_1.py<gh_stars>1000+
#!/opt/bin/lv_micropython -i
import lvgl as lv
import display_driver
#
# Load a lottie animation from flash
#
from lv_example_rlottie_approve import lv_example_rlottie_approve
# Build a 100x100 rlottie widget on the active screen from the raw animation
# data imported above, and centre it.
lottie = lv.rlottie_create_from_raw(lv.scr_act(), 100, 100, lv_example_rlottie_approve)
lottie.center()
| StarcoderdataPython |
3304610 | <filename>opyapi/http/errors/http_error.py
from typing import Optional
from opyapi import OpyapiError
from opyapi.http import HttpResponse
class HttpError(OpyapiError, HttpResponse):
    """HTTP error response; subclasses override the class-level defaults."""

    status_code: int = 500
    http_message = "Internal Server Error"

    def __init__(
        self, http_message: Optional[str] = None, status_code: Optional[int] = None
    ):
        # Fall back to the class-level defaults for falsy/missing overrides.
        code = status_code or self.status_code
        message = http_message or self.http_message
        HttpResponse.__init__(self, code)
        self.write(message)
__all__ = ["HttpError"]
| StarcoderdataPython |
157438 | <reponame>vihaton/MBH<gh_stars>0
import torch
from torchvision import datasets, transforms
import numpy as np
# ## MNIST data
def get_dataset(data_name: str):
    """Map a dataset nickname ('digit' or 'fashion') to its torchvision class."""
    name_to_dataset = {
        'digit': datasets.MNIST,
        'fashion': datasets.FashionMNIST,
    }
    if data_name in name_to_dataset:
        return name_to_dataset[data_name]
    raise ValueError(f'unknown dataset name {data_name}')
def load_data(data_dir, data_name: str, batch_size, test_batch_size, mean=None, std=None, shuffle=True, kwargs={}):
    """Create train/test DataLoaders for the chosen MNIST variant.

    mean/std may be scalars or per-pixel arrays (length 784); when either is
    given, each image is flattened to a 784-vector and normalized, otherwise
    images pass through unchanged.

    NOTE(review): ``kwargs={}`` is a mutable default argument; it is only
    read here so it is harmless, but ``None`` + fallback would be safer.
    """
    def normalize_features(image):
        # No statistics supplied: identity transform, original image shape kept.
        if mean is None and std is None:
            return image
        # Flatten to (1, 784) and apply (x - mean) / std with whatever
        # statistics were supplied (either may be None independently).
        out = image.view(-1, 28*28)
        # print('before norm', out.mean(), out.std(), out.min(), out.max())
        if mean is not None:
            out = (out - mean)
        if std is not None:
            out = out / std
        # print('after norm', out.mean(), out.std(), out.min(), out.max())
        return out.float()
    dataset = get_dataset(data_name)
    train_loader = torch.utils.data.DataLoader(
        dataset(data_dir, train=True, download=True,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Lambda(normalize_features),
                ])),
        batch_size=batch_size,
        shuffle=shuffle,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        dataset(data_dir, train=False,
                transform=transforms.Compose([
                    transforms.ToTensor(),
                    transforms.Lambda(normalize_features),
                ])),
        batch_size=test_batch_size, shuffle=shuffle, **kwargs)
    return train_loader, test_loader
def get_train_and_test_loaders(data_dir, data_name, batch_size, test_batch_size,
                               normalize_data, normalize_by_features, normalize_by_moving, normalize_by_scaling,
                               kwargs, device=None):
    """Build train/test DataLoaders, optionally normalized with statistics
    computed from the raw training images.

    normalize_by_features -- per-pixel mean/std (length-784 vectors) instead
                             of scalars over the whole training set
    normalize_by_moving   -- subtract the mean (else mean is dropped)
    normalize_by_scaling  -- divide by the std (else std is dropped)
    device                -- if given, dataset tensors are moved there in place
    """
    train_loader, test_loader = load_data(
        data_dir, data_name, batch_size, test_batch_size, kwargs=kwargs)
    train_samples = train_loader.dataset.data.shape[0]
    if normalize_data:
        print('normalize the data')
        # raw uint8 images flattened to (n_samples, 784); /255 below converts
        # the statistics to the [0, 1] scale produced by ToTensor()
        flat_train = train_loader.dataset.data.detach().view(train_samples, -1).numpy()
        if normalize_by_features:
            mean = np.mean(flat_train, axis=0) / 255
            std = np.std(flat_train, axis=0) / 255
            std[std == 0] = 1  # if the std is 0, we should divide with 1 instead
            # every feature has its own mean and std
            assert(len(mean) == len(std) == 784)
        else:  # use the same transformation to all of the features
            # NOTE(review): this scalar branch has no zero-std guard (only an
            # all-constant dataset could trigger a division by zero).
            mean = np.mean(flat_train) / 255
            std = np.std(flat_train) / 255
            assert type(mean) == type(
                std) == np.float64, "there should be only one value for the whole dataset"
            print(type(mean), type(std))
        if normalize_by_scaling:
            print('std: shape, mean', std.shape, np.mean(std))
        else:
            std = None
        if normalize_by_moving:
            print('mean: shape, mean, max, min', mean.shape,
                  np.mean(mean), np.max(mean), np.min(mean))
        else:
            mean = None
        # mean = (0.1307,) # digit mnist
        # std = (0.3081,) # digit mnist
        train_loader, test_loader = load_data(data_dir,
                                              data_name, batch_size, test_batch_size, mean=mean, std=std, kwargs=kwargs)
    else:
        print('dont normalize')
    if device is not None:
        print('move data to device', device)
        # Mutates the underlying datasets in place: the raw tensors are moved
        # to the target device for both loaders.
        train_loader.dataset.data = train_loader.dataset.data.to(device)
        train_loader.dataset.targets = train_loader.dataset.targets.to(device)
        test_loader.dataset.data = test_loader.dataset.data.to(device)
        test_loader.dataset.targets = test_loader.dataset.targets.to(device)
    return train_loader, test_loader
| StarcoderdataPython |
81677 | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import StackingRegressor
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from sklearn.utils.random import sample_without_replacement
def corrupt(X, y, outlier_rated=0.1, random_state=None) if False else None  # placeholder
class ENOLS:
    def __init__(self, n_estimators=500, sample_size='auto'):
        """
        Parameters
        ----------
        n_estimators: number of OLS models to train
        sample_size: size of random subset used to train the OLS models, default to 'auto'
         - If 'auto': use subsets of size n_features+1 during training
         - If int: use subsets of size sample_size during training
         - If float: use subsets of size ceil(n_sample*sample_size) during training
        """
        self.n_estimators = n_estimators
        self.sample_size = sample_size

    def fit(self, X, y, random_state=None):
        """
        Train ENOLS on the given training set.

        Parameters
        ----------
        X: an input array of shape (n_sample, n_features)
        y: an array of shape (n_sample,) containing the classes for the input examples

        Return
        ------
        self: the fitted model
        """
        # use random instead of np.random to sample random numbers below
        random = check_random_state(random_state)
        estimators = [('lr', LinearRegression())]
        # NOTE(review): an int sample_size is *replaced* by the sampling-method
        # name handed to sample_without_replacement below, so the documented
        # "subsets of size sample_size" behaviour is not actually implemented
        # -- confirm intent before relying on the docstring.
        if isinstance(self.sample_size, int):
            self.sample_size = 'reservoir_sampling'
        # add all the trained OLS models to this list
        self.estimators_lr, self.estimators_TSR, self.estimators_enols = [], [], []
        for i in range(self.n_estimators):
            # NOTE(review): population (50/100) and subset (10/20) sizes are
            # hard-coded and ignore len(X); sampled indices can exceed
            # len(X) - 1 for small datasets -- confirm.
            samples = sample_without_replacement(n_population=random.choice([50, 100]),
                                                 n_samples=random.choice([10, 20]),
                                                 random_state=random_state, method=self.sample_size)
            X_train, y_train = [], []
            # NOTE(review): this inner loop reuses `i`, shadowing the estimator
            # index of the outer loop.
            for i in samples:
                X_train.append(X[i]), y_train.append(y[i])
            reg = LinearRegression()
            reg.fit(np.array(X_train), np.array(y_train))
            tsr = TheilSenRegressor()
            tsr.fit(np.array(X_train), np.array(y_train))
            enol = StackingRegressor(estimators=estimators, final_estimator=LinearRegression())
            enol.fit(np.array(X_train), np.array(y_train))
            self.estimators_lr.append(reg), self.estimators_TSR.append(tsr), self.estimators_enols.append(enol)
        return self

    def predict(self, X, method='average'):
        """
        Parameters
        ----------
        X: an input array of shape (n_sample, n_features)
        method: 'median' or 'average', corresponding to predicting median and
           mean of the OLS models' predictions respectively.

        Returns
        -------
        y: an array of shape (n_samples,) containing the predicted classes
        """
        ols, ts_reg, enols = [], [], []
        for reg in self.estimators_lr:
            ols.append(reg.predict(X))
        for tsr in self.estimators_TSR:
            ts_reg.append(tsr.predict(X))
        for enol in self.estimators_enols:
            enols.append(enol.predict(X))
        # NOTE(review): plain-OLS predictions are averaged in *both* branches;
        # only the ensemble and Theil-Sen predictions honour method='median'.
        if method == 'average':
            ols = np.average(ols, axis=0)
            enols = np.average(enols, axis=0)
            ts_reg = np.average(ts_reg, axis=0)
        else:
            ols = np.average(ols, axis=0)
            enols = np.median(enols, axis=0)
            ts_reg = np.median(ts_reg, axis=0)
        return ols, ts_reg, enols
if __name__ == '__main__':
    # Sweep the outlier ratio and compare the test MSE of plain OLS, a
    # Theil-Sen regressor and the stacked ensemble on the Boston housing data.
    ensemble_ols, tsregressor, ordinaryleastsquare = [], [], []
    p = [0, 0.01, 0.05, 0.08, 0.1, 0.15, 0.2, 0.25, 0.30, 0.40, 0.50]
    for i in p:
        X, y = load_boston(return_X_y=True)
        X_tr, X_ts, y_tr, y_ts = train_test_split(X, y, test_size=0.3, random_state=42)
        # corrupt a fraction i of the training rows (fixed seed -> repeatable)
        W, z = corrupt(X_tr, y_tr, outlier_ratio=i, random_state=42)
        reg = ENOLS(sample_size=42)
        reg.fit(W, z, random_state=42)
        ols, ts_reg, enols = reg.predict(X_ts, method='median')
        mse_ols = mean_squared_error(y_ts, ols)
        mse_ts_reg = mean_squared_error(y_ts, ts_reg)
        mse_enols = mean_squared_error(y_ts, enols)
        ensemble_ols.append(mse_enols), tsregressor.append(mse_ts_reg), ordinaryleastsquare.append(mse_ols)
    # plot MSE versus corruption ratio for the three models
    plt.plot(p, ensemble_ols, 'b', label="enols")
    plt.plot(p, tsregressor, 'r', label="tsr")
    plt.plot(p, ordinaryleastsquare, 'g', label="ols")
    plt.legend(loc="upper right")
    plt.xlabel('p', fontsize=18)
    plt.ylabel('mse', fontsize=16)
    plt.show()
| StarcoderdataPython |
3213266 | <reponame>tdeboer-ilmn/hail<filename>hail/python/hail/hail_logging.py
import abc
import logging
class Logger(abc.ABC):
    """Minimal logging interface: error / warning / info."""

    @abc.abstractmethod
    def error(self, msg):
        """Log *msg* at error severity."""

    @abc.abstractmethod
    def warning(self, msg):
        """Log *msg* at warning severity."""

    @abc.abstractmethod
    def info(self, msg):
        """Log *msg* at info severity."""


class PythonOnlyLogger(Logger):
    """Logger backed by the stdlib ``logging`` module ("hail" logger, INFO level)."""

    def __init__(self, skip_logging_configuration=False):
        self.logger = logging.getLogger("hail")
        self.logger.setLevel(logging.INFO)
        if not skip_logging_configuration:
            logging.basicConfig()

    def _emit(self, level, msg):
        # Single stdlib call site shared by all three severities.
        self.logger.log(level, msg)

    def error(self, msg):
        self._emit(logging.ERROR, msg)

    def warning(self, msg):
        self._emit(logging.WARNING, msg)

    def info(self, msg):
        self._emit(logging.INFO, msg)
| StarcoderdataPython |
1724763 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Parse some options to transpopy."""
from argparse import ArgumentParser
def return_args():
    """Return a parser object."""
    parser = ArgumentParser(add_help=True, description=(
        "Translate msgid's from a POT file with Google Translate API"))
    # Required value options, in the same order as before.
    value_options = (
        ('-f', '--file', "Get the POT file name."),
        ('-o', '--output_file', "Get name to save the new PO file."),
        ('-t', '--translate', "Get language to translate to."),
    )
    for short_flag, long_flag, help_text in value_options:
        parser.add_argument(short_flag, long_flag, action='store',
                            required=True, help=help_text)
    # Optional boolean flags (default False).
    boolean_flags = (
        ('-i', '--imprecise', "Save translated texts as fuzzy(draft)."),
        ('-e', '--error', "Print translate errors if exist."),
        ('-p', '--print_process', "Print translate process."),
    )
    for short_flag, long_flag, help_text in boolean_flags:
        parser.add_argument(short_flag, long_flag, action='store_true',
                            help=help_text)
    return parser
| StarcoderdataPython |
4812227 | """
Script to add the missing slot to the schimbaIntensitateMuzica intent
The slot "nivel" was missing due to an error in the Chatito script corresponding to this intent (in scenarios 0,1,3)
The default format for the input file is RASA
"""
import json
# Datasets (RASA format) whose schimbaIntensitateMuzica examples are missing
# the "nivel" slot; each file is patched in place by main().
FILES_TO_FIX = [
    'data/romanian_dataset/home_assistant/version1/scenario0_uniform_dist/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario0_uniform_dist/testing_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario1_synonyms/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario1_synonyms/testing_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario2_missing_slots/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario2_missing_slots/testing_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.1/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.1/testing_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.2/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.2/testing_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.3/training_dataset.json',
    'data/romanian_dataset/home_assistant/version1/scenario3_imbalance/3.3/testing_dataset.json',
]
# These are extracted from the Chatito scripts.
# Longer phrases come first so e.g. 'putin mai tare' wins over 'mai tare'.
SLOT_VALUES = [
    'putin mai tare',
    'puțin mai tare',
    'mai tare',
    'mai sus',
    'mai incet',
    'mai jos',
    'mai încet',
]
SLOT_NAME = 'nivel'


def fix_item(itm):
    """Attach the missing "nivel" entity to a single RASA example.

    The example text is scanned for the first known slot value; when one is
    found, a matching entity annotation is appended. The example dict is
    modified in place and also returned.
    """
    text = itm['text']
    for candidate in SLOT_VALUES:
        position = text.find(candidate)
        if position == -1:
            continue
        itm['entities'].append({
            'start': position,
            'end': position + len(candidate),
            'value': candidate,
            'entity': SLOT_NAME,
        })
        break
    return itm
def fix(input_path, output_path):
    """Load a RASA dataset, patch every schimbaIntensitateMuzica example,
    and write the patched dataset to *output_path*."""
    with open(input_path, errors='replace', encoding='utf-8') as source:
        dataset = json.load(source)
    for example in dataset['rasa_nlu_data']['common_examples']:
        if example['intent'] == 'schimbaIntensitateMuzica':
            fix_item(example)
    with open(output_path, 'w', encoding='utf-8') as sink:
        json.dump(dataset, sink, ensure_ascii=False, separators=(',', ':'))
def check_diff(input_path, output_path):
    """Print every example that differs between two dataset files (debug aid)."""
    def _examples(path):
        with open(path, errors='replace', encoding='utf-8') as fh:
            return json.load(fh)['rasa_nlu_data']['common_examples']

    for before, after in zip(_examples(input_path), _examples(output_path)):
        if before != after:
            print('---------')
            print(before)
            print('||||||||')
            print(after)
def main():
    """Patch every dataset listed in FILES_TO_FIX, overwriting each in place."""
    for dataset_path in FILES_TO_FIX:
        fix(dataset_path, dataset_path)
        print('Fixed dataset saved in ' + dataset_path)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3240128 | from django.db import models
from multiselectfield import MultiSelectField
# Create your models here.
class Hero(models.Model):
    """A Dota-style hero: attribute, roles, tags, difficulty and links."""
    # Choice tuples: (stored value, human-readable label).
    Attribute = (
        ('Strength', 'Strength'),
        ('Agility', 'Agility'),
        ('Intelligence', 'Intelligence'),
    )
    Role = (
        ('Carry', 'Carry'),
        ('Mid', 'Mid'),
        ('Offlaner', 'Offlaner'),
        ('Soft-Support', 'Soft-Support'),
        ('Hard-Support','Hard-Support'),
    )
    Tag = (
        ('Nuker', 'Nuker'),
        ('Disabler', 'Disabler'),
        ('Jungler', 'Jungler'),
        ('Tank', 'Tank'),
        ('Escape','Escape'),
        ('Pusher','Pusher'),
        ('Initiator','Initiator'),
    )
    Skill = (
        ('Easy','Easy'),
        ('Average','Average'),
        ('Difficult','Difficult'),
    )
    Team = (
        ('Radiant','Radiant'),
        ('Dire','Dire'),
    )
    name = models.CharField(max_length=20)
    atr = models.CharField(max_length=40, choices=Attribute)  # primary attribute
    role = MultiSelectField(max_choices=5, choices=Role)  # a hero can fill several roles
    tag = MultiSelectField(max_choices=7, choices=Tag)  # gameplay tags (multi-select)
    skill = models.CharField(max_length=20, choices=Skill)  # difficulty rating
    image = models.ImageField(blank=True, default='default.png')
    description = models.TextField()
    team = models.CharField(max_length=10, choices=Team)
    dotabuff = models.CharField(max_length=100)  # link to the hero's dotabuff page
    build = models.CharField(max_length=100)  # link to a build guide
    def __str__(self):
        return self.name
class Guide(models.Model):
    """Item/skill build images for a single Hero."""
    build = models.ImageField(blank=True, default='default.png')
    build2 = models.ImageField(blank=True, default='default.png')
    skillbuild = models.ImageField(blank=True, default='default.png')
    # A guide belongs to one hero; the guide survives hero deletion (SET_NULL).
    hero = models.ForeignKey(Hero, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        # NOTE(review): raises AttributeError when hero is NULL (SET_NULL
        # allows that); guard deliberately not added to preserve behavior --
        # confirm whether a fallback label is wanted.
        return self.hero.name
1707690 | <gh_stars>10-100
class Solution:
    def duplicateZeros(self, arr: List[int]) -> None:
        """Duplicate each zero in *arr* in place, shifting the remaining
        elements right; elements pushed past the original length are dropped.

        Do not return anything, modify arr in-place instead.
        (Fix: the original's last line had a dataset artifact fused onto it,
        which made the file syntactically invalid.)
        """
        dup_count = 0
        last_idx = len(arr) - 1
        # First pass: count the zeros whose duplicate still fits inside the
        # array; handle the edge case of a zero exactly on the boundary.
        i = 0
        while i <= last_idx - dup_count:
            if arr[i] == 0:
                if i == last_idx - dup_count:
                    # This zero's duplicate is the final surviving slot.
                    arr[last_idx] = 0
                    last_idx -= 1
                    break
                dup_count += 1
            i += 1
        # Second pass: copy backwards, writing each zero twice.
        for src in range(last_idx - dup_count, -1, -1):
            if arr[src] == 0:
                arr[src + dup_count] = 0
                dup_count -= 1
                arr[src + dup_count] = 0
            else:
                arr[src + dup_count] = arr[src]
4810896 | <gh_stars>1-10
try:
import tweepy
except ImportError:
print("ENSURE YOU HAVE INSTALLED tweepy")
try:
from credentials import *
except ImportError:
print("ENSURE YOU HAVE CREATED credentials.py IN THE SAME FOLDER")
try:
from wordcloud import WordCloud, STOPWORDS
except ImportError:
print("ENSURE YOU HAVE INSTALLED wordcloud")
try:
import requests
except ImportError:
print("ENSURE YOU HAVE INSTALLED requests")
try:
import numpy as np
except ImportError:
print("ENSURE YOU HAVE INSTALLED numpy")
from PIL import Image
import urllib
# Filler/stop words (mixed Italian / Hungarian / English) excluded from the
# generated word cloud in addition to wordcloud's built-in STOPWORDS.
meaningless_words = [
    "il","la","az","ez","un","una",
    "uno","gli","le","the","with","RT",
    "amp","what","who","which","that",
    "che","chi","con","I","del","di","della",
    "ma","da","will"
]

# Authenticate against the Twitter API with the keys from credentials.py.
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)

# Module-level accumulator for the words of every fetched tweet.
word_cloud_lst = []
def user_tweet(twitter_handle):
    """Fetch up to 200 recent tweets of *twitter_handle* and append their
    words to the module-level ``word_cloud_lst``.

    URLs and @mentions are skipped. On an API error (e.g. unknown user) a
    placeholder message is appended instead, which then shows up in the
    rendered cloud. (Fix: removed the dead per-tweet ``clean`` list that was
    built and immediately discarded.)
    """
    try:
        tweets = api.user_timeline(screen_name=twitter_handle, count=200, tweet_mode="extended")
        for tweet in tweets:
            for word in tweet.full_text.split():
                # Drop links and @mentions; keep everything else.
                if 'https:' in word or 'http:' in word or 'www' in word or '.com' in word:
                    continue
                if word[0] == "@":
                    continue
                word_cloud_lst.append(word)
    except tweepy.TweepError:
        word_cloud_lst.append("invalid username")  # a cool error message
def generate_wordcloud(words, mask):
    """Render *words* into a 512x512 word cloud shaped by *mask* and save it
    under static/images/<handle>.png.

    NOTE(review): relies on the module-level global ``handle`` (assigned in
    the __main__ block) for the output filename -- consider passing it as a
    parameter.
    """
    stopwords = set(STOPWORDS)
    for word in meaningless_words:
        stopwords.add(word)
    word_cloud = WordCloud(width = 512, height = 512, background_color='white', stopwords=stopwords, mask=mask).generate(words)
    path = 'static/images/'+handle+'.png'
    word_cloud.to_file(path)
    # NOTE(review): this assigns a *local* name and has no effect on the
    # module-level word_cloud_lst (it is not reset) -- confirm intent.
    word_cloud_lst = []
if __name__ == '__main__':
    # Demo run: fetch tweets for a fixed handle and render them into a
    # house-shaped word cloud (mask downloaded at runtime).
    handle = "oundleschool"
    user_tweet(handle)
    words = " ".join(word_cloud_lst)
    mask = np.array(Image.open(requests.get('http://www.clker.com/cliparts/O/i/x/Y/q/P/yellow-house-hi.png', stream=True).raw))
    generate_wordcloud(words, mask)
| StarcoderdataPython |
3264261 | import functools
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Optional, Set
from procrastinate import builtin_tasks, exceptions, jobs, migration
from procrastinate import retry as retry_module
from procrastinate import store, utils
if TYPE_CHECKING:
from procrastinate import tasks, worker
logger = logging.getLogger(__name__)
@utils.add_sync_api
class App:
"""
The App is the main entry point for procrastinate integration.
Instantiate a single :py:class:`App` in your code
and use it to decorate your tasks with :py:func:`App.task`.
You can run a worker with :py:func:`App.run_worker`.
"""
@classmethod
def from_path(cls, dotted_path: str):
return utils.load_from_path(dotted_path, cls)
def __init__(
self,
*,
job_store: store.BaseJobStore,
import_paths: Optional[Iterable[str]] = None,
):
"""
Parameters
----------
job_store:
Instance of a subclass of :py:class:`BaseJobStore`, typically
:py:class:`PostgresJobStore`. It will be responsible for all
communications with the database.
import_paths:
List of python dotted paths of modules to import, to make sure
that the workers know about all possible tasks.
If you fail to add a path here and a worker encounters
a task defined at that path, the task will be loaded on the
fly and run, but you will get a warning.
You don't need to specify paths that you know have already
been imported, though it doesn't hurt.
A :py:func:`App.task` that has a custom "name" parameter, that is not
imported and whose module path is not in this list will
fail to run.
"""
self.job_store = job_store
self.tasks: Dict[str, "tasks.Task"] = {}
self.builtin_tasks: Dict[str, "tasks.Task"] = {}
self.queues: Set[str] = set()
self.import_paths = import_paths or []
self._register_builtin_tasks()
def task(
self,
_func: Optional[Callable] = None,
*,
queue: str = jobs.DEFAULT_QUEUE,
name: Optional[str] = None,
retry: retry_module.RetryValue = False,
) -> Any:
"""
Declare a function as a task. This method is meant to be used as a decorator::
@app.task(...)
def my_task(args):
...
or::
@app.task
def my_task(args):
...
The second form will use the default value for all parameters.
Parameters
----------
_func :
The decorated function
queue :
The name of the queue in which jobs from this task will be launched, if
the queue is not overridden at launch.
Default is ``"default"``.
When a worker is launched, it can listen to specific queues, or to all
queues.
name :
Name of the task, by default the full dotted path to the decorated function.
if the function is nested or dynamically defined, it is important to give
it a unique name, and to make sure the module that defines this function
is listed in the ``import_paths`` of the :py:class:`procrastinate.App`.
retry :
Details how to auto-retry the task if it fails. Can be:
- A ``boolean``: will either not retry or retry indefinitely
- An ``int``: the number of retries before it gives up
- A :py:class:`procrastinate.RetryStrategy` instance for complex cases
Default is no retry.
"""
# Because of https://github.com/python/mypy/issues/3157, this function
# is quite impossible to type consistently, so, we're just using "Any"
def _wrap(func: Callable[..., "tasks.Task"]):
from procrastinate import tasks
task = tasks.Task(func, app=self, queue=queue, name=name, retry=retry)
self._register(task)
return functools.update_wrapper(task, func)
if _func is None: # Called as @app.task(...)
return _wrap
return _wrap(_func) # Called as @app.task
def _register(self, task: "tasks.Task") -> None:
self.tasks[task.name] = task
if task.queue not in self.queues:
logger.debug(
"Registering queue",
extra={"action": "register_queue", "queue": task.queue},
)
self.queues.add(task.queue)
def _register_builtin_tasks(self) -> None:
builtin_tasks.register_builtin_tasks(self)
def configure_task(self, name: str, **kwargs: Any) -> jobs.JobDeferrer:
    """
    Configure a task for deferring, using its name

    Parameters
    ----------
    name : str
        Name of the task. If not explicitly defined, this will be the dotted path
        to the task (``my.module.my_task``)
    **kwargs: Any
        Parameters from :py:func:`Task.configure`

    Returns
    -------
    ``jobs.JobDeferrer``
        Launch ``.defer(**task_kwargs)`` on this object to defer your job.
    """
    # Local import, mirroring the lazy import in task(); presumably avoids
    # an import cycle between the app and tasks modules.
    from procrastinate import tasks
    return tasks.configure_task(name=name, job_store=self.job_store, **kwargs)
def _worker(self, queues: Optional[Iterable[str]] = None) -> "worker.Worker":
    # Build a Worker bound to this app. ``queues=None`` means the worker
    # listens to every queue (see run_worker_async's docstring).
    from procrastinate import worker
    return worker.Worker(app=self, queues=queues)
@functools.lru_cache(maxsize=1)
def perform_import_paths(self):
    """
    Whenever using app.tasks, make sure the apps have been imported by calling
    this method.
    """
    # maxsize=1 makes this effectively run-once. NOTE(review):
    # functools.lru_cache on a method keys on ``self`` and keeps that
    # instance alive for the cache's lifetime -- acceptable for a
    # long-lived App object, but worth confirming.
    utils.import_all(import_paths=self.import_paths)
    logger.debug(
        "All tasks imported",
        extra={"action": "imported_tasks", "tasks": list(self.tasks)},
    )
async def run_worker_async(
    self, queues: Optional[Iterable[str]] = None, only_once: bool = False
) -> None:
    """
    Run a worker. This worker will run in the foreground
    and the function will not return until the worker stops
    (most probably when it receives a stop signal) (except if
    `only_once` is True).

    Parameters
    ----------
    queues:
        List of queues to listen to, or None to listen to
        every queue.
    only_once:
        If True, the worker will run but just for the currently
        defined tasks. This function will return when the
        listened queues are empty.
    """
    worker = self._worker(queues=queues)
    if only_once:
        # One pass over the queues; both "nothing left" and "stop
        # requested" terminate it cleanly.
        try:
            await worker.process_jobs_once()
        except (exceptions.NoMoreJobs, exceptions.StopRequested):
            pass
    else:
        # Run until the worker is stopped externally (see docstring).
        await worker.run()
@property
def migrator(self) -> migration.Migrator:
    # A fresh Migrator bound to this app's job store, built on each access.
    return migration.Migrator(job_store=self.job_store)
async def close_connection_async(self):
    # Release the job store's underlying connection.
    await self.job_store.close_connection()
| StarcoderdataPython |
1654687 | <gh_stars>1-10
import graphene
from wagtailcommerce.accounts.schema import UserQuery
from wagtailcommerce.addresses.mutations import DeleteAddress, EditAddress
from wagtailcommerce.carts.schema import CartQuery
from wagtailcommerce.carts.mutations import AddToCart, ModifyCartLine, UpdateCartCoupon
from wagtailcommerce.products.schema import CategoriesQuery
from wagtailcommerce.orders.schema import OrdersQuery
from wagtailcommerce.orders.mutations import PlaceOrder
from wagtailcommerce.shipping.schema import ShippingQuery
class WagtailCommerceMutations(graphene.ObjectType):
    # Root GraphQL mutations exposed by wagtailcommerce: cart manipulation,
    # address management and order placement.
    add_to_cart = AddToCart.Field()
    edit_address = EditAddress.Field()
    delete_address = DeleteAddress.Field()
    place_order = PlaceOrder.Field()
    modify_cart_line = ModifyCartLine.Field()
    update_cart_coupon = UpdateCartCoupon.Field()
class WagtailCommerceQueries(CategoriesQuery, CartQuery, OrdersQuery, UserQuery, ShippingQuery, graphene.ObjectType):
    # Root GraphQL query type, composed from the per-app query mixins.
    pass
| StarcoderdataPython |
1646722 | from .models import ClientsID
# Generates a unique id or checks its presence in cookies
class PollMiddleware(object):
    """Ensure every anonymous visitor carries a valid ``client_id`` cookie.

    Authenticated users pass through untouched.  Anonymous users get a
    freshly created :class:`ClientsID` cookie when they present no cookie,
    or when the cookie they present is unknown to the database.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        if not request.user.is_anonymous:
            return response
        cookie = request.COOKIES.get('client_id')
        if cookie is None:
            return self._issue_client_id(response)
        try:
            ClientsID.objects.get(unique_id=cookie)
        except ClientsID.DoesNotExist:
            return self._issue_client_id(response)
        return response

    def _issue_client_id(self, response):
        # Mint a new ClientsID row and attach its unique id as the cookie.
        new_id = ClientsID.objects.create()
        response.set_cookie('client_id', new_id.unique_id)
        return response
3217515 | from typing import List
from ..models import Brand, Car, Customer, User
# Active user auth tokens; starts empty and is filled at runtime.
USER_TOKENS = []

# Static seed data: brands, cars belonging to them, customers (each owning a
# list of cars) and application users.
BRANDS = [
    Brand(1, "Acura"),
    Brand(2, "<NAME>"),
    Brand(3, "Bentley"),
    Brand(4, "BMW"),
    Brand(5, "Cadillac"),
    Brand(6, "Chevrolet"),
    Brand(7, "Dodge"),
    Brand(8, "Fiat"),
    Brand(9, "Ferrari"),
    Brand(10, "Hyundai")
]
CARS = [
    Car(1, "Integra", BRANDS[0]),
    Car(2, "Mito", BRANDS[1]),
    Car(3, "Continental", BRANDS[2]),
    Car(4, "Bentayga", BRANDS[2]),
    Car(5, "Serie 3", BRANDS[3]),
    Car(6, "Escalade", BRANDS[4]),
    Car(7, "Caprice Classic", BRANDS[5]),
    Car(8, "Suburban", BRANDS[5]),
    Car(9, "Venture", BRANDS[5]),
    Car(10, "Caravan", BRANDS[6]),
    Car(11, "Ram", BRANDS[6]),
    Car(12, "Ducato", BRANDS[7]),
    Car(13, "F-430", BRANDS[8]),
    Car(14, "Accent", BRANDS[9])
]
CUSTOMERS = [
    Customer(1, '<NAME>', [CARS[6]]),
    Customer(2, '<NAME>', [CARS[0], CARS[2]]),
    Customer(3, '<NAME>', [CARS[1]]),
    Customer(4, '<NAME>', [CARS[3], CARS[8]]),
    # BUG FIX: the cars argument must be a list like every other Customer;
    # previously a bare Car was passed here.
    Customer(5, '<NAME>', [CARS[5]])
]
USERS = [
    User(1, 'donadoe', 'secretpassword', '<PASSWORD>'),
    User(2, 'triticky', 'iamthebest', 'salesperson'),
    User(3, 'meberave', 'crazy4cars', 'mechanic')
]
4825457 | <reponame>TripSage/TeamFormationAssistant<gh_stars>1-10
# pylint: skip-file
from __future__ import absolute_import
import json
import unittest
import sys
sys.path.append(".")
from app.connection import connect
from app import connection
from app import app
# Valid member-signup payload used by the success-path tests.
TEST_DATA = {
    "name": "XYZ",
    "hourlyrate": "40",
    "dob": "1995-10-21",
    "languages": "JAVA",
    "memberrole": "DevOps Engineer",
    "experience": "2",
    "skillscore": "70",
    "availablehoursperweek": "40",
}
# Same payload with a deliberately invalid date of birth ("1995-100-21"),
# intended to exercise validation failures.
WRONG_DATA = {
    "name": "XYZ",
    "hourlyrate": "40",
    "dob": "1995-100-21",
    "languages": "JAVA",
    "memberrole": "DevOps Engineer",
    "experience": "2",
    "skillscore": "70",
    "availablehoursperweek": "40",
}
# Minimal project-creation payload (single team member, weights sum to 100).
PROJECT_DETAIL_DATA = {
    "name": "test",
    "enddate": "2020-12-12",
    "teamsize": "1",
    "budget": "100",
    "tools": "Vscode",
    "priority": "4",
    "languagepreferred0": "JAVA",
    "skill0": "33",
    "memberrole0": "DevOps",
    "availablehoursperweek0": "20",
    "skillweight": "20",
    "experienceweight": "20",
    "hoursweight": "20",
    "languageweight": "20",
    "budgetweight": "20",
}
"""Define all test cases as test_TEST_NAME"""
class Api(unittest.TestCase):
    """Integration tests for the Flask API and DB connection helpers."""

    def setUp(self):
        # Fresh test client and DB connection before every test.
        self.app = app.test_client()
        self.db = connect()
        self.connection = connection

    def test_get_team_data(self):
        response = self.app.get("/getResults")
        # ``data`` is only consumed by the commented-out assertions below.
        data = json.loads(response.get_data())
        self.assertEqual(response.status_code, 200)
        # self.assertNotEqual(len(data[0]['MemberName']), 0)
        # self.assertIsNotNone(data[0]['ProjectId'])
        # self.assertNotEqual(len(data[0]['ProjectName']), 0)

    def test_member_signup_success(self):
        response = self.connection.add_member(TEST_DATA)
        self.assertEqual(response, True)

    # def test_member_signup_fails(self):
    #     response = self.connection.add_member(WRONG_DATA)
    #     self.assertEqual(response, False)

    def test_save_project_requirements(self):
        response = self.connection.save_project_requirements(PROJECT_DETAIL_DATA)
        self.assertEqual(response, True)

    def create_project(self):
        # NOTE(review): no "test_" prefix, so unittest discovery never runs
        # this method -- confirm whether that is intentional.
        response = self.connection.create_project(PROJECT_DETAIL_DATA)
        self.assertEqual(response, True)


if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
46384 | <filename>python/prizes.py<gh_stars>1-10
import fnmatch
import codecs
import tablib
import os
import requests
import urlparse
import shutil
import frontmatter
import yaml
import io
class PrizeSpreadsheets(object):
    """
    Discovers prize .xlsx spreadsheets in a directory, renaming each file
    on disk to "<REGION>.xlsx" as a side effect of discovery.
    """
    def __init__(self, dir_path):
        self.dir_path = dir_path

    def get_spreadsheets(self):
        # NOTE: renames every matching workbook on disk before loading it.
        # Excel lock files ("~...") are skipped.
        sheets = []
        for file in os.listdir(self.dir_path):
            if file.endswith(".xlsx") and file.startswith("~") == False:
                new_filename = "%s.xlsx" % (self.get_region(file))
                os.rename(os.path.join(self.dir_path, file), os.path.join(self.dir_path, new_filename))
                # print new_filename
                sheets.append({
                    "filename": new_filename,
                    "region": self.get_region(file),
                    "sheet": PrizeSpreadsheet(os.path.join(self.dir_path, new_filename))
                })
        return sheets

    def get_region(self, file):
        # Region is either the whole file stem ("NSW.xlsx") or the first
        # space-separated word of the file name, upper-cased.
        regions = ["AUSTRALIA", "ACT", "NSW", "NT", "QLD", "SA", "TAS", "VIC", "WA", "NZ"]
        if " " not in file:
            region = os.path.splitext(file)[0]
        else:
            region = file.split(" ")[0].upper()
        if region not in regions:
            raise ValueError("Region '%s' is not valid." % (region))
        if region == "NATIONAL":
            # NOTE(review): unreachable -- "NATIONAL" is not in ``regions``,
            # so the ValueError above fires first; confirm intended behaviour.
            return "AUSTRALIA"
        return region
class PrizeSpreadsheet(object):
    """
    Wraps an openpyxl workbook of prize rows and converts a worksheet into
    a list of dicts keyed by normalised column headers.
    """
    def __init__(self, file_path):
        self.file_path = file_path
        self.read_file()

    def read_file(self):
        from openpyxl import load_workbook
        # print "file_path: %s" % (self.file_path)
        self.wb = load_workbook(self.file_path)

    def get_prize_sheet(self, index=0):
        # print self.wb.get_sheet_names()
        self.ws = self.wb.get_sheet_by_name(self.wb.get_sheet_names()[index])
        return self.ws

    def get_headers(self):
        # Headers come from row 1: lower-cased, whitespace removed, and the
        # two observed spellings of the eligibility column unified.
        headers = []
        for row in self.ws.iter_rows("A1:Z1"):
            for cell in row:
                if cell.value == None:
                    break
                else:
                    col_name = "".join(cell.value.split()).lower()
                    if "eligibilitycriteria" in col_name or "eligiblitycriteria" in col_name:
                        col_name = "eligibilitycriteria"
                    headers.append(col_name)
        return headers

    def is_row_empty(self, row):
        # True when every cell in the row holds None.
        for cell in row:
            if cell.value is not None:
                return False
        return True

    def sheet_to_dict(self, ws):
        # Convert data rows (after the header row) to dicts; a fully empty
        # row marks the end of the data block.
        rows = []
        headers = self.get_headers()
        print headers
        # print
        for row in ws.iter_rows(row_offset=1):
            # print row
            # Consider a single empty row to be the end of the available data
            if self.is_row_empty(row) == True:
                # print "Row is empty"
                break
            tmp = {}
            for idx, cell in enumerate(row):
                # print "%s: %s" % (idx, cell.value)
                if idx >= len(headers):
                    break
                else:
                    if type(cell.value) is unicode:
                        tmp[headers[idx]] = cell.value.strip()
                    else:
                        tmp[headers[idx]] = cell.value
            rows.append(tmp)
            # print
        return rows
# Config
datadir = "python/data/prizes"
prizesdir = "_prizes/2016/"
organisationsdir = "_organisations/"
eventsdir = "_locations/"
# tmpdir = "python/tmp/"
# Init
# For seeing if the mentor's organisations exists
# (organisation gids are the .md file stems under organisationsdir)
organisation_names = []
for root, dirnames, filenames in os.walk(organisationsdir):
    for filename in fnmatch.filter(filenames, "*.md"):
        organisation_names.append(filename.split(".")[0])
# For matching mentors to events
# (event gid -> path of its markdown file, for frontmatter lookups later)
event_names = []
event_md_files = {}
for root, dirnames, filenames in os.walk(eventsdir):
    for filename in fnmatch.filter(filenames, "*.md"):
        event_name = filename.split(".")[0]
        event_names.append(event_name)
        event_md_files[event_name] = os.path.join(root, filename)
# Ingest available prize sheets for UPSERTing
gids = [] # GIDs used so far for sanity checking
validation_errors = []
# Main ingest loop: for every regional spreadsheet, turn each row into a
# prize dict, attach events/organisations, then UPSERT a markdown file with
# YAML front matter under prizesdir. Validation problems are collected in
# ``validation_errors`` and printed at the end.
sheets = PrizeSpreadsheets(datadir).get_spreadsheets()
for file in sheets:
    print "Spreadsheet: %s" % (file["filename"])
    print "Region: %s" % (file["region"])
    ws = file["sheet"].get_prize_sheet(0)
    rows = file["sheet"].sheet_to_dict(ws)
    print "Prize Count: %s" % (len(rows))
    print
    print "Processing prizes..."
    print
    for idx, row in enumerate(rows):
        # Assign our prize a globally unique id
        # if "prizename" not in row:
        #     print row
        #     exit()
        if row["prizename"] is None:
            # @TODO
            errmsg = "%s: Prize #'%s' has no name" % (file["region"].lower(), idx)
            # raise ValueError(errmsg)
            validation_errors.append(errmsg)
            print errmsg
            continue
        else:
            # Normalise en-dashes before slugifying the name.
            row["prizename"] = row["prizename"].replace(u'\u2013', "-").encode("utf-8")
        gid = file["region"].lower() + "-" + row["prizename"].lower().strip().replace("/", " or ").replace(" ", "-").replace("'", "")
        if gid in gids: # Hacky
            gid = gid + "-2"
        if gid in gids:
            raise ValueError("GID '%s' is already in use." % (gid))
        else:
            gids.append(gid)
        print row["prizename"]
        print gid
        print
        if row["prizetype"] is None:
            errmsg = "%s: Prize '%s' has no type set." % (file["region"].lower(), row["prizename"])
            # raise ValueError(errmsg)
            validation_errors.append(errmsg)
            print errmsg
            continue
        jurisdiction = file["region"].lower()
        if row["prizetype"].lower() == "international":
            jurisdiction = "international"
        prize = {
            "name": row["prizename"],
            "title": row["prizename"],
            "gid": gid,
            "jurisdiction": jurisdiction,
            "type": row["prizetype"].title()
        }
        # Attach non-national prizes to their events
        if file["region"] != "AUSTRALIA":
            if row["prizelevelregionwideoreventspecific"] == "Event only":
                if "eventspecificlocation" not in row:
                    raise ValueError("Event-only prize nominated without any accompanying event specified.")
                if row["eventspecificlocation"] is None:
                    # @TODO
                    errmsg = "%s: Prize '%s' is an Event prize, but no event locations provided." % (file["region"].lower(), row["prizename"])
                    # raise ValueError(errmsg)
                    validation_errors.append(errmsg)
                    print errmsg
                    continue
                event_gid = row["eventspecificlocation"].replace(" ", "-").replace(",", "").lower()
                event_gid_original = event_gid
                # Hacky fix
                if event_gid == "mount-gambier":
                    event_gid = "mount-gambier-youth"
                elif event_gid == "all-brisbane-events":
                    event_gid = "brisbane"
                if event_gid not in event_names:
                    errmsg = "%s: Event GID '%s' does not exist." % (file["region"].lower(), event_gid)
                    raise ValueError(errmsg)
                    # NOTE(review): the two lines below are unreachable --
                    # the raise above always fires first. Confirm which
                    # behaviour (hard fail vs. collect) is intended.
                    validation_errors.append(errmsg)
                    # print errmsg
                else:
                    print "For Event: %s" % (event_gid)
                    post = frontmatter.load(event_md_files[event_gid])
                    prize["category"] = "local"
                    prize["events"] = [post.metadata["gid"]]
                    # Hacky fix
                    if event_gid_original == "all-brisbane-events":
                        prize["events"].append("brisbane-youth")
                        prize["events"].append("brisbane-maker")
                    if post.metadata["gid"] != event_gid:
                        print "WARNING: Event .md file does not match event gid. %s, %s" % (event_gid, post.metadata["gid"])
            else:
                prize["category"] = "state"
        else:
            prize["category"] = "australia"
        # Attach sponsoring organisations
        prize["organisation_title"] = row["sponsoredby"].strip().replace(" Prize", "")
        organisation_gid = row["sponsoredby"].lower().replace(" ", "-").replace(",", "").strip()
        if organisation_gid in organisation_names:
            prize["organisation"] = organisation_gid
        else:
            # print "WARNING: Could not resolve organisation: %s (%s)" % (row["sponsoredby"], organisation_gid)
            pass
        # If a prize already exists, merge the latest info over the top
        prize_md_dir = os.path.join(prizesdir, file["region"].lower())
        prize_md_file = os.path.join(prize_md_dir, "%s.md" % (gid))
        if os.path.exists(prize_md_file):
            # print "NOTICE: Found an existing prize. Merging new data."
            existing_prize = frontmatter.load(prize_md_file)
            existing_prize.metadata.update(prize)
            prize = existing_prize.metadata
        # Convert prize $$$ value to an integer
        estimatedprizevalue = ""
        if type(row["estimateprizevalue$"]) is unicode and row["estimateprizevalue$"].strip().replace("$", "").isdigit():
            estimatedprizevalue = int(row["estimateprizevalue$"].strip().replace("$", ""))
        elif type(row["estimateprizevalue$"]) is float or type(row["estimateprizevalue$"]) is long:
            estimatedprizevalue = int(row["estimateprizevalue$"])
        elif row["estimateprizevalue$"] is not None:
            estimatedprizevalue = row["estimateprizevalue$"].strip()
        # Fixing up minor stuff
        if row["prizecategorydescription"] is None:
            row["prizecategorydescription"] = unicode("")
        if row["prizereward"] is None:
            row["prizereward"] = unicode("")
        if row["eligibilitycriteria"] is None:
            row["eligibilitycriteria"] = unicode("")
        # print prize
        # print row
        print
        print "---"
        print
        # continue
        # Write the prize as markdown with a YAML front-matter header;
        # "|" in the spreadsheet cells stands for a newline.
        if not os.path.exists(prize_md_dir):
            os.makedirs(prize_md_dir)
        with io.open(prize_md_file, "w", encoding="utf-8") as f:
            f.write(u'---\n')
            f.write(unicode(yaml.safe_dump(prize, width=200, default_flow_style=False, encoding="utf-8", allow_unicode=True), "utf-8"))
            f.write(u'---\n')
            f.write(u'\n')
            f.write(unicode(row["prizecategorydescription"].replace("|", "\n").rstrip()))
            f.write(u'\n\n')
            f.write(u'# Prize\n')
            f.write(unicode(str(row["prizereward"]).replace("|", "\n").replace(".0", "").rstrip()))
            f.write(u'\n\n')
            f.write(u'# Eligibility Criteria\n')
            f.write(unicode(row["eligibilitycriteria"].replace("|", "\n").rstrip()))
    # print "\n"
    print "############################################################"
    # print "\n\n"
if len(validation_errors) > 0:
    for i in validation_errors:
        print i
# ---
# name: Prize 1
# id: prize_1
# photo_url: https://static.pexels.com/photos/3084/person-woman-park-music-large.jpg
# jurisdiction: australia
# type: Prize
# organisations:
# - organisation_1
# themes:
# - theme_1
# - theme_3
# datasets:
# - dataset_1
# - dataset_3
# dataportals:
# - dataportal_1
# ---
# Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam at ornare risus, at dignissim sapien. Sed eget est mi. Ut lacinia ornare tellus commodo sagittis. Integer euismod eleifend velit, eget dictum leo sagittis at.
# # Prize Details
# Phasellus rutrum euismod turpis elementum ornare. Donec ut risus id ante gravida molestie. Integer cursus tempus porta. Sed vitae nunc quis nibh dapibus aliquet vel sed dolor. Donec id risus ut ipsum fermentum cursus quis sed massa. Nulla sit amet blandit orci, dapibus condimentum augue. Fusce suscipit purus et ultricies fermentum.
# # Requirements
# Mauris at est urna. Aenean ut elit venenatis augue dictum viverra:
# - **Nulla facilisi.** Donec vel justo odio. Vivamus consequat hendrerit arcu vel vestibulum. Proin malesuada mauris vitae nulla iaculis fringilla.
# - **Proin tempor tempus ipsum id bibendum.** Duis vehicula nisi vel bibendum lacinia.
# - **Suspendisse libero dui**, hendrerit vitae eleifend sed, cursus ut tellus. Vivamus tristique, lectus in ullamcorper interdum, orci nisi vestibulum nisi, ac luctus est mi quis justo.
# - **Phasellus tempor laoreet felis a porta.** Aenean in sodales odio. Curabitur interdum bibendum orci, vitae hendrerit eros tempus at. | StarcoderdataPython |
1634627 | <filename>tools/verify_transplant.py
import caffe
import surgery
import numpy as np
import os
import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
# Compare a base caffemodel against a transplanted one: load both into the
# same network definition and print per-layer weight histograms so the
# transplant can be verified by eye.
base_weights = '../data/imagenet_models/VGG16.v2.fcn-surgery.caffemodel'
weights = '../data/imagenet_models/VGG16.v2.fcn-surgery-all.caffemodel'
#cfg_file= "../experiments/cfgs/faster_rcnn_end2end.yml"
cfg_file= "../experiments/cfgs/detect_end2end.yml"
cfg_from_file(cfg_file)
base_net = caffe.Net('../models/pascal_voc/VGG16/detect_end2end/train.prototxt',
                     base_weights,
                     caffe.TEST)
#cfg_file= "../experiments/cfgs/detect_end2end.yml"
#cfg_from_file(cfg_file)
net = caffe.Net('../models/pascal_voc/VGG16/detect_end2end/train.prototxt',
                weights,
                caffe.TEST)
# set up caffe
#caffe.set_mode_gpu()
#caffe.set_device(0)
# Histogram bucket edges roughly bracket [-1, 1] plus a catch-all tail.
for layer in net.params.keys():
    if layer in base_net.params.keys():
        print("Net 0 : histogram of layer {}: {} ".format(layer,
            np.histogram(base_net.params[layer][0].data, [-1, -0.5, 0, 0.2, 0.5, 0.8,
            1.0, 1000])[0]))
        print("Net 1 : histogram of layer {}: {} ".format(layer,
            np.histogram(net.params[layer][0].data, [-1, -0.5, 0, 0.2, 0.5, 0.8,
            1.0, 1000])[0]))
        # verify that VGG-surgery 1 is the same as 2. the diff: with or w/o the
        # yml file of detect_end2end.yml
    else:
        # only new net has the layer
        print("Only Net 1 : histogram of layer {}: {} ".format(layer,
            np.histogram(net.params[layer][0].data, [-1, -0.5, 0, 0.2, 0.5, 0.8,
            1.0, 1000])[0]))
| StarcoderdataPython |
1611557 | from gym import make
import gym_workflow.envs
import sys
import csv
import os
import json
import matplotlib.pyplot as plt
def main():
    """Sweep every cluster-number action of the Montage-v12 environment.

    For each action ``i`` in ``range(cs_range)`` the environment is reset
    and stepped ``collector_range`` times; the makespan and the individual
    overhead components of each execution are accumulated, and the whole
    record is dumped as JSON into ./records/.

    Removed: a large block of commented-out matplotlib plotting code that
    was dead weight here (plots can be regenerated from the saved file).
    """
    env = make('Montage-v12')
    # records maps action index -> lists of sampled metrics:
    # {action: {makespan: [...], queueDelay: [...], ...}}
    records = {}
    cs_range = 100        # number of cluster-number actions to sweep
    collector_range = 30  # samples collected per action
    for i in range(cs_range):
        records[i] = {
            'makespan': [],
            'queueDelay': [],
            'execTime': [],
            'postscriptDelay': [],
            'clusterDelay': [],
            'WENDelay': []
        }
        for j in range(collector_range):
            # Single-line progress indicator, rewritten in place via "\r".
            print("\r Cluster Number {}/{}, Sampling {}/{}".format(i + 1, cs_range, j+1, collector_range), end="")
            sys.stdout.flush()
            state = env.reset()
            next_state, reward, done, exec_record = env.step(i, training=False)
            records[i]['makespan'].append(float(exec_record['makespan']))
            records[i]['queueDelay'].append(float(exec_record['queue']))
            records[i]['execTime'].append(float(exec_record['exec']))
            records[i]['postscriptDelay'].append(float(exec_record['postscript']))
            records[i]['clusterDelay'].append(float(exec_record['cluster']))
            records[i]['WENDelay'].append(float(exec_record['wen']))
    # NOTE(review): the payload is JSON although the extension is .csv --
    # confirm downstream readers expect this.
    file_name = "workflowsim_analysis_record_cn_{}_collect_{}-publication.csv".format(cs_range, collector_range)
    with open(os.getcwd() + '/records/' + file_name, 'w') as r:
        json.dump(records, r)


if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
4841936 | <gh_stars>10-100
import os
import cv2
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch
import torchvision
from utils.data.structures.bounding_box import BoxList
from utils.data.structures.segmentation_mask import SegmentationMask
from utils.data.structures.hier import Hier
min_hier_per_image = 1
def _count_visible_hier(anno):
return sum(sum(1 for v in ann["hier"][4::5] if v > 0) for ann in anno)
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def has_valid_annotation(anno, ann_types, filter_crowd=True):
    """Decide whether an image's annotation list is usable.

    Rejects images that have no annotations, that (optionally) contain
    only crowd annotations, whose boxes are all degenerate, or -- when
    'hier' annotations are requested -- that show fewer than
    ``min_hier_per_image`` visible hier keypoints.
    """
    if not anno:
        return False
    if filter_crowd and 'iscrowd' in anno[0]:
        # Drop crowd objects; a crowd-only image is filtered out.
        anno = [obj for obj in anno if obj["iscrowd"] == 0]
        if not anno:
            return False
    if _has_only_empty_bbox(anno):
        return False
    if 'hier' not in ann_types:
        return True
    return _count_visible_hier(anno) >= min_hier_per_image
class COCODataset(torchvision.datasets.coco.CocoDetection):
    """COCO detection dataset yielding (image, BoxList target, index).

    Optionally filters out images without valid annotations and attaches
    segmentation masks and/or hier keypoints depending on ``ann_types``.
    """
    def __init__(
        self, ann_file, root, remove_images_without_annotations, ann_types, transforms=None
    ):
        super(COCODataset, self).__init__(root, ann_file)
        # sort indices for reproducible results
        self.ids = sorted(self.ids)
        # filter images without detection annotations
        if remove_images_without_annotations:
            ids = []
            for img_id in self.ids:
                ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
                anno = self.coco.loadAnns(ann_ids)
                if has_valid_annotation(anno, ann_types):
                    ids.append(img_id)
            self.ids = ids
        # COCO category ids are sparse; map them to contiguous labels
        # starting at 1 (0 is reserved for background), plus the inverse map.
        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
        # dataset index -> COCO image id
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        category_ids = self.coco.getCatIds()
        categories = [c['name'] for c in self.coco.loadCats(category_ids)]
        self.classes = ['__background__'] + categories
        self.ann_types = ann_types
        self._transforms = transforms

    def __getitem__(self, idx):
        """Return (transformed image, BoxList target, idx)."""
        img, anno = super(COCODataset, self).__getitem__(idx)
        # filter crowd annotations
        # TODO might be better to add an extra field
        if len(anno) > 0:
            if 'iscrowd' in anno[0]:
                anno = [obj for obj in anno if obj["iscrowd"] == 0]
        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")
        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)
        if 'segm' in self.ann_types:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)
        if 'hier' in self.ann_types:
            # hier data may be absent even when requested; attach only if present.
            if anno and "hier" in anno[0]:
                hier = [obj["hier"] for obj in anno]
                hier = Hier(hier, img.size)
                target.add_field("hier", hier)
        target = target.clip_to_image(remove_empty=True)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target, idx

    def get_img_info(self, index):
        # Raw COCO image metadata (width/height/file_name) for this index.
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data

    def pull_image(self, index):
        """Returns the original image object at index in PIL form

        Note: not using self.__getitem__(), as any transformations passed in
        could mess up this functionality.

        Argument:
            index (int): index of img to show
        Return:
            img (BGR ndarray, as loaded by cv2)
        """
        img_id = self.id_to_img_map[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return cv2.imread(os.path.join(self.root, path), cv2.IMREAD_COLOR)
| StarcoderdataPython |
1634040 | <filename>isdhic/utils.py
import os
import time
import tempfile
import numpy as np
from csb.bio import structure
from .universe import Universe
def randomwalk(n_steps, dim=3):
    """
    Generate a random walk in n-dimensional space by making steps of
    fixed size (unit length) and uniformly chosen direction.

    Parameters
    ----------
    n_steps :
        length of the random walk, i.e. number of steps
    dim :
        dimension of the embedding space (default: dim=3)
    """
    # Isotropic directions: draw Gaussian vectors and normalize each row
    # to unit length.
    steps = np.random.standard_normal((int(n_steps), int(dim)))
    lengths = np.sum(steps ** 2, 1) ** 0.5
    unit_steps = steps / lengths[:, None]
    # Walk positions are the running sum of the unit step vectors.
    return np.cumsum(unit_steps, axis=0)
def create_universe(n_particles=1, diameter=1):
    """
    Create a universe containing 'n_particles' Particles of
    given diameter. The coordinates of the particles follow
    a random walk in 3d space.

    Parameters
    ----------
    n_particles : non-negative number
        number of particles contained in universe
    diameter : non-negative float
        particle diameter
    """
    universe = Universe(int(n_particles))
    # Scale the unit-step random walk so successive particles sit roughly
    # one diameter apart.
    universe.coords[...] = randomwalk(n_particles) * diameter
    return universe
def make_chain(coordinates, sequence=None, chainid='A'):
    """
    Creates a Chain instance from a coordinate array assuming
    that these are the positions of CA atoms

    Parameters
    ----------
    coordinates : sequence of 3d positions, one per residue (CA atoms)
    sequence : optional list of residue names; defaults to all 'ALA'
    chainid : PDB chain identifier (default 'A')
    """
    if sequence is None: sequence = ['ALA'] * len(coordinates)
    residues = []
    for i in range(len(sequence)):
        # One single-atom (CA) residue per coordinate, numbered from 1.
        residue = structure.ProteinResidue(i+1, sequence[i], sequence_number=i+1)
        atom = structure.Atom(i+1, 'CA', 'C', coordinates[i])
        atom.occupancy = 1.0
        residue.atoms.append(atom)
        residues.append(residue)
    chain = structure.Chain(chainid, residues=residues)
    return chain
class Viewer(object):
    """Viewer

    A low-level viewer that allows one to visualize 3d arrays as molecular
    structures using programs such as pymol or rasmol.
    """
    def __init__(self, cmd, **options):
        # shutil.which replaces distutils.spawn.find_executable: distutils
        # is deprecated and removed in Python 3.12.
        from shutil import which

        exe = which(str(cmd))
        if exe is None:
            msg = 'Executable {} does not exist'
            raise ValueError(msg.format(cmd))
        self._cmd = str(exe)
        self._options = options

    @property
    def command(self):
        """Resolved path of the viewer executable."""
        return self._cmd

    def __str__(self):
        return 'Viewer({})'.format(self._cmd)

    def write_pdb(self, coords, filename):
        """Write coords ((n_atoms, 3) or (n_models, n_atoms, 3)) as a PDB ensemble."""
        if coords.ndim == 2: coords = coords.reshape(1,-1,3)
        ensemble = structure.Ensemble()
        for i, xyz in enumerate(coords,1):
            chain = make_chain(xyz)
            struct = structure.Structure('')
            struct.chains.append(chain)
            struct.model_id = i
            ensemble.models.append(struct)
        ensemble.to_pdb(filename)

    def __call__(self, coords, cleanup=True):
        """
        View 3d coordinates as a cloud of atoms.
        """
        # NOTE(review): tempfile.mktemp is race-prone; kept because the
        # external viewer needs a plain path that outlives this call.
        tmpfile = tempfile.mktemp()
        self.write_pdb(coords, tmpfile)
        os.system('{0} {1}'.format(self._cmd, tmpfile))
        time.sleep(1.)
        if cleanup: os.unlink(tmpfile)
class ChainViewer(Viewer):
    """ChainViewer

    Specialized viewer for visualizing chain molecules; requires the
    "pymol" executable to be on PATH.
    """
    def __init__(self):
        super(ChainViewer, self).__init__('pymol')
        # Commands appended to every generated .pml script: render the
        # CA trace as a rainbow-colored ribbon on a white background.
        self.pymol_settings = ('set ribbon_trace_atoms=1',
                               'set ribbon_radius=0.75000',
                               'set cartoon_trace_atoms=1',
                               'set spec_reflect=0.00000',
                               'set opaque_background, off',
                               'bg_color white',
                               'as ribbon',
                               'util.chainbow()')

    def __call__(self, coords, cleanup=True):
        """
        View 3d coordinates as a ribbon
        """
        pdbfile = tempfile.mktemp() + '.pdb'
        pmlfile = pdbfile.replace('.pdb','.pml')
        self.write_pdb(coords, pdbfile)
        pmlscript = ('load {}'.format(pdbfile),
                     'hide') + \
                    self.pymol_settings
        with open(pmlfile, 'w') as f:
            f.write('\n'.join(pmlscript))
        # Launch pymol in the background and give it time to read the
        # files before (optionally) deleting them.
        os.system('{0} {1} &'.format(self._cmd, pmlfile))
        time.sleep(2.)
        if cleanup:
            os.unlink(pdbfile)
            os.unlink(pmlfile)
| StarcoderdataPython |
3224263 | <reponame>kamilpz/haystack
import os
import sys
import html
import logging
import pandas as pd
from json import JSONDecodeError
from pathlib import Path
import streamlit as st
from annotated_text import annotation
from markdown import markdown
from htbuilder import H
# streamlit does not support any states out of the box. On every button click, streamlit reload the whole page
# and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
# here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
import SessionState
from utils import HS_VERSION, feedback_doc, haystack_is_ready, retrieve_doc, upload_doc, haystack_version
# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = "Who's the father of <NAME>?"
# Labels for the evaluation
EVAL_LABELS = os.getenv("EVAL_FILE", Path(__file__).parent / "eval_labels_example.csv")
# Whether the file upload should be enabled or not
DISABLE_FILE_UPLOAD = os.getenv("HAYSTACK_UI_DISABLE_FILE_UPLOAD")
def main():
    """Render the Haystack demo UI.

    Builds the sidebar (answer/document limits, eval mode, optional file
    upload), a query box, and the result list with per-answer feedback
    buttons. Streamlit re-runs this whole function on every interaction;
    cross-run values live in the SessionState object below.
    """
    # Persistent state
    state = SessionState.get(
        random_question=DEFAULT_QUESTION_AT_STARTUP,
        random_answer="",
        results=None,
        raw_json=None,
        get_next_question=True
    )

    # Small callback to reset the interface in case the text of the question changes
    def reset_results(*args):
        state.results = None
        state.raw_json = None

    # Title
    st.write("# Haystack Demo")

    # Sidebar
    st.sidebar.header("Options")
    top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
    top_k_retriever = st.sidebar.slider("Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1)
    eval_mode = st.sidebar.checkbox("Evaluation mode")
    debug = st.sidebar.checkbox("Show debug info")

    # File upload block (hidden entirely when HAYSTACK_UI_DISABLE_FILE_UPLOAD is set)
    if not DISABLE_FILE_UPLOAD:
        st.sidebar.write("## File Upload:")
        data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
        for data_file in data_files:
            # Upload file
            if data_file:
                raw_json = upload_doc(data_file)
                st.sidebar.write(str(data_file.name) + " &nbsp;&nbsp; ✅ " if "✅" in str(data_file.name) + " ✅ " else str(data_file.name) + " ✅ ")
                if debug:
                    st.subheader("REST API JSON response")
                    st.sidebar.write(raw_json)
    hs_version = None
    # Version lookup is best-effort only; the footer renders without it.
    try:
        hs_version = f" <small>(v{haystack_version()})</small>"
    except Exception:
        pass
    st.sidebar.markdown(f"""
<style>
    a {{
        text-decoration: none;
    }}
    .haystack-footer {{
        text-align: center;
    }}
    .haystack-footer h4 {{
        margin: 0.1rem;
        padding:0;
    }}
    footer {{
        opacity: 0;
    }}
</style>
<div class="haystack-footer">
    <hr />
    <h4>Built with <a href="https://www.deepset.ai/haystack">Haystack</a>{hs_version}</h4>
    <p>Get it on <a href="https://github.com/deepset-ai/haystack/">GitHub</a> - Read the <a href="https://haystack.deepset.ai/overview/intro">Docs</a></p>
    <small>Data crawled from <a href="https://en.wikipedia.org/wiki/Category:Lists_of_countries_by_continent">Wikipedia</a> in November 2021.<br />See the <a href="https://creativecommons.org/licenses/by-sa/3.0/">License</a> (CC BY-SA 3.0).</small>
</div>
""", unsafe_allow_html=True)

    # Load csv into pandas dataframe
    if eval_mode:
        try:
            df = pd.read_csv(EVAL_LABELS, sep=";")
        except Exception:
            st.error(f"The eval file was not found. Please check the demo's [README](https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")
            sys.exit(f"The eval file was not found under `{EVAL_LABELS}`. Please check the README (https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information.")
        # Get next random question from the CSV
        state.get_next_question = st.button("Load new question")
        if state.get_next_question:
            reset_results()
            new_row = df.sample(1)
            while new_row["Question Text"].values[0] == state.random_question: # Avoid picking the same question twice (the change is not visible on the UI)
                new_row = df.sample(1)
            state.random_question = new_row["Question Text"].values[0]
            state.random_answer = new_row["Answer"].values[0]

    # Search bar
    question = st.text_input(
        "Please provide your query:",
        value=state.random_question,
        max_chars=100,
        on_change=reset_results
    )
    run_query = st.button("Run")

    # Check the connection to the Haystack REST API before querying.
    with st.spinner("⌛️ &nbsp;&nbsp; Haystack is starting..." if False else "⌛️ Haystack is starting..."):
        if not haystack_is_ready():
            st.error("🚫 Connection Error. Is Haystack running?")
            run_query = False
            reset_results()

    # Get results for query
    if run_query and question:
        reset_results()
        with st.spinner(
            "🧠 Performing neural search on documents... \n "
            "Do you want to optimize speed or accuracy? \n"
            "Check out the docs: https://haystack.deepset.ai/usage/optimization "
        ):
            try:
                state.results, state.raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)
            except JSONDecodeError as je:
                st.error("👓 An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e):
                    st.error("🧑🌾 All our workers are busy! Try again later.")
                else:
                    st.error("🐞 An error occurred during the request. Check the logs in the console to know more.")
                return

    if state.results:
        # Show the gold answer if we use a question of the given set
        if question == state.random_question and eval_mode:
            st.write("## Correct answers:")
            st.write(state.random_answer)

        st.write("## Results:")
        count = 0 # Make every button key unique
        for result in state.results:
            if result["answer"]:
                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(markdown(context[:start_idx] + str(annotation(answer, "ANSWER", "#8ef")) + context[end_idx:]), unsafe_allow_html=True)
                st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
            else:
                st.warning("🤔 Haystack found no good answer to your question. Try to formulate it differently!")
                st.write("**Relevance:** ", result["relevance"])
            if eval_mode:
                # Define columns for the three feedback buttons; keys embed the
                # result context and a counter so streamlit treats them as unique.
                button_col1, button_col2, button_col3, _ = st.columns([1, 1, 1, 6])
                if button_col1.button("👍", key=f"{result['context']}{count}1", help="Correct answer"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="true",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="true",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
                if button_col2.button("👎", key=f"{result['context']}{count}2", help="Wrong answer and wrong passage"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="false",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="false",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
                if button_col3.button("👎👍", key=f"{result['context']}{count}3", help="Wrong answer, but correct passage"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="false",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="true",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None)
                    )
                    st.success("✨ Thanks for your feedback! ✨")
            count += 1
            st.write("___")
        if debug:
            st.subheader("REST API JSON response")
            st.write(state.raw_json)
# Guard the entry point so importing this module has no side effects.
# `streamlit run` executes the script with __name__ == "__main__", so the
# app still starts normally under streamlit.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3332119 | from collections import defaultdict, deque
# NOTE(review): 'distances' is never referenced again in this file —
# presumably a leftover; kept in case another module imports it.
distances = {}
def depth(edges, n):
    """Breadth-first distances from node 1 in an undirected graph.

    *edges* maps each node to its adjacency list; nodes are numbered
    1..n. Returns a list of length n + 1 whose i-th entry is the edge
    distance from node 1 to node i (index 0 is unused and stays 0).
    """
    seen = [False] * (n + 1)
    dist = [0] * (n + 1)
    frontier = deque([1])
    seen[1] = True
    while frontier:
        node = frontier.popleft()
        for nbr in edges[node]:
            if seen[nbr]:
                continue
            seen[nbr] = True
            dist[nbr] = dist[node] + 1
            frontier.append(nbr)
    return dist
# Read an n-node tree, answer q parity queries: two nodes whose depths
# have an odd difference meet on a road, otherwise in a town.
n, q = map(int, input().split())
graph = defaultdict(list)
for _ in range(n - 1):
    a, b = map(int, input().split())
    graph[a].append(b)
    graph[b].append(a)
depths = depth(graph, n)
for _ in range(q):
    c, d = map(int, input().split())
    if (depths[c] - depths[d]) % 2 == 1:
        print('Road')
    else:
        print('Town')
| StarcoderdataPython |
103506 | # Generated by Django 3.0.8 on 2020-09-13 14:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: drops the per-donation recurring_status field
    # and introduces a dedicated Subscription model that Donation links to.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('site_settings', '0004_sitesettings_stripe_product_id'),
        ('donations', '0010_remove_donation_is_user_first_donation'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='donation',
            name='recurring_status',
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('object_id', models.CharField(max_length=255)),
                ('recurring_amount', models.FloatField()),
                ('currency', models.CharField(max_length=20)),
                ('recurring_status', models.CharField(blank=True, choices=[('active', 'Active'), ('processing', 'Processing'), ('paused', 'Paused'), ('cancelled', 'Cancelled')], max_length=255, null=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('linked_user_deleted', models.BooleanField(default=False)),
                ('deleted', models.BooleanField(default=False)),
                ('gateway', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='site_settings.PaymentGateway')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'Subscription',
                'verbose_name_plural': 'Subscriptions',
                'ordering': ['-created_at'],
            },
        ),
        migrations.AddField(
            model_name='donation',
            name='subscription',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='donations.Subscription'),
        ),
    ]
| StarcoderdataPython |
1601129 | <gh_stars>1-10
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.config.provider import ClassProvider
from aries_cloudagent.core.protocol_registry import ProtocolRegistry
from aries_cloudagent.core.plugin_registry import PluginRegistry
from .v1_0.message_types import MESSAGE_TYPES
from .definition import versions
from .patched_protocols.issue_credential.v1_0.message_types import MESSAGE_TYPES as ISSUE_CREDENTIAL_MESSAGE_TYPES
from .patched_protocols.present_proof.v1_0.message_types import MESSAGE_TYPES as PRESENT_PROOF_MESSAGE_TYPES
async def setup(context: InjectionContext):
    """Register MyData DID message types and swap in the patched protocols.

    Registers the plugin's own message types plus the patched
    issue-credential / present-proof types, loads the patched protocol
    plugins, and finally removes the superseded built-in ones.
    """
    protocol_registry: ProtocolRegistry = await context.inject(ProtocolRegistry)
    for message_types in (
        MESSAGE_TYPES,
        ISSUE_CREDENTIAL_MESSAGE_TYPES,
        PRESENT_PROOF_MESSAGE_TYPES,
    ):
        protocol_registry.register_message_types(message_types, version_definition=versions[0])

    plugin_registry: PluginRegistry = await context.inject(PluginRegistry)
    for plugin_path in (
        "mydata_did.patched_protocols.issue_credential.v1_0",
        "mydata_did.patched_protocols.present_proof.v1_0",
    ):
        plugin_registry.register_plugin(plugin_path)

    # Remove the built-in protocols these patched versions supersede.
    for superseded in (
        "aries_cloudagent.protocols.issue_credential",
        "aries_cloudagent.protocols.present_proof",
    ):
        plugin_registry._plugins.pop(superseded)
3240614 | <filename>Exercices/Secao05/exercicio08.py
# Read two grades and print their average; both grades must lie in [0, 10].
print('Digite duas notas de um aluno')
n1 = float(input('Nota 1: '))
n2 = float(input('Nota 2: '))
# BUG FIX: the original check was `n1 >= 0 and n2 < 11`, which accepted
# out-of-range values such as n1 = 15 or n2 = -3. Validate both grades
# against the full 0..10 range.
if 0 <= n1 <= 10 and 0 <= n2 <= 10:
    media = (n1 + n2) / 2
    print(f'A média do aluno foi: {media}')
else:
    print('Nota inválida')
19771 | """Loads the config.json file and store key value pairs into variables"""
import json
# Parse config.json once at import time; each setting below is exposed as a
# module-level constant for other modules to import.
# NOTE(review): a missing key raises KeyError at import — fail-fast by design? confirm.
with open('config.json', 'r', encoding='utf-8') as f:
    config = json.load(f)
config_location_type = config['location_type']
config_location = config['location']
country = config['country']
config_covid_terms = config['covid_terms']
newsAPI_key = config['newsAPI_key']
news_outlet_websites = config['news_outlet_websites']
webpage_url = config["local_host_url"]
176540 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""password_recovery.py: A CGI endpoint for handling password recovery"""
import json
import os
import sys
import sqlite3
import auth
import groups
import sendemail
from httperror import HTTPError
RETURN_HEADERS = []
def __do_get():
    # This endpoint is POST-only; reject any GET outright.
    raise HTTPError("This script is NOT GET-able", 403)
def __do_post():
    """Parse the POST body as JSON and dispatch on its 'action' field."""
    try:
        postdata = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        raise HTTPError("Malformed Request. Data not JSON-decodable")
    requested_action = postdata['action'] if 'action' in postdata else None
    if requested_action == "request_token":
        return __get_token(postdata)
    if requested_action == 'reset_password':
        return __reset_password(postdata)
    raise HTTPError("Not Implemented", 500)
def __get_token(request):
    """Handle the 'request_token' action: mail a recovery token to the address.

    Raises HTTPError for a missing email, an unknown recipient, or any
    ValueError bubbling up from token generation/sending.
    """
    if 'email' not in request:
        raise HTTPError("Missing email")
    try:
        token = get_token(request['email'])
        # Fix: compare to None with 'is', not '==' (PEP 8; '==' may invoke
        # arbitrary __eq__ implementations).
        if token is None:
            raise HTTPError("No such recipient")
        send_token(request['email'], token)
        return ""
    except ValueError as err:
        raise HTTPError(err.args[0])
def get_token(email):
    """Create, persist and return a new recovery token for *email*.

    Returns None when no group uses this contact address.
    """
    # NOTE(review): the connection is never closed; this relies on the CGI
    # process exiting after each request — confirm that assumption holds.
    database = sqlite3.connect('database.sqlite3')
    exists = database.execute("SELECT count() FROM groups WHERE contact_email=?", (email,)).fetchone()[0]
    if exists == 0:
        return None
    # Draw random tokens until one is not already stored for this address.
    while True:
        token = auth.generate_random()
        count = database.execute("SELECT count() FROM password_recovery WHERE email=? AND token=?",
                                 (email, token)).fetchone()[0]
        if count == 0:
            break
    database.execute("INSERT INTO password_recovery(email, token) values(?,?)",
                     (email, token))
    database.commit()
    return token
def send_token(email, token):
    """Email the recovery *token* to *email* using the text template.

    The template is rendered with ``% (token, email, token)`` in that order.
    """
    with open("templates/email_password_recovery.txt", mode="r") as file_pointer:
        string = file_pointer.read()
    string = string % (token, email, token)
    # NOTE(review): the sender address below is a dataset-anonymization
    # placeholder; restore the real from-address before deploying.
    sendemail.send_email(email, "Skvaderhack Password Recovery", string, "<EMAIL>")
def __reset_password(request):
    """Handle the 'reset_password' action, then log the group in."""
    try:
        reset_password(request['email'], request['token'], request['password'])
        group = groups.find_group_by_email(request['email'])
        return json.dumps(auth.login(group, request['password']))
    except ValueError as err:
        raise HTTPError(err.args[0])
def reset_password(email, token, password):
    """Set a new password for the group matching an email/token pair.

    Raises ValueError on an empty password, an expired/unknown token, or a
    missing salt. Returns the stored password hash.
    """
    if not password:
        raise ValueError("Password must be specified")
    database = sqlite3.connect('database.sqlite3')
    # Purge tokens older than 15 minutes before validating the supplied one.
    database.execute(("DELETE FROM password_recovery"
                      " WHERE datetime(generated, '+15 minute') < CURRENT_TIMESTAMP"))
    database.commit()
    count = database.execute(("SELECT count() FROM password_recovery"
                              " WHERE email=:email AND token=:token"),
                             {"email": email, "token": token}).fetchone()[0]
    if not count == 1:
        raise ValueError("Incorrect email/token combination")
    # Re-use the group's existing salt when hashing the replacement password.
    salt = database.execute("SELECT salt FROM groups WHERE contact_email=?", (email,)).fetchone()
    if salt is None:
        raise ValueError("Could not find salt for email")
    salt = salt[0]
    password_hash = auth.hash_password(password, salt)
    database.execute("UPDATE groups SET password=:password WHERE contact_email=:email",
                     {"password": password_hash, "email": email})
    # Tokens are single-use: delete it once consumed.
    database.execute("DELETE from password_recovery WHERE token=:token", {"token": token})
    database.commit()
    return password_hash
def __main():
    """Route the CGI request to the handler for its HTTP method."""
    method = os.environ.get('REQUEST_METHOD')
    if method is None:
        raise HTTPError("Missing REQUEST_METHOD")
    if method == 'GET':
        return __do_get()
    if method == 'POST':
        return __do_post()
    raise HTTPError("Undhandled REQUEST_METHOD")
if __name__ == '__main__':
    try:
        RESPONSE = __main()
    except HTTPError as err:
        # Map the error to a CGI Status header; default to 400 when unset.
        if err.status:
            RETURN_HEADERS.append('Status: %d' % err.status)
        else:
            RETURN_HEADERS.append('Status: 400')
        RESPONSE = err.message
    # Emit CGI headers, a blank separator line, then the body.
    NUM_HEADERS = len(RETURN_HEADERS)
    if NUM_HEADERS == 0:
        print('Status: 200')
    else:
        for header in RETURN_HEADERS:
            print(header)
    print('Content-Length: %d' % len(RESPONSE))
    print()
    print(RESPONSE)
| StarcoderdataPython |
8094 | """
A base node that provides several output tensors.
"""
from ....layers.algebra import Idx
from .base import SingleNode, Node
from .. import _debprint
from ...indextypes import IdxType
class IndexNode(SingleNode):
    """Node that selects one output tensor of a multi-output parent node."""

    _input_names = ("parent",)

    def __init__(self, name, parents, index, index_state=None):
        if len(parents) != 1:
            raise TypeError("Index node takes exactly one parent.")
        parent = parents[0]
        # Prefer the parent's declared output name for repr purposes;
        # fall back to a "<i>" placeholder when none is declared.
        if hasattr(parent, "_output_names"):
            output_label = parent._output_names[index]
        else:
            output_label = "<{index}>".format(index=index)
        module = Idx(index, repr_info={"parent_name": parent.name, "index": output_label})
        self.index = index
        self._index_state = IdxType.NotFound if index_state is None else index_state
        super().__init__(name, parents, module=module)
class MultiNode(Node): # Multinode
    """Node with several named output tensors.

    Each declared output (``_output_names``) is wrapped in a child
    ``IndexNode`` and exposed as an attribute of the same name via
    ``__getattr__``. ``_main_output`` names the output used as the node's
    primary result.
    """
    _output_names = NotImplemented
    _output_index_states = NotImplemented # optional?
    _main_output = NotImplemented
    def __init__(self, name, parents, module="auto", *args, db_name=None, **kwargs):
        """Build the node and one IndexNode child per declared output."""
        super().__init__(name, parents, *args, module=module, **kwargs)
        # Children are named "<node name>.<output name>".
        self.children = tuple(
            IndexNode(name + "." + cn, (self,), index=i, index_state=cidx)
            for i, (cn, cidx) in enumerate(zip(self._output_names, self._output_index_states))
        )
        self.main_output.db_name = db_name
    def set_dbname(self, db_name):
        # Delegates to the main output child.
        self.main_output.set_dbname(db_name)
    def __init_subclass__(cls, **kwargs):
        """Validate subclass declarations at class-creation time."""
        super().__init_subclass__(**kwargs)
        # Enforce _child_index_states has same length as _output_names
        if cls._output_index_states is not NotImplemented:
            if len(cls._output_index_states) != len(cls._output_names):
                raise AssertionError(
                    "Lengths of _child_index_states {} doesn't match lengths of ouput_names {}".format(
                        cls._output_index_states, cls._output_names
                    )
                )
        # Enforce no name conflict between input names and output names
        if cls._input_names is not NotImplemented:
            try:
                assert all(o not in cls._input_names for o in cls._output_names)
            except AssertionError as ae:
                raise ValueError(
                    "Multi-node output names {} conflict with input names {}".format(
                        cls._output_names, cls._input_names
                    )
                ) from ae
    def __dir__(self):
        # Advertise the dynamic per-output attributes for introspection.
        dir_ = super().__dir__()
        if self._output_names is not NotImplemented:
            dir_ = dir_ + list(self._output_names)
        return dir_
    def __getattr__(self, item):
        """Resolve an output name to its IndexNode child."""
        if item in ("children", "_output_names"): # Guard against recursion
            raise AttributeError("Attribute {} not yet present.".format(item))
        try:
            return super().__getattr__(item) # Defer to BaseNode first
        except AttributeError:
            pass
        try:
            return self.children[self._output_names.index(item)]
        except (AttributeError, ValueError):
            raise AttributeError("{} object has no attribute '{}'".format(self.__class__, item))
    @property
    def main_output(self):
        # Fall back to the base class when no main output is declared.
        if self._main_output is NotImplemented:
            return super().main_output
        return getattr(self, self._main_output)
| StarcoderdataPython |
1710838 | <reponame>zamazaljiri/django-pyston<filename>pyston/__init__.py
# Apply patch only if django is installed
# Apply the patch only when django is importable; otherwise do nothing.
try:
    from django.core.exceptions import ImproperlyConfigured
    try:
        from django.db import models  # NOQA
        from pyston.patch import *  # NOQA
        from pyston.filters.default_filters import *  # NOQA
    except ImproperlyConfigured:
        # Django is installed but no settings are configured yet; skip patching.
        pass
except ImportError:
    # Fix: dropped the unused `as ex` binding from the original handler.
    # Django is not installed at all; the patch is a no-op.
    pass
| StarcoderdataPython |
24367 | <reponame>alexzanderr/metro.digital<filename>src/app/todos/routes.py
"""
# type: ignore
type ignore is to tell LSP-pyright to ignore the line
because something it thinks that there are errors, but actually at runtime there are not
"""
from .validation import validate_password_check
from .validation import validate_email
from .validation import validate_password
from .validation import validate_username
from json import dumps
from flask import render_template
from flask import Blueprint
from flask import request
from flask import url_for
from flask import redirect
# mongo db client stuff
from ..mongodb_client import mongodb
from ..mongodb_client import CollectionInvalid
from ..mongodb_client import ObjectId
from ..mongodb_client import collection_exists
from ..mongodb_client import get_db_name
from ..mongodb_client import collection_create
from ..mongodb_client import get_collection
from ..mongodb_client import create_or_get_collection
from ..routes_utils import json_response
from string import ascii_letters, digits
from random import choice, randint
from datetime import datetime, timedelta
import hashlib
# Blueprint serving the HTML todo pages under /todos.
todos = Blueprint(
    "todos",
    __name__,
    url_prefix="/todos",
    # not working
    # template_folder="templates/todos"
)
# document template
# todo = {
# text: 'yeaaah',
# timestamp: 1639492801.10111,
# datetime: '14.12.2021-16:40:01',
# completed: false
# }
todos_collection_name = "todos"
todos_collection = create_or_get_collection(todos_collection_name)
# document template
# user = {
# "username": "alexzander",
# "password": "<PASSWORD>", # hashed
# "eamil": "<EMAIL>",
# "creation_timestamp": datetime.timestamp(datetime.now()),
# "creation_datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
# }
users_collection_name = "users"
users_collection = create_or_get_collection(users_collection_name)
# Sample of index_information() output this scan parses:
# ('_id', 1)]},
# 'username_1': {'v': 2, 'key': [('username', 1)], 'unique': True}}
users_unique_keys = [{
    "name": "username",
    "exists": False
}]
# Scan existing indexes to see whether the unique 'username' index exists.
for _, value in users_collection.index_information().items():
    for unique_key in value["key"]:
        for users_unique_key in users_unique_keys:
            if unique_key[0] == users_unique_key["name"]:
                users_unique_key["exists"] = True
# Create any unique index that was not found above.
for users_unique_key in users_unique_keys:
    if not users_unique_key["exists"]:
        users_collection.create_index([
            (users_unique_key["name"], 1)
        ], unique=True)
register_tokens_collection_name = "register_tokens"
register_tokens_collection = create_or_get_collection(register_tokens_collection_name)

# Ensure a unique index on 'token' (mirrors the users-collection setup above).
tokens_unique_keys = [{
    "name": "token",
    "exists": False
}]
# BUG FIX: the original copy-pasted block inspected users_collection's
# indexes, iterated users_unique_keys, and created the index on
# users_collection — it must operate on register_tokens_collection and
# tokens_unique_keys throughout.
for _, value in register_tokens_collection.index_information().items():
    for unique_key in value["key"]:
        for tokens_unique_key in tokens_unique_keys:
            if unique_key[0] == tokens_unique_key["name"]:
                tokens_unique_key["exists"] = True
for tokens_unique_key in tokens_unique_keys:
    if not tokens_unique_key["exists"]:
        register_tokens_collection.create_index([
            (tokens_unique_key["name"], 1)
        ], unique=True)
@todos.route("/")
def todos_root():
    """Render the todo list page with every stored todo."""
    # TODO
    # add authentication with accounts
    collection = get_collection(todos_collection_name)
    return render_template("todos/index.html", todo_list=collection.find())
def hash_password(password: str) -> str:
    """Return the hex-encoded SHA-256 digest of *password*.

    The input must be encoded first because sha256 operates on bytes.
    """
    digest = hashlib.sha256(password.encode("utf-8"))
    return digest.hexdigest()
def check_hash_of_password(username: str, password: str):
    """Return True when *password* hashes to the stored hash for *username*."""
    stored_user = users_collection.find_one({"username": username})
    return stored_user["password"] == hash_password(password)  # type: ignore
@todos.route("/login", methods=["GET", "POST"])
def todos_login():
    """Serve the login form on GET; POST handling is not implemented yet."""
    if request.method == "POST":
        # TODO: authenticate against the users collection, then redirect to
        # /todos. Currently falls through and implicitly returns None.
        pass
    else:
        # TODO: if the user is already authenticated, redirect to /todos
        # instead of re-rendering the form.
        return render_template("todos/login.html")
@todos.route("/mongo/add", methods=["POST"])
def mongo_add():
    """Create a todo from the submitted form field 'text', then show the list."""
    new_todo = {
        "text": request.form["text"],
        "timestamp": datetime.timestamp(datetime.now()),
        "datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S"),
        "completed": False
    }
    todos_collection.insert_one(new_todo)
    return redirect("/todos")
@todos.route("/mongo/complete/<oid>")
def mongo_complete(oid):
    """Toggle the 'completed' flag of the todo identified by *oid*.

    Fixes a crash in the original: find_one returns None for an unknown or
    stale oid, and subscripting None raised a TypeError (HTTP 500).
    """
    requested_todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    if requested_todo is None:
        # Unknown id: just go back to the list instead of crashing.
        return redirect("/todos")
    todos_collection.update_one(
        requested_todo,
        {"$set": {"completed": not requested_todo["completed"]}})
    return redirect("/todos")
@todos.route("/mongo/delete/<oid>")
def mongo_delete(oid):
    """Delete the todo identified by *oid*, then return to the list view.

    Filters delete_one on the _id directly: the original looked the document
    up first and passed the result (possibly None) as the filter, which
    raised a TypeError for an unknown/stale oid. A missing id is now a no-op.
    """
    todos_collection.delete_one({"_id": ObjectId(oid)})
    return redirect(url_for("todos"))
@todos.route('/mongo/delete/all')
def mongo_delete_all():
    # Remove every todo document, then return to the list view.
    todos_collection.delete_many({})
    return redirect(url_for('todos'))
# @todos.route("/", methods=['POST'])
# @todos.route("/<component_name>", methods=['POST'])
# def graphql_query(component_name="app"):
# return str(component_name)
# JSON API blueprint, mounted under /todos/api.
todos_api = Blueprint(
    "todos_api",
    __name__,
    url_prefix="/todos/api")
@todos_api.route("/")
def todos_api_root():
    # Health-check style endpoint returning a static greeting.
    return {"message": "salutare"}, 200
@todos_api.route("/mongo/add", methods=["POST"])
def todos_api_mongo_add():
    """JSON API: insert a new todo and echo it back with its string oid."""
    payload = request.get_json()
    todo = {
        "text": payload["text"],  # type: ignore
        "timestamp": datetime.timestamp(datetime.now()),
        "datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S"),
        "completed": False
    }
    todos_collection.insert_one(todo)
    # insert_one mutates *todo* with the generated '_id'; expose it as 'oid'.
    todo["oid"] = str(todo.pop("_id"))
    return json_response(todo, 200)
# PATCH request
# The PATCH method applies partial modifications to a resource
# meaning that in this case partial mods are todo completed == true
@todos_api.route("/mongo/complete/<oid>", methods=["PATCH"])
def todos_api_mongo_complete(oid):
    """JSON API: toggle 'completed' on the todo and return its new state.

    Fixes a crash in the original: find_one returns None for an unknown oid
    and subscripting None raised a TypeError. Now responds 404 instead.
    """
    requested_todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    if requested_todo is None:
        return json_response({"message": "not found", "oid": oid}, 404)
    completed = not requested_todo["completed"]
    todos_collection.update_one(
        requested_todo,
        {"$set": {"completed": completed}}
    )
    requested_todo["oid"] = str(requested_todo.pop("_id"))
    requested_todo["completed"] = completed
    return json_response(requested_todo, 200)
# TODO add the oid in the post data body
# instead of making it an url, so that no one can see
# te oid
@todos_api.route("/mongo/delete/<oid>", methods=["DELETE"])
def todos_api_mongo_delete(oid):
    """JSON API: delete the todo identified by *oid* and return it.

    Fixes a crash in the original: an unknown oid made find_one return None,
    which then blew up in delete_one / subscripting. Now responds 404.
    """
    requested_todo = todos_collection.find_one({
        "_id": ObjectId(oid)
    })
    if requested_todo is None:
        return json_response({"message": "not found", "oid": oid}, 404)
    todos_collection.delete_one({"_id": requested_todo["_id"]})
    requested_todo["oid"] = str(requested_todo.pop("_id"))
    return json_response(requested_todo, 200)
def generate_random_register_token():
    """Return a random 30-character alphanumeric token."""
    alphabet = ascii_letters + digits
    return "".join(choice(alphabet) for _ in range(30))
def get_new_register_token():
    """Return a random token not yet present in the register_tokens collection."""
    while True:
        candidate = generate_random_register_token()
        if not register_tokens_collection.find_one({"token": candidate}):
            return candidate
@todos.route("/register", methods=["GET", "POST"])
def todos_register():
    """Serve the registration form (GET) or create the account (POST).

    POST expects a JSON body (sent via AJAX) containing username, email,
    password, password_check, remember_me and a register_token previously
    issued by /todos/api/register/validation.
    """
    method = request.method
    if method == "POST":
        # then create a new user in database and encrypt
        # the password
        # then redirect to /todos based on the content that the user has in todos database
        # return render_template ?
        # get data and token from request data body
        json_from_request: dict = request.get_json() # type: ignore
        username = json_from_request["username"]
        email = json_from_request["email"]
        password = json_from_request["password"]
        password_check = json_from_request["password_check"]
        remember_me = json_from_request["remember_me"]
        register_token = json_from_request["register_token"]
        # Reject registration attempts whose token was never issued (or expired).
        if not register_tokens_collection.find_one({"token": register_token}):
            return {
                "message": "cannot register, register token is not database"
            }, 403
        users_collection.insert_one({
            "username": username,
            # NOTE(review): '<PASSWORD>' is a dataset-anonymization placeholder;
            # this line is not valid Python as-is — presumably hash_password(password).
            "password": <PASSWORD>(password), # hashed
            "email": email,
            "creation_timestamp": datetime.timestamp(datetime.now()),
            "creation_datetime": datetime.now().strftime("%d.%m.%Y-%H:%M:%S")
        })
        # you can redirect from POST request sorry
        # and you can render HTML from here because you
        # are making the request from ajax, not from firefox
        return {"message": "success", "redirectTo": "/todos"}, 200
        # or you can redirect to login page
        # or you can automatically login the user after registration
    else:
        # GET
        # if the user is already authenticated
        # then redirect to /todos page
        # else
        # return below
        return render_template("todos/register.html")
@todos_api.post("/register/validation")
def todos_api_register():
    """Validate registration fields; issue a short-lived token on success.

    Returns a JSON mapping of per-field validation results plus a
    'register_token' entry, which is non-null only when every field passed.
    """
    payload: dict = request.get_json()  # type: ignore
    username = payload["username"]
    email = payload["email"]
    password = payload["password"]
    password_check = payload["password_check"]
    remember_me = payload["remember_me"]

    results = {
        "username": validate_username(username),
        "password": validate_password(password),
        "email": validate_email(email),
        "password_check": validate_password_check(password, password_check),
        "register_token": None
    }
    all_passed = all(
        value["passed"]
        for key, value in results.items()
        if key != "register_token"
    )
    if all_passed:
        new_token = get_new_register_token()
        results["register_token"] = new_token
        # The issued token expires two minutes after creation.
        register_tokens_collection.insert_one({
            "token": new_token,
            "expiration_timestamp": datetime.timestamp(datetime.now() + timedelta(minutes=2))
        })
    # TODO add check for username in database
    return json_response(results, 200)
| StarcoderdataPython |
1736203 | <reponame>luisornelasch/melp<filename>migration.py
import pandas
from app.database.database import engine
from app.models.models import Post
# One-shot loader: append every row of the restaurant CSV into the posts table.
file_name = "restaurantes.csv"
data_frame = pandas.read_csv(file_name)
# index=False keeps the pandas row index out of the table.
data_frame.to_sql(con=engine, name=Post.__tablename__,
                  if_exists='append', index=False)
print("Done")
3381760 | import os
import random
import argparse
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchtext.data import Field, LabelField, BucketIterator
class Data_Prepper:
	def __init__(self, name, train_batch_size, n_participants, sample_size_cap=-1, test_batch_size=100, valid_batch_size=None, train_val_split_ratio=0.8, device=None,args_dict=None):
		"""Load the named dataset and build validation/test loaders.

		Text datasets ('sst', 'mr', 'imdb') are pre-split per participant and
		served through torchtext BucketIterators; all other datasets use a
		single train set and plain DataLoaders.
		"""
		self.args = None
		self.args_dict = args_dict
		self.name = name
		self.device = device
		self.n_participants = n_participants
		self.sample_size_cap = sample_size_cap
		self.train_val_split_ratio = train_val_split_ratio
		self.init_batch_size(train_batch_size, test_batch_size, valid_batch_size)
		if name in ['sst', 'mr', 'imdb']:
			# An argparse Namespace is (ab)used as a mutable container for the
			# text-CNN model hyperparameters filled in below.
			parser = argparse.ArgumentParser(description='CNN text classificer')
			self.args = parser.parse_args()
			self.train_datasets, self.validation_dataset, self.test_dataset = self.prepare_dataset(name)
			self.valid_loader = BucketIterator(self.validation_dataset, batch_size = 500, sort_key=lambda x: len(x.text), device=self.device )
			self.test_loader = BucketIterator(self.test_dataset, batch_size = 500, sort_key=lambda x: len(x.text), device=self.device)
			# NOTE(review): assumes prepare_dataset attached text_field/label_field
			# to self.args — confirm against prepare_dataset's implementation.
			self.args.embed_num = len(self.args.text_field.vocab)
			self.args.class_num = len(self.args.label_field.vocab)
			self.args.embed_dim = self.args_dict['embed_dim']
			self.args.kernel_num = self.args_dict['kernel_num']
			self.args.kernel_sizes = self.args_dict['kernel_sizes']
			self.args.static = self.args_dict['static']
			train_size = sum([len(train_dataset) for train_dataset in self.train_datasets])
			if self.n_participants > 5:
				print("Splitting all {} train data to {} parties. Caution against this due to the limited training size.".format(train_size, self.n_participants))
			print("Model embedding arguments:", self.args)
			print('------')
			print("Train to split size: {}. Validation size: {}. Test size: {}".format(train_size, len(self.validation_dataset), len(self.test_dataset)))
			print('------')
		else:
			self.train_dataset, self.validation_dataset, self.test_dataset = self.prepare_dataset(name)
			print('------')
			print("Train to split size: {}. Validation size: {}. Test size: {}".format(len(self.train_dataset), len(self.validation_dataset), len(self.test_dataset)))
			print('------')
			self.valid_loader = DataLoader(self.validation_dataset, batch_size=self.test_batch_size)
			self.test_loader = DataLoader(self.test_dataset, batch_size=self.test_batch_size)
def init_batch_size(self, train_batch_size, test_batch_size, valid_batch_size):
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
self.valid_batch_size = valid_batch_size if valid_batch_size else test_batch_size
	def get_valid_loader(self):
		# Accessor for the validation loader built in __init__.
		return self.valid_loader
	def get_test_loader(self):
		# Accessor for the test loader built in __init__.
		return self.test_loader
def get_train_loaders(self, n_participants, split='powerlaw', batch_size=None):
    """Partition the training data among `n_participants` parties and return one
    DataLoader per party.

    split:
        'classimbalance'   -- party i samples from an increasing number of
                              classes (mnist/cifar10 only).
        'powerlaw'         -- shard sizes follow a power law; the text datasets
                              (sst/mr/imdb) get BucketIterators and return early.
        'balanced'/'equal' -- equal-size random shards.
        'random'           -- random shards of unequal sizes.
    batch_size defaults to self.train_batch_size.

    NOTE(review): an unrecognized `split` value falls through with
    `indices_list` undefined and raises NameError -- confirm callers only pass
    the values above.
    """
    if not batch_size:
        batch_size = self.train_batch_size
    if split == 'classimbalance':
        if self.name not in ['mnist','cifar10']:
            raise NotImplementedError("Calling on dataset {}. Only mnist and cifar10 are implemnted for this split".format(self.name))
        n_classes = 10
        # Per-class index pools over the whole training set.
        data_indices = [(self.train_dataset.targets == class_id).nonzero().view(-1).tolist() for class_id in range(n_classes)]
        # Party i draws from class_sizes[i] distinct classes (1 .. n_classes).
        class_sizes = np.linspace(1, n_classes, n_participants, dtype='int')
        print("class_sizes for each party", class_sizes)
        # Target number of samples per party.
        party_mean = self.sample_size_cap // self.n_participants
        from collections import defaultdict
        party_indices = defaultdict(list)
        for party_id, class_sz in enumerate(class_sizes):
            classes = range(class_sz) # can customize classes for each party rather than just listing
            each_class_id_size = party_mean // class_sz
            # print("party each class size:", party_id, each_class_id_size)
            for i, class_id in enumerate(classes):
                # randomly pick from each class a certain number of samples, with replacement
                selected_indices = random.choices(data_indices[class_id], k=each_class_id_size)
                # randomly pick from each class a certain number of samples, without replacement
                '''
                NEED TO MAKE SURE THAT EACH CLASS HAS MORE THAN each_class_id_size for no replacement sampling
                selected_indices = random.sample(data_indices[class_id],k=each_class_id_size)
                '''
                party_indices[party_id].extend(selected_indices)
                # top up to make sure all parties have the same number of samples
                if i == len(classes) - 1 and len(party_indices[party_id]) < party_mean:
                    extra_needed = party_mean - len(party_indices[party_id])
                    party_indices[party_id].extend(data_indices[class_id][:extra_needed])
                    # Remove the top-up samples from the pool so later parties cannot reuse them.
                    data_indices[class_id] = data_indices[class_id][extra_needed:]
        indices_list = [party_index_list for party_id, party_index_list in party_indices.items()]
    elif split == 'powerlaw':
        if self.name in ['sst', 'mr', 'imdb']:
            # sst, mr, imdb split is different from other datasets, so return here
            self.train_loaders = [BucketIterator(train_dataset, batch_size=self.train_batch_size, device=self.device, sort_key=lambda x: len(x.text),train=True) for train_dataset in self.train_datasets]
            self.shard_sizes = [(len(train_dataset)) for train_dataset in self.train_datasets]
            return self.train_loaders
        else:
            indices_list = powerlaw(list(range(len(self.train_dataset))), n_participants)
    elif split in ['balanced','equal']:
        from utils.utils import random_split
        indices_list = random_split(sample_indices=list(range(len(self.train_dataset))), m_bins=n_participants, equal=True)
    elif split == 'random':
        from utils.utils import random_split
        indices_list = random_split(sample_indices=list(range(len(self.train_dataset))), m_bins=n_participants, equal=False)
    # from collections import Counter
    # for indices in indices_list:
    #     print(Counter(self.train_dataset.targets[indices].tolist()))
    self.shard_sizes = [len(indices) for indices in indices_list]
    # SubsetRandomSampler restricts each loader to its party's shard of indices.
    participant_train_loaders = [DataLoader(self.train_dataset, batch_size=batch_size, sampler=SubsetRandomSampler(indices)) for indices in indices_list]
    return participant_train_loaders
def prepare_dataset(self, name='adult'):
    """Load, preprocess and split the dataset identified by `name`.

    Returns (train_set, validation_set, test_set) for tabular/image datasets.
    For the text datasets ('sst', 'mr', 'imdb') the first element is instead a
    *list* of per-participant training datasets (power-law sized), and the
    fitted text/label fields are stored on self.args for model construction.

    NOTE(review): the 'names' branch returns only (train_set, test_set) --
    inconsistent with every other branch; confirm its callers expect a 2-tuple.
    """
    if name == 'adult':
        from utils.load_adult import get_train_test
        from utils.Custom_Dataset import Custom_Dataset
        import torch
        train_data, train_target, test_data, test_target = get_train_test()
        # Tabular features -> float tensors, labels -> long tensors.
        X_train = torch.tensor(train_data.values, requires_grad=False).float()
        y_train = torch.tensor(train_target.values, requires_grad=False).long()
        X_test = torch.tensor(test_data.values, requires_grad=False).float()
        y_test = torch.tensor(test_target.values, requires_grad=False).long()
        print("X train shape: ", X_train.shape)
        print("y train shape: ", y_train.shape)
        # Report class balance of each split.
        pos, neg =(y_train==1).sum().item() , (y_train==0).sum().item()
        print("Train set Positive counts: {}".format(pos),"Negative counts: {}.".format(neg), 'Split: {:.2%} - {:.2%}'.format(1. * pos/len(X_train), 1.*neg/len(X_train)))
        print("X test shape: ", X_test.shape)
        print("y test shape: ", y_test.shape)
        pos, neg =(y_test==1).sum().item() , (y_test==0).sum().item()
        print("Test set Positive counts: {}".format(pos),"Negative counts: {}.".format(neg), 'Split: {:.2%} - {:.2%}'.format(1. * pos/len(X_test), 1.*neg/len(X_test)))
        # Deterministic, seeded train/validation index split (see get_train_valid_indices).
        train_indices, valid_indices = get_train_valid_indices(len(X_train), self.train_val_split_ratio, self.sample_size_cap)
        train_set = Custom_Dataset(X_train[train_indices], y_train[train_indices], device=self.device)
        validation_set = Custom_Dataset(X_train[valid_indices], y_train[valid_indices], device=self.device)
        test_set = Custom_Dataset(X_test, y_test, device=self.device)
        return train_set, validation_set, test_set
    elif name == 'mnist':
        # FastMNIST preprocesses (pad + normalize) the whole tensor dataset up front.
        train = FastMNIST('datasets/MNIST', train=True, download=True)
        test = FastMNIST('datasets/MNIST', train=False, download=True)
        train_indices, valid_indices = get_train_valid_indices(len(train), self.train_val_split_ratio, self.sample_size_cap)
        from utils.Custom_Dataset import Custom_Dataset
        train_set = Custom_Dataset(train.data[train_indices], train.targets[train_indices], device=self.device)
        validation_set = Custom_Dataset(train.data[valid_indices],train.targets[valid_indices] , device=self.device)
        test_set = Custom_Dataset(test.data, test.targets, device=self.device)
        # Free the full datasets; the Custom_Datasets hold the needed slices.
        del train, test
        return train_set, validation_set, test_set
    elif name == 'cifar10':
        '''
        from torchvision import transforms
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        '''
        # FastCIFAR10 normalizes in __init__, so no torchvision transforms are passed.
        train = FastCIFAR10('datasets/cifar', train=True, download=True)#, transform=transform_train)
        test = FastCIFAR10('datasets/cifar', train=False, download=True)#, transform=transform_test)
        train_indices, valid_indices = get_train_valid_indices(len(train), self.train_val_split_ratio, self.sample_size_cap)
        from utils.Custom_Dataset import Custom_Dataset
        train_set = Custom_Dataset(train.data[train_indices], train.targets[train_indices], device=self.device)
        validation_set = Custom_Dataset(train.data[valid_indices],train.targets[valid_indices] , device=self.device)
        test_set = Custom_Dataset(test.data, test.targets, device=self.device)
        del train, test
        return train_set, validation_set, test_set
    elif name == "sst":
        import torchtext.data as data
        text_field = data.Field(lower=True)
        from torch import long as torch_long
        label_field = LabelField(dtype = torch_long, sequential=False)
        import torchtext.datasets as datasets
        train_data, validation_data, test_data = datasets.SST.splits(text_field, label_field, fine_grained=True)
        # Power-law shard sizes -> ratios -> successive torchtext splits.
        indices_list = powerlaw(list(range(len(train_data))), self.n_participants)
        ratios = [len(indices) / len(train_data) for indices in indices_list]
        train_datasets = split_torchtext_dataset_ratios(train_data, ratios)
        # Vocabularies are built over every shard plus validation/test.
        text_field.build_vocab(*(train_datasets + [validation_data, test_data]))
        label_field.build_vocab(*(train_datasets + [validation_data, test_data]))
        self.args.text_field = text_field
        self.args.label_field = label_field
        return train_datasets, validation_data, test_data
    elif name == 'mr':
        import torchtext.data as data
        from utils import mydatasets
        text_field = data.Field(lower=True)
        from torch import long as torch_long
        label_field = LabelField(dtype = torch_long, sequential=False)
        # label_field = data.Field(sequential=False)
        train_data, dev_data = mydatasets.MR.splits(text_field, label_field, root='.data/mr', shuffle=False)
        # Split dev 50/50 into validation and test (fixed seed for reproducibility).
        validation_data, test_data = dev_data.split(split_ratio=0.5, random_state = random.seed(1234))
        indices_list = powerlaw(list(range(len(train_data))), self.n_participants)
        ratios = [len(indices) / len(train_data) for indices in indices_list]
        train_datasets = split_torchtext_dataset_ratios(train_data, ratios)
        # print(train_data, dir(train_data))
        # print((train_datasets[0].examples[0].text))
        # print((train_datasets[0].examples[1].text))
        # print((train_datasets[0].examples[2].text))
        # exit()
        text_field.build_vocab( *(train_datasets + [validation_data, test_data] ))
        label_field.build_vocab( *(train_datasets + [validation_data, test_data] ))
        self.args.text_field = text_field
        self.args.label_field = label_field
        return train_datasets, validation_data, test_data
    elif name == 'imdb':
        from torch import long as torch_long
        # text_field = Field(tokenize = 'spacy', preprocessing = generate_bigrams) # generate_bigrams takes about 2 minutes
        text_field = Field(tokenize = 'spacy')
        label_field = LabelField(dtype = torch_long)
        dirname = '.data/imdb/aclImdb'
        from torch.nn.init import normal_
        from torchtext import datasets
        train_data, test_data = datasets.IMDB.splits(text_field, label_field) # 25000, 25000 samples each
        # use 5000 out of 25000 of test_data as the test_data
        test_data, remaining = test_data.split(split_ratio=0.2 ,random_state = random.seed(1234))
        # use 5000 out of the remaining 2000 of test_data as valid data
        valid_data, remaining = remaining.split(split_ratio=0.25 ,random_state = random.seed(1234))
        # train_data, valid_data = train_data.split(split_ratio=self.train_val_split_ratio ,random_state = random.seed(1234))
        indices_list = powerlaw(list(range(len(train_data))), self.n_participants)
        ratios = [len(indices) / len(train_data) for indices in indices_list]
        train_datasets = split_torchtext_dataset_ratios(train_data, ratios)
        MAX_VOCAB_SIZE = 25_000
        # Pretrained GloVe vectors; unknown tokens initialized from a normal distribution.
        text_field.build_vocab(*(train_datasets + [valid_data, test_data] ), max_size = MAX_VOCAB_SIZE, vectors = "glove.6B.100d", unk_init = normal_)
        label_field.build_vocab( *(train_datasets + [valid_data, test_data] ))
        # INPUT_DIM = len(text_field.vocab)
        # OUTPUT_DIM = 1
        # EMBEDDING_DIM = 100
        PAD_IDX = text_field.vocab.stoi[text_field.pad_token]
        self.args.text_field = text_field
        self.args.label_field = label_field
        self.args.pad_idx = PAD_IDX
        return train_datasets, valid_data, test_data
    elif name == 'names':
        from utils.load_names import get_train_test
        from utils.Custom_Dataset import Custom_Dataset
        import torch
        from collections import Counter
        # NOTE(review): Counter and reference_dict are unused here -- presumably leftovers.
        X_train, y_train, X_test, y_test, reference_dict = get_train_test()
        print("X train shape: ", X_train.shape)
        print("y train shape: ", y_train.shape)
        print("X test shape: ", X_test.shape)
        print("y test shape: ", y_test.shape)
        from utils.Custom_Dataset import Custom_Dataset
        train_set = Custom_Dataset(X_train, y_train)
        test_set = Custom_Dataset(X_test, y_test)
        # NOTE(review): returns a 2-tuple (no validation set), unlike the other branches.
        return train_set, test_set
from torchvision.datasets import MNIST
class FastMNIST(MNIST):
    """MNIST variant that preprocesses the entire tensor dataset once at load
    time: scales to [0, 1], zero-pads 28x28 images to 32x32, and standardizes
    with the dataset's own global mean/std.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from torch.nn import ZeroPad2d
        # [N, 28, 28] uint8 -> [N, 1, 28, 28] float in [0, 1].
        images = self.data.unsqueeze(1).float().div(255)
        # Zero-pad each image by 2 on every side: 28x28 -> 32x32.
        pad = ZeroPad2d(2)
        images = torch.stack([pad(img.data) for img in images])
        self.targets = self.targets.long()
        # Standardize in place using the global (post-padding) statistics.
        self.data = images.sub_(images.mean()).div_(images.std())
        # self.data = self.data.sub_(0.1307).div_(0.3081)
        print('MNIST data shape {}, targets shape {}'.format(self.data.shape, self.targets.shape))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        return self.data[index], self.targets[index]
from torchvision.datasets import CIFAR10
class FastCIFAR10(CIFAR10):
    """CIFAR10 variant that preprocesses the entire dataset once at load time:
    converts to float in [0, 1], reorders NHWC -> NCHW, and normalizes each
    channel with fixed per-channel statistics.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from torch import from_numpy
        # numpy NHWC uint8 -> torch NCHW float in [0, 1].
        images = from_numpy(self.data).float().div(255).permute(0, 3, 1, 2)
        self.targets = torch.Tensor(self.targets).long()
        # Per-channel mean/std:
        # https://github.com/kuangliu/pytorch-cifar/issues/16
        # https://github.com/kuangliu/pytorch-cifar/issues/8
        channel_means = (0.4914, 0.4822, 0.4465)
        channel_stds = (0.2470, 0.2435, 0.2616)
        for channel, (mu, sigma) in enumerate(zip(channel_means, channel_stds)):
            images[:, channel].sub_(mu).div_(sigma)
        self.data = images
        print('CIFAR10 data shape {}, targets shape {}'.format(self.data.shape, self.targets.shape))

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        return self.data[index], self.targets[index]
def powerlaw(sample_indices, n_participants, alpha=1.65911332899, shuffle=False):
    """Partition `sample_indices` into `n_participants` consecutive shards whose
    sizes follow a power-law distribution.

    The smaller `alpha`, the more extreme the size imbalance. With
    shuffle=True the input list is shuffled in place first (fixed seed 1234).
    Returns a list of index lists, one per participant.
    """
    import math
    from scipy.stats import powerlaw as powerlaw_dist
    if shuffle:
        random.seed(1234)
        random.shuffle(sample_indices)
    party_size = int(len(sample_indices) / n_participants)
    # Evenly spaced quantiles of the power-law distribution set the relative shard sizes.
    quantiles = np.linspace(powerlaw_dist.ppf(0.01, alpha), powerlaw_dist.ppf(0.99, alpha), n_participants)
    shard_sizes = [math.ceil(q / sum(quantiles) * party_size * n_participants) for q in quantiles]
    shards = []
    cursor = 0
    for size in shard_sizes:
        shards.append(sample_indices[cursor:cursor + size])
        cursor += size
    return shards
def get_train_valid_indices(n_samples, train_val_split_ratio, sample_size_cap=None):
    """Shuffle [0, n_samples) with a fixed seed and split into train/validation.

    The first `train_val_split_ratio` fraction becomes the training indices,
    optionally truncated to `sample_size_cap`; the remainder is validation.
    Deterministic: always seeds with 1111 before shuffling.
    """
    random.seed(1111)
    shuffled = list(range(n_samples))
    random.shuffle(shuffled)
    cut = int(n_samples * train_val_split_ratio)
    valid_indices = shuffled[cut:]
    if sample_size_cap is None:
        train_indices = shuffled[:cut]
    else:
        # Cap the training set size without touching the validation split.
        train_indices = shuffled[:min(cut, sample_size_cap)]
    return train_indices, valid_indices
def split_torchtext_dataset_ratios(data, ratios):
    """Split a torchtext dataset into len(ratios) pieces with the given size ratios.

    Repeatedly peels the first ratio off the front: each iteration splits the
    remaining data at ratios[0] / sum(remaining ratios), so the final pieces
    have the requested proportions. Uses a fixed seed (1234) for determinism.

    Fix: operate on a *copy* of `ratios` -- the original implementation popped
    from the caller's list, silently emptying it as a side effect.

    Returns the list of dataset pieces, in order.
    """
    remaining_ratios = list(ratios)  # do not mutate the caller's list
    train_datasets = []
    while len(remaining_ratios) > 1:
        split_ratio = remaining_ratios[0] / sum(remaining_ratios)
        remaining_ratios.pop(0)
        train_dataset, data = data.split(split_ratio=split_ratio, random_state=random.seed(1234))
        train_datasets.append(train_dataset)
    train_datasets.append(data)
    return train_datasets
def generate_bigrams(x):
    """Append every distinct adjacent-token bigram of `x` (as 'tok1 tok2'
    strings) to `x` in place, and return the same list."""
    for first, second in set(zip(x, x[1:])):
        x.append(first + ' ' + second)
    return x
'''
def get_df(pos, neg):
data_rows = []
for text in pos:
data_rows.append(['positive', text.rstrip()])
for text in neg:
data_rows.append(['negative', text.rstrip()])
return pd.DataFrame(data=data_rows, columns=['label', 'text'])
def create_data_txts_for_sst(n_participants, dirname='.data/sst'):
train_txt = 'trees/train.txt'
with open(os.path.join(dirname, train_txt), 'r') as file:
train_samples = file.readlines()
all_indices = list(range(len(train_samples)))
random.seed(1111)
n_samples_each = 8000 // 20
sample_indices = random.sample(all_indices, n_samples_each * n_participants)
foldername = "P{}_powerlaw".format(n_participants)
if foldername in os.listdir(dirname):
pass
else:
try:
os.mkdir(os.path.join(dirname, foldername))
except:
pass
indices_list = powerlaw(sample_indices, n_participants)
for i, indices in enumerate(indices_list):
with open(os.path.join(dirname, foldername,'P{}.txt'.format(i)) , 'w') as file:
[file.write(train_samples[index]) for index in indices]
return
def create_powerlaw_csvs(n_participants, dirname, train_df):
# shuffle the train samples
train_df = train_df.sample(frac=1)
n_samples_each = len(train_df) // 20
sample_indices = list(range(n_samples_each * n_participants))
foldername = "P{}_powerlaw".format(n_participants)
foldername = os.path.join(dirname, foldername)
if foldername in os.listdir(dirname):
pass
else:
try:
os.mkdir(foldername)
except:
pass
indices_list = powerlaw(sample_indices, n_participants)
for i, indices in enumerate(indices_list):
sub_df = train_df.iloc[indices]
sub_df.to_csv(os.path.join(foldername,'P{}.csv'.format(i)), index=False)
return
def read_samples(samples_dir):
samples = []
for file in os.listdir(samples_dir):
with open(os.path.join(samples_dir, file), 'r') as fh:
samples.extend(fh.readlines())
return [sample.rstrip() for sample in samples]
def create_data_csvs_for_mr(n_participants, dirname='.data/mr'):
pos = 'rt-polaritydata/rt-polarity.pos'
neg = 'rt-polaritydata/rt-polarity.neg'
with open(os.path.join(dirname, pos), 'r', encoding='latin-1') as file:
pos_samples = file.readlines()
with open(os.path.join(dirname, neg), 'r', encoding='latin-1') as file:
neg_samples = file.readlines()
random.seed(1111)
random.shuffle(pos_samples)
random.shuffle(neg_samples)
train, val, test = [], [], []
N = len(pos_samples)
split_points = [4000, (N-4000)//2+4000 ]
train, val, test = np.array_split(pos_samples,split_points)
train_, val_, test_ = np.array_split( neg_samples,split_points)
train_df = get_df(train, train_)
val_df = get_df(val, val_)
test_df = get_df(test, test_)
val_df.to_csv( os.path.join(dirname, 'val.csv') , index=False)
test_df.to_csv( os.path.join(dirname, 'test.csv') , index=False)
create_powerlaw_csvs(n_participants, dirname, train_df)
return
def create_data_csvs_for_IMDB(n_participants, dirname):
train_dir = os.path.join(dirname, 'train')
pos_train_dir = os.path.join(train_dir, 'pos')
neg_train_dir = os.path.join(train_dir, 'neg')
test_dir = os.path.join(dirname, 'test')
pos_test_dir = os.path.join(test_dir, 'pos')
neg_test_dir = os.path.join(test_dir, 'neg')
pos_samples = read_samples(pos_train_dir) + read_samples(pos_test_dir)
neg_samples = read_samples(neg_train_dir) + read_samples(neg_test_dir)
N_pos, N_neg = len(pos_samples), len(neg_samples)
split_points = [int(N_pos*0.8), int(N_pos*0.9) ]
train, val, test = np.array_split(pos_samples, split_points)
split_points = [int(N_neg*0.8), int(N_pos*0.9) ]
train_, val_, test_ = np.array_split(neg_samples,split_points)
train_df = get_df(train, train_)
val_df = get_df(val, val_)
test_df = get_df(test, test_)
val_df.to_csv( os.path.join(dirname, 'val.csv') , index=False)
test_df.to_csv( os.path.join(dirname, 'test.csv') , index=False)
create_powerlaw_csvs(n_participants, dirname, train_df)
return
'''
# --- dataset concatenation artifact: boundary between two unrelated source files ---
3285915 | # Modify from https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/evaluate.py
# coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT zero-shot evaluation."""
import math
import torch
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
sys.path.append(dir_path + "/../../../3rdparty/Megatron-LM")
from megatron import get_args
from megatron.initialize import initialize_megatron
from megatron import get_args
from megatron import print_rank_0, is_last_rank
from megatron import get_tokenizer
from megatron import mpu
from megatron.model import GPTModel
from megatron.utils import get_ltor_masks_and_position_ids, unwrap_model
from megatron.p2p_communication import recv_forward
from tasks.finetune_utils import build_data_loader
from tasks.zeroshot_gpt.datasets import build_dataset
# These are needed to unwrap the model, would be nice to put these in megatron.utils if possible?
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from megatron.model import DistributedDataParallel as LocalDDP
from megatron.model import Float16Module
from examples.pytorch.gpt.utils.gpt import GPT
def get_tasks_args(parser):
    """Provide extra arguments required for tasks.

    Registers the zero-shot / retriever / FasterTransformer options on
    `parser` (all in one 'tasks' group) and returns the parser.
    """
    group = parser.add_argument_group(title='tasks')
    add = group.add_argument  # shorthand: every option below joins the 'tasks' group

    add('--task', type=str, required=True,
        help='Task name.')
    add('--epochs', type=int, default=None,
        help='Number of finetunning epochs. Zero results in '
        'evaluation only.')
    add('--pretrained-checkpoint', type=str, default=None,
        help='Pretrained checkpoint used for finetunning.')
    add('--keep-last', action='store_true',
        help='Keep the last batch (maybe incomplete) in'
        'the data loader')
    add('--train-data', nargs='+', default=None,
        help='Whitespace separated paths or corpora names '
        'for training.')
    add('--valid-data', nargs='*', default=None,
        help='path(s) to the validation data.')
    add('--overlapping-eval', type=int, default=32,
        help='Sliding window for overlapping evaluation.')
    add('--strict-lambada', action='store_true',
        help='Use more difficult formulation of lambada.')
    # Retriever args
    add('--qa-data-dev', type=str, default=None,
        help='Path to the QA dataset dev file.')
    add('--qa-data-test', type=str, default=None,
        help='Path to the QA dataset test file.')
    # Faiss arguments for retriever
    add('--faiss-use-gpu', action='store_true',
        help='Whether create the FaissMIPSIndex on GPU')
    add('--faiss-match', type=str, default='string',
        choices=['regex', 'string'], help="Answer matching '\
'logic type")
    add('--faiss-topk-retrievals', type=int, default=100,
        help='Number of blocks to use as top-k during retrieval')
    # finetune for retriever
    add('--eval-micro-batch-size', type=int, default=None,
        help='Eval Batch size per model instance (local batch '
        'size). Global batch size is local batch size '
        'times data parallel size.')
    add('--train-with-neg', action='store_true',
        help='Whether to use negative examples during model '
        'training')
    add('--train-hard-neg', type=int, default=0,
        help='Number of hard negative exmaples to use during '
        'training')
    # parameters for Av.rank validation method
    # Following options/arguments have been taken directly from DPR codebase
    add('--val-av-rank-hard-neg', type=int, default=30,
        help='Av.rank validation: how many hard negatives to'
        ' take from each question pool')
    add('--val-av-rank-other-neg', type=int, default=30,
        help='Av.rank validation: how many other negatives to'
        ' take from each question pool')
    # FasterTransformer runtime options
    add('--ckpt-path', type=str, required=True,
        help='c model checkpoint path for FasterTransformer.')
    add('--lib-path', type=str, required=True,
        help='library path of FT op.')
    add('--beam_width', type=int, required=True,
        help='beam width for beam search.')
    add('--top_k', type=int, required=True,
        help='top k for sampling.')
    add('--top_p', type=float, required=True,
        help='top p for sampling.')
    return parser
def get_model_provider(eval_metric):
    """Based on evaluation metric set the parallel-output flag and
    return the model provider."""
    # Map each supported metric to the parallel_output flag passed to GPTModel.
    parallel_output_by_metric = {'loss': True, 'accuracy': False}

    def model_provider(pre_process=True, post_process=True):
        """Build the model."""
        if eval_metric not in parallel_output_by_metric:
            raise NotImplementedError('output type for {} evaluation metric '
                                      'is not supported.'.format(eval_metric))
        print_rank_0('building GPT model ...')
        return GPTModel(num_tokentypes=0,
                        parallel_output=parallel_output_by_metric[eval_metric],
                        pre_process=pre_process, post_process=post_process)

    return model_provider
def process_batch(batch):
    """Process batch and produce inputs for the model.

    Builds next-token-prediction inputs from batch['text']: `tokens` holds
    positions [0, T-1) and `labels` positions [1, T) of the same sequences,
    all moved to GPU.
    Returns (tokens, labels, attention_mask, position_ids, loss_mask).
    """
    args = get_args()
    tokenizer = get_tokenizer()
    # Mask of positions that count toward the metric (byte tensor on GPU).
    loss_mask = batch['pad_mask'].long().cuda().contiguous().byte()
    tokens_ = batch['text'].long().cuda().contiguous()
    # Shift by one: predict token t+1 from tokens <= t.
    labels = tokens_[:, 1:].contiguous()
    tokens = tokens_[:, :-1].contiguous()
    # Get the masks and postition ids.
    attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
        tokens,
        tokenizer.eod,
        args.reset_position_ids,
        args.reset_attention_mask,
        args.eod_mask_loss)
    return tokens, labels, attention_mask, position_ids, loss_mask
def forward_step(batch, model, eval_metric, args):
    """Forward step.

    Runs the FasterTransformer GPT incrementally: for each prefix length
    i+1 it requests exactly one generated token, collects the per-position
    predictions, and scores them against `labels`.
    Returns a summed loss ('loss'), a count of fully-correct samples
    ('accuracy'), or None on non-last pipeline stages.
    """
    # Get the batch.
    tokens, labels, attention_mask, position_ids, loss_mask = process_batch(
        batch)
    # Tell the model what our actual batch size will be
    args = get_args()
    args.micro_batch_size = len(labels)
    input_tensor = recv_forward()
    # Forward pass through the model.
    unwrapped_model = unwrap_model(
        model, (torchDDP, LocalDDP, Float16Module))
    unwrapped_model.set_input_tensor(input_tensor)
    # Per-sample effective lengths: number of tokens that are not end_id.
    start_lengths = torch.sum(tokens != model.end_id, axis=1).contiguous().int()
    input_len = torch.max(start_lengths).contiguous().int()
    output = []
    for i in range(input_len):
        # Clamp each sample's context length to its own true length.
        tmp_length = torch.ones(args.micro_batch_size) * (i + 1)
        tmp_length = tmp_length.cuda().int()
        tmp_start_lengths = torch.min(tmp_length, start_lengths).contiguous()
        input_ids = tokens[:,:(i + 1)].contiguous().int()
        # Ask for a single output token after the prefix.
        # NOTE(review): positional args after top_p are assumed to be
        # penalty/temperature-style knobs -- confirm against GPT.forward.
        output_id = model(input_ids,
                          tmp_start_lengths,
                          1,
                          args.beam_width,
                          args.top_k,
                          args.top_p,
                          0.0,
                          1.0,
                          1.0,
                          1.0,
                          0)
        # Keep the last token of the top beam for each sample.
        output.append(output_id[:,0,-1].reshape([-1, 1]))
    output = torch.cat((output), 1)
    # Right-pad the predictions with ones so they match the label width.
    padding = torch.ones(output.shape[0], labels.shape[1] - output.shape[1]).cuda().int()
    outputs = torch.cat((output, padding), 1)
    if mpu.is_pipeline_last_stage():
        # For loss, return the unreduced loss.
        if eval_metric == 'loss':
            # NOTE(review): this feeds generated token *ids* (not logits) into
            # vocab_parallel_cross_entropy -- confirm this is intentional.
            losses = mpu.vocab_parallel_cross_entropy(
                output.contiguous().float(), labels.contiguous())
            loss = torch.sum(
                losses.view(-1) * loss_mask.contiguous().view(-1).float())
            return loss
        # For accuracy, return the number of correctly predicted samples.
        if eval_metric == 'accuracy':
            correct = (outputs == labels).float()
            # Positions outside the loss mask count as correct, so prod(-1)
            # only requires the masked positions to match.
            correct[(1 - loss_mask).bool()] = 1
            correct = correct.prod(-1)
            return correct.sum()
        raise NotImplementedError('forward method for evaluation metric {} '
                                  'is not implemented.'.format(eval_metric))
    # Non-last pipeline stages have nothing to report.
    return None
def evaluate(data_loader, model, eval_metric, args):
    """Loop over `data_loader`, summing the per-batch metric across
    data-parallel ranks; returns the accumulated total."""
    args = get_args()
    # Evaluation mode disables dropout.
    model.eval()
    running_total = 0.0
    with torch.no_grad():
        for iteration, batch in enumerate(data_loader):
            if iteration % args.log_interval == 0:
                print_rank_0('> working on iteration: {}'.format(iteration))
            # Forward evaluation.
            batch_output = forward_step(batch, model, eval_metric, args)
            # Reduce across data-parallel processes (only the last pipeline
            # stage produces a non-None output).
            if mpu.is_pipeline_last_stage():
                torch.distributed.all_reduce(batch_output,
                                             group=mpu.get_data_parallel_group())
                running_total += batch_output
    return running_total
def evaluate_and_print_results(task, data_loader, model, eval_metric, args):
    """Run evaluation over `data_loader` and print a one-line summary
    (loss/perplexity or accuracy) on the last rank only."""
    # Evaluate and get results.
    metric_total = evaluate(data_loader, model, eval_metric, args)
    if not is_last_rank():
        return
    report = ' validation results on {} | '.format(task)
    if eval_metric == 'loss':
        num_tokenized_tokens = data_loader.dataset.num_tokenized_tokens
        num_original_tokens = data_loader.dataset.num_original_tokens
        val_loss = metric_total / (num_tokenized_tokens - 1)
        # Clamp the exponent at 20 to avoid overflow on divergent losses.
        ppl = math.exp(min(20, val_loss))
        token_ratio = (num_tokenized_tokens - 1) / (num_original_tokens - 1)
        adjusted_ppl = math.exp(min(20, val_loss * token_ratio))
        report += 'avg loss: {:.4E} | '.format(val_loss)
        report += 'ppl: {:.4E} | '.format(ppl)
        report += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl)
        report += 'token ratio: {} |'.format(token_ratio)
    elif eval_metric == 'accuracy':
        num_examples = len(data_loader.dataset)
        acc = metric_total / num_examples
        report += 'number correct: {:.4E} | '.format(metric_total)
        report += 'total examples: {:.4E} | '.format(num_examples)
        report += 'avg accuracy: {:.4E}'.format(acc)
    else:
        raise NotImplementedError('evaluation method for {} metric is not '
                                  'implemented yet.'.format(eval_metric))
    rule = '-' * (len(report) + 1)
    print(rule)
    print(report)
    print(rule)
def main():
    """Main program.

    Builds the FasterTransformer GPT from the parsed Megatron args, loads the
    converted checkpoint, and runs the requested zero-shot evaluation task.
    """
    args = get_args()
    # Interleaved (virtual pipeline) schedules are not supported on this path.
    if args.num_layers_per_virtual_pipeline_stage is not None:
        print("Interleaved pipeline schedule is not yet supported for text generation.")
        exit()
    # Task -> metric: LAMBADA reports accuracy, WIKITEXT103 reports loss/ppl.
    if args.task == 'LAMBADA':
        eval_metric = 'accuracy'
    elif args.task == 'WIKITEXT103':
        eval_metric = 'loss'
    else:
        raise NotImplementedError('{} task is not implemented.'.format(
            args.task))
    tokenzier = get_tokenizer()  # (sic) misspelled local name, kept as-is
    # Set up model and load checkpoint.
    # size_per_head = hidden_size / num_heads; eod serves as both start and end id.
    # NOTE(review): tensor/pipeline parallel sizes are hard-coded to 1 and the
    # library path is hard-coded instead of using --lib-path -- confirm intent.
    model = GPT(args.num_attention_heads, (int)(args.hidden_size / args.num_attention_heads),
                args.padded_vocab_size, tokenzier.eod, tokenzier.eod,
                args.num_layers, args.seq_length, 1, 1, "lib/libth_gpt.so")
    if not model.load(ckpt_path=args.ckpt_path):
        print("[ERROR] Checkpoint file not found at {}.".format(args.ckpt_path))
        exit(-1)
    # Match the requested precision (fp16 and bf16 are mutually exclusive).
    if args.fp16:
        assert not args.bf16
        model.half()
    if args.bf16:
        assert not args.fp16
        model.bfloat16()
    # Data stuff.
    dataset = build_dataset(args.task)
    dataloader = build_data_loader(dataset, args.micro_batch_size,
                                   args.num_workers, drop_last=False)
    # Run evaluation.
    evaluate_and_print_results(args.task, dataloader, model, eval_metric, args)
    print_rank_0('done :-)')
# Script entry point: initialize Megatron (registering the extra task args),
# reject unsupported interleaved pipeline schedules, then run the evaluation.
# Fix: removed the "| StarcoderdataPython" dataset-concatenation artifact that
# was fused onto the main() call and would raise a NameError at runtime.
if __name__ == '__main__':
    initialize_megatron(extra_args_provider=get_tasks_args)
    args = get_args()
    if args.num_layers_per_virtual_pipeline_stage is not None:
        print("Interleaved pipeline schedule is not yet supported for downstream tasks.")
        exit()
    main()
3229030 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from colour.utilities import CaseInsensitiveMapping
from .aces import (ACES_2065_1_COLOURSPACE, ACES_CC_COLOURSPACE,
ACES_CCT_COLOURSPACE, ACES_CG_COLOURSPACE,
ACES_PROXY_COLOURSPACE)
from .aces_it import ACES_RICD
from .adobe_rgb_1998 import ADOBE_RGB_1998_COLOURSPACE
from .adobe_wide_gamut_rgb import ADOBE_WIDE_GAMUT_RGB_COLOURSPACE
from .apple_rgb import APPLE_RGB_COLOURSPACE
from .arri_alexa_wide_gamut import ALEXA_WIDE_GAMUT_COLOURSPACE
from .best_rgb import BEST_RGB_COLOURSPACE
from .beta_rgb import BETA_RGB_COLOURSPACE
from .cie_rgb import CIE_RGB_COLOURSPACE
from .canon_cinema_gamut import CINEMA_GAMUT_COLOURSPACE
from .color_match_rgb import COLOR_MATCH_RGB_COLOURSPACE
from .dcdm_xyz import DCDM_XYZ_COLOURSPACE
from .dci_p3 import DCI_P3_COLOURSPACE, DCI_P3_P_COLOURSPACE
from .display_p3 import DISPLAY_P3_COLOURSPACE
from .p3_d65 import P3_D65_COLOURSPACE
from .don_rgb_4 import DON_RGB_4_COLOURSPACE
from .dji_dgamut import DJI_D_GAMUT_COLOURSPACE
from .eci_rgb_v2 import ECI_RGB_V2_COLOURSPACE
from .ekta_space_ps5 import EKTA_SPACE_PS_5_COLOURSPACE
from .fujifilm_f_gamut import F_GAMUT_COLOURSPACE
from .filmlight_egamut import FILMLIGHT_E_GAMUT_COLOURSPACE
from .gopro import PROTUNE_NATIVE_COLOURSPACE
from .itur_bt_470 import BT470_525_COLOURSPACE, BT470_625_COLOURSPACE
from .itur_bt_709 import BT709_COLOURSPACE
from .itur_bt_2020 import BT2020_COLOURSPACE
from .max_rgb import MAX_RGB_COLOURSPACE
from .pal_secam import PAL_SECAM_COLOURSPACE
from .red import (RED_COLOR_COLOURSPACE, RED_COLOR_2_COLOURSPACE,
RED_COLOR_3_COLOURSPACE, RED_COLOR_4_COLOURSPACE,
DRAGON_COLOR_COLOURSPACE, DRAGON_COLOR_2_COLOURSPACE,
RED_WIDE_GAMUT_RGB_COLOURSPACE)
from .rimm_romm_rgb import (ROMM_RGB_COLOURSPACE, RIMM_RGB_COLOURSPACE,
ERIMM_RGB_COLOURSPACE, PROPHOTO_RGB_COLOURSPACE)
from .russell_rgb import RUSSELL_RGB_COLOURSPACE
from .sharp import SHARP_RGB_COLOURSPACE
from .smpte_240m import SMPTE_240M_COLOURSPACE
from .smpte_c import SMPTE_C_COLOURSPACE
from .ntsc import NTSC_1953_COLOURSPACE, NTSC_1987_COLOURSPACE
from .sony import (S_GAMUT_COLOURSPACE, S_GAMUT3_COLOURSPACE,
S_GAMUT3_CINE_COLOURSPACE)
from .srgb import sRGB_COLOURSPACE
from .panasonic_v_gamut import V_GAMUT_COLOURSPACE
from .xtreme_rgb import XTREME_RGB_COLOURSPACE
# Registry of every RGB colourspace bundled with this package, keyed by each
# colourspace's canonical name (CaseInsensitiveMapping makes lookup
# case-insensitive).
RGB_COLOURSPACES = CaseInsensitiveMapping({
    ACES_2065_1_COLOURSPACE.name: ACES_2065_1_COLOURSPACE,
    ACES_CC_COLOURSPACE.name: ACES_CC_COLOURSPACE,
    ACES_CCT_COLOURSPACE.name: ACES_CCT_COLOURSPACE,
    ACES_PROXY_COLOURSPACE.name: ACES_PROXY_COLOURSPACE,
    ACES_CG_COLOURSPACE.name: ACES_CG_COLOURSPACE,
    ADOBE_RGB_1998_COLOURSPACE.name: ADOBE_RGB_1998_COLOURSPACE,
    ADOBE_WIDE_GAMUT_RGB_COLOURSPACE.name: ADOBE_WIDE_GAMUT_RGB_COLOURSPACE,
    APPLE_RGB_COLOURSPACE.name: APPLE_RGB_COLOURSPACE,
    ALEXA_WIDE_GAMUT_COLOURSPACE.name: ALEXA_WIDE_GAMUT_COLOURSPACE,
    BEST_RGB_COLOURSPACE.name: BEST_RGB_COLOURSPACE,
    BETA_RGB_COLOURSPACE.name: BETA_RGB_COLOURSPACE,
    BT470_525_COLOURSPACE.name: BT470_525_COLOURSPACE,
    BT470_625_COLOURSPACE.name: BT470_625_COLOURSPACE,
    BT709_COLOURSPACE.name: BT709_COLOURSPACE,
    BT2020_COLOURSPACE.name: BT2020_COLOURSPACE,
    CIE_RGB_COLOURSPACE.name: CIE_RGB_COLOURSPACE,
    CINEMA_GAMUT_COLOURSPACE.name: CINEMA_GAMUT_COLOURSPACE,
    COLOR_MATCH_RGB_COLOURSPACE.name: COLOR_MATCH_RGB_COLOURSPACE,
    DCDM_XYZ_COLOURSPACE.name: DCDM_XYZ_COLOURSPACE,
    DCI_P3_COLOURSPACE.name: DCI_P3_COLOURSPACE,
    DCI_P3_P_COLOURSPACE.name: DCI_P3_P_COLOURSPACE,
    DISPLAY_P3_COLOURSPACE.name: DISPLAY_P3_COLOURSPACE,
    DJI_D_GAMUT_COLOURSPACE.name: DJI_D_GAMUT_COLOURSPACE,
    DON_RGB_4_COLOURSPACE.name: DON_RGB_4_COLOURSPACE,
    ECI_RGB_V2_COLOURSPACE.name: ECI_RGB_V2_COLOURSPACE,
    EKTA_SPACE_PS_5_COLOURSPACE.name: EKTA_SPACE_PS_5_COLOURSPACE,
    FILMLIGHT_E_GAMUT_COLOURSPACE.name: FILMLIGHT_E_GAMUT_COLOURSPACE,
    PROTUNE_NATIVE_COLOURSPACE.name: PROTUNE_NATIVE_COLOURSPACE,
    MAX_RGB_COLOURSPACE.name: MAX_RGB_COLOURSPACE,
    P3_D65_COLOURSPACE.name: P3_D65_COLOURSPACE,
    PAL_SECAM_COLOURSPACE.name: PAL_SECAM_COLOURSPACE,
    RED_COLOR_COLOURSPACE.name: RED_COLOR_COLOURSPACE,
    RED_COLOR_2_COLOURSPACE.name: RED_COLOR_2_COLOURSPACE,
    RED_COLOR_3_COLOURSPACE.name: RED_COLOR_3_COLOURSPACE,
    RED_COLOR_4_COLOURSPACE.name: RED_COLOR_4_COLOURSPACE,
    RED_WIDE_GAMUT_RGB_COLOURSPACE.name: RED_WIDE_GAMUT_RGB_COLOURSPACE,
    DRAGON_COLOR_COLOURSPACE.name: DRAGON_COLOR_COLOURSPACE,
    DRAGON_COLOR_2_COLOURSPACE.name: DRAGON_COLOR_2_COLOURSPACE,
    ROMM_RGB_COLOURSPACE.name: ROMM_RGB_COLOURSPACE,
    RIMM_RGB_COLOURSPACE.name: RIMM_RGB_COLOURSPACE,
    ERIMM_RGB_COLOURSPACE.name: ERIMM_RGB_COLOURSPACE,
    F_GAMUT_COLOURSPACE.name: F_GAMUT_COLOURSPACE,
    PROPHOTO_RGB_COLOURSPACE.name: PROPHOTO_RGB_COLOURSPACE,
    RUSSELL_RGB_COLOURSPACE.name: RUSSELL_RGB_COLOURSPACE,
    SHARP_RGB_COLOURSPACE.name: SHARP_RGB_COLOURSPACE,
    SMPTE_240M_COLOURSPACE.name: SMPTE_240M_COLOURSPACE,
    SMPTE_C_COLOURSPACE.name: SMPTE_C_COLOURSPACE,
    NTSC_1953_COLOURSPACE.name: NTSC_1953_COLOURSPACE,
    NTSC_1987_COLOURSPACE.name: NTSC_1987_COLOURSPACE,
    S_GAMUT_COLOURSPACE.name: S_GAMUT_COLOURSPACE,
    S_GAMUT3_COLOURSPACE.name: S_GAMUT3_COLOURSPACE,
    S_GAMUT3_CINE_COLOURSPACE.name: S_GAMUT3_CINE_COLOURSPACE,
    sRGB_COLOURSPACE.name: sRGB_COLOURSPACE,
    V_GAMUT_COLOURSPACE.name: V_GAMUT_COLOURSPACE,
    XTREME_RGB_COLOURSPACE.name: XTREME_RGB_COLOURSPACE
})
# Attach documentation to the mapping object itself so it shows in help().
RGB_COLOURSPACES.__doc__ = """
Aggregated *RGB* colourspaces.
RGB_COLOURSPACES : CaseInsensitiveMapping
Aliases:
- 'aces': ACES_2065_1_COLOURSPACE.name
- 'adobe1998': ADOBE_RGB_1998_COLOURSPACE.name
- 'prophoto': PROPHOTO_RGB_COLOURSPACE.name
"""
# yapf: disable
# Convenience aliases resolving to canonical entries registered above.
RGB_COLOURSPACES['aces'] = (
    RGB_COLOURSPACES[ACES_2065_1_COLOURSPACE.name])
RGB_COLOURSPACES['adobe1998'] = (
    RGB_COLOURSPACES[ADOBE_RGB_1998_COLOURSPACE.name])
RGB_COLOURSPACES['prophoto'] = (
    RGB_COLOURSPACES[PROPHOTO_RGB_COLOURSPACE.name])
# yapf: enable
# Public API of this subpackage: the ACES RICD dataset, the aggregated
# mapping, and every individual colourspace definition.
__all__ = ['ACES_RICD']
__all__ += ['RGB_COLOURSPACES']
__all__ += [
    'ACES_2065_1_COLOURSPACE', 'ACES_CC_COLOURSPACE', 'ACES_CCT_COLOURSPACE',
    'ACES_PROXY_COLOURSPACE', 'ACES_CG_COLOURSPACE',
    'ADOBE_RGB_1998_COLOURSPACE', 'ADOBE_WIDE_GAMUT_RGB_COLOURSPACE',
    'ALEXA_WIDE_GAMUT_COLOURSPACE', 'APPLE_RGB_COLOURSPACE',
    'BEST_RGB_COLOURSPACE', 'BETA_RGB_COLOURSPACE', 'BT470_525_COLOURSPACE',
    'BT470_625_COLOURSPACE', 'BT709_COLOURSPACE', 'BT2020_COLOURSPACE',
    'CIE_RGB_COLOURSPACE', 'CINEMA_GAMUT_COLOURSPACE',
    'COLOR_MATCH_RGB_COLOURSPACE', 'DCDM_XYZ_COLOURSPACE',
    'DCI_P3_COLOURSPACE', 'DCI_P3_P_COLOURSPACE', 'DISPLAY_P3_COLOURSPACE',
    'DJI_D_GAMUT_COLOURSPACE', 'DON_RGB_4_COLOURSPACE',
    'ECI_RGB_V2_COLOURSPACE', 'EKTA_SPACE_PS_5_COLOURSPACE',
    'FILMLIGHT_E_GAMUT_COLOURSPACE', 'PROTUNE_NATIVE_COLOURSPACE',
    'MAX_RGB_COLOURSPACE', 'P3_D65_COLOURSPACE', 'PAL_SECAM_COLOURSPACE',
    'RED_COLOR_COLOURSPACE', 'RED_COLOR_2_COLOURSPACE',
    'RED_COLOR_3_COLOURSPACE', 'RED_COLOR_4_COLOURSPACE',
    'DRAGON_COLOR_COLOURSPACE', 'DRAGON_COLOR_2_COLOURSPACE',
    'RED_WIDE_GAMUT_RGB_COLOURSPACE', 'ROMM_RGB_COLOURSPACE',
    'RIMM_RGB_COLOURSPACE', 'ERIMM_RGB_COLOURSPACE',
    'PROPHOTO_RGB_COLOURSPACE', 'RUSSELL_RGB_COLOURSPACE',
    'SHARP_RGB_COLOURSPACE', 'SMPTE_240M_COLOURSPACE', 'SMPTE_C_COLOURSPACE',
    'NTSC_1953_COLOURSPACE', 'NTSC_1987_COLOURSPACE', 'S_GAMUT_COLOURSPACE',
    'S_GAMUT3_COLOURSPACE', 'S_GAMUT3_CINE_COLOURSPACE', 'sRGB_COLOURSPACE',
    'V_GAMUT_COLOURSPACE', 'XTREME_RGB_COLOURSPACE', 'F_GAMUT_COLOURSPACE'
]
| StarcoderdataPython |
3375978 | import asyncio
from .engine.fanhuaji import FanhuajiEngine
from .engine.opencc import OpenCC as OpenCCEngine
class Converter():
    """Converts epub body text between Chinese variants.

    Supported engines:
        * ``opencc``         -- local OpenCC conversion
        * ``fanhuaji``       -- synchronous Fanhuaji web service
        * ``fanhuaji_async`` -- asynchronous Fanhuaji web service
    """

    def __init__(self, engine, converter):
        # engine: one of 'opencc', 'fanhuaji', 'fanhuaji_async'
        # converter: conversion profile name understood by the engine
        self.engine = engine
        self.converter = converter

    def convert(self, content: str) -> str:
        """Convert epub body text.

        Args:
            content (str): the epub text to convert.

        Returns:
            str | None: the converted text, or ``None`` when *content*
            is empty or the engine name is unrecognised.
        """
        if not content:
            return None
        # OpenCC conversion (local, no payload needed).
        if self.engine == 'opencc':
            return OpenCCEngine(self.converter).convert(content)
        # The original built this payload even for the opencc path and
        # fell through every branch; build it only for the Fanhuaji paths.
        payload = {
            'text': content,
            'converter': self.converter
        }
        # Fanhuaji synchronous conversion.
        if self.engine == 'fanhuaji':
            return FanhuajiEngine().convert(**payload)
        # Fanhuaji asynchronous conversion.
        if self.engine == 'fanhuaji_async':
            return asyncio.get_event_loop().run_until_complete(
                FanhuajiEngine().async_convert(**payload))
        # Unknown engine: preserve the original behaviour of returning None.
        return None
| StarcoderdataPython |
1683287 | <gh_stars>0
import jwt
# TODO : move to environment variable
JWT_SECRET_KEY = '<KEY>'
def jwt_encode(data, expire_timestamp=None):
    """Encode *data* as a JWT signed with HS512.

    :param data: claims to embed in the token (not mutated; the original
        wrote the ``exp`` claim into the caller's dict)
    :param expire_timestamp: optional expiry, stored under the registered
        ``exp`` claim
    :returns: the encoded token
    """
    payload = dict(data)
    if expire_timestamp is not None:
        payload['exp'] = expire_timestamp
    return jwt.encode(payload, JWT_SECRET_KEY, algorithm='HS512')
def jwt_decode(token):
    """Decode and verify a JWT produced by :func:`jwt_encode`.

    Pinning ``algorithms`` prevents algorithm-confusion attacks (a token
    re-signed with a different algorithm would otherwise verify) and is
    mandatory in PyJWT >= 2.0.
    """
    return jwt.decode(token, JWT_SECRET_KEY, algorithms=['HS512'])
20800 | """
Implements the base Parser as a Context Manager.
"""
from abc import ABCMeta, abstractmethod
class Parser(metaclass=ABCMeta):
    """
    Base abstract class for a parser.

    Further parsers should inherit from this and override the abstract
    parse() method.

    NOTE: the original declared ``__metaclass__ = ABCMeta``, the Python 2
    spelling, which Python 3 silently ignores -- so ``parse`` was never
    actually enforced as abstract. Declaring the metaclass in the class
    header restores that enforcement.
    """

    def __init__(self, filename):
        """
        Store the filename for use by enter.

        :param filename: The path to the file to be parsed
        :type filename: string
        """
        self._filename = filename
        self.file = None
        self.fitting_problem = None

    def __enter__(self):
        """
        Called when used as a context manager.
        Opens the file ready for parsing.
        """
        self.file = open(self._filename, 'r')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Called when used as a context manager.
        Closes the file; a no-op when __enter__ never opened it
        (self.file is still None, hence the AttributeError guard).

        :param exc_type: Used if an exception occurs. Contains the
                         exception type.
        :type exc_type: type
        :param exc_value: Used if an exception occurs. Contains the exception
                          value.
        :type exc_value: Exception
        :param traceback: Used if an exception occurs. Contains the exception
                          traceback.
        :type traceback: traceback
        """
        try:
            self.file.close()
        except AttributeError:
            pass

    @abstractmethod
    def parse(self):
        """
        Parse the file into a FittingProblem.

        :returns: The parsed problem
        :rtype: FittingProblem
        """
        raise NotImplementedError
| StarcoderdataPython |
107474 | <reponame>amachefe/XAc
# Experiments going on here
from bitcoinrpc.authproxy import AuthServiceProxy
import json
access = AuthServiceProxy("http://rpcusername:rpcpassword@127.0.0.1:8332")
def reconcile_transaction(txid, amount, type):
    """Inspect every output of *txid* and print its gettxout detail.

    :param txid: transaction id to inspect
    :param amount: currently unused; kept for interface compatibility
    :param type: currently unused; kept for interface compatibility
        (NOTE: shadows the builtin ``type`` -- rename when callers allow)
    """
    raw_transaction = access.getrawtransaction(txid, 1)
    for utxo in raw_transaction['vout']:
        index = utxo['n']
        utxo_detail = access.gettxout(txid, index)
        # gettxout returns the value None for spent outputs; the original
        # compared against the *string* "None", so `unspent` was never
        # computed correctly.
        if utxo_detail is None:
            unspent = False
        else:
            # unspent outputs carry a 'bestblock' key
            unspent = 'bestblock' in utxo_detail
        print(utxo_detail)
| StarcoderdataPython |
1657500 | <reponame>danielboloc/postgap<filename>postgap_html_report.py<gh_stars>1-10
#! /usr/bin/env python
"""
Copyright [1999-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
"""
import sys
import argparse
from argparse import RawTextHelpFormatter
import os
import csv
from jinja2 import Template
import requests
def main():
    """Render the POSTGAP HTML report from the --output2 results file."""
    # get options
    options = get_options()
    # assert that files exist
    assert os.path.exists(options.result_file), "result file " + options.result_file + " can't be found."
    assert os.path.exists(options.template), "HTML template file " + options.template + " can't be found."
    # get top 10 genes and snps
    top_10_genes, top_10_snps = get_top_10_genes_snps(options.result_file)
    # get top 10 pathways
    top_10_pathways = get_top_10_pathways(top_10_genes)
    # load template and render the html file out. The original never closed
    # either file handle (`output_html.close` was missing its parentheses,
    # so the output was never flushed/closed explicitly).
    with open(options.template, 'r') as template_file:
        template = Template(str(template_file.read()))
    with open(options.output, "w+") as output_html:
        output_html.write(template.render(gene_list=top_10_genes,
                                          snp_list=top_10_snps,
                                          pathway_list=top_10_pathways))
def get_options():
    """Parse and return the command-line options for the report generator."""
    parser = argparse.ArgumentParser(
        description="Creates a HTML report based on POSTGAP (--output2) results",
        formatter_class=RawTextHelpFormatter)
    required = parser.add_argument_group('required arguments')
    for flag, help_text in (
            ('--output', 'Name of the HTML output file'),
            ('--template', 'Name of the HTML template file'),
            ('--result_file', 'Name of the POSTGAP (--output2) results file')):
        required.add_argument(flag, help=help_text, required=True)
    return parser.parse_args()
def get_top_10_genes_snps(result_file):
    """Scan a POSTGAP --output2 TSV and keep the ten best-scoring entries.

    :param result_file: tab-separated file with columns
        gene, cluster, snp, snp-posterior, tissue, gene-posterior
    :returns: (top_10_genes, top_10_snps); each is a list of ten
        [id, ..., tissue, posterior] rows sorted best-first, with unused
        slots left as the [0, 0, 0, 0.0] placeholder
    """
    top_10_genes = [[0, 0, 0, 0.0] for _ in range(10)]
    top_10_snps = [[0, 0, 0, 0.0] for _ in range(10)]
    # `with` closes the file; the original leaked the handle.
    with open(result_file, 'r') as source_file:
        reader = csv.reader(source_file, delimiter='\t')
        for row in reader:
            gene = row[0]
            cluster = row[1].replace('GWAS_Cluster_', '')
            snp = row[2]
            # Posteriors arrive as strings; the original compared them as
            # strings (lexicographic, and int-vs-str on the placeholders),
            # so ranking was wrong. Compare numerically instead.
            snp_gene_tissue_posterior = float(row[3])
            tissue = row[4]
            gene_cluster_tissue_posterior = float(row[5])
            # Insert into the first beatable slot whose key differs.
            for i in range(10):
                if top_10_genes[i][0] != gene or top_10_genes[i][1] != cluster or top_10_genes[i][2] != tissue:
                    if top_10_genes[i][3] < gene_cluster_tissue_posterior:
                        top_10_genes.insert(i, [gene, cluster, tissue, gene_cluster_tissue_posterior])
                        del top_10_genes[-1]
                        break
            for i in range(10):
                if top_10_snps[i][0] != snp or top_10_snps[i][1] != gene or top_10_snps[i][2] != tissue:
                    if top_10_snps[i][3] < snp_gene_tissue_posterior:
                        top_10_snps.insert(i, [snp, gene, tissue, snp_gene_tissue_posterior])
                        del top_10_snps[-1]
                        break
    return top_10_genes, top_10_snps
def get_top_10_pathways(top_10_genes):
    """Query the Reactome analysis service for the top ten pathways.

    :param top_10_genes: rows whose first element is a gene identifier
    :returns: list of [stId, name, 1 - fdr] for the first ten pathways
    """
    # Unique gene identifiers, preserving first-seen order.
    gene_list = []
    for entry in top_10_genes:
        if entry[0] not in gene_list:
            gene_list.append(entry[0])
    # Reactome expects one identifier per line as plain text.
    genes_to_analyse = '\n'.join(str(g) for g in gene_list)
    headers = {'Content-Type': 'text/plain',}
    res = requests.post(
        'https://reactome.org/AnalysisService/identifiers/projection/?pageSize=10&page=1',
        headers=headers, data=genes_to_analyse)
    pathways = res.json()['pathways']
    top_10_pathways = []
    for i in range(10):
        entry = pathways[i]
        # Convert FDR into a "score" where larger is better.
        score = 1 - float(entry['entities']['fdr'])
        top_10_pathways.insert(i, [entry['stId'], entry['name'], score])
    return top_10_pathways
if __name__ == "__main__":
main() | StarcoderdataPython |
1640889 | #!/usr/bin/python
import os
import base64
import json
import sys
import logging
import logging.handlers
import subprocess
import time
import re
import argparse
# Name used both as the logger name and as the basename of the log file.
operation = "keyvault_helper_module"
script_path = os.path.realpath(__file__)
script_directory = os.path.dirname(script_path)
# Log file lives next to this script: <dir>/keyvault_helper_module.log
log_path = os.path.join(script_directory, '{0}.log'.format(operation))
log_level = logging.DEBUG
# Command run to recover from apt/dpkg lock errors. Empty here -- presumably
# configured elsewhere; TODO(review): confirm before relying on the retry path.
resolve_lock_err_cmd =""
class OperationFailed(Exception):
    """Raised when an Azure CLI operation reports an error.

    Improvements over the original:
    * the base ``Exception`` initialiser is invoked, so ``str(exc)``
      renders the message instead of the raw argument tuple;
    * ``message`` is optional, because one call site in this module raises
      with a single argument (which previously raised ``TypeError``).
    """

    def __init__(self, operation, message=None):
        # Single-argument form: treat the sole argument as the message.
        if message is None:
            message = operation
            operation = None
        super(OperationFailed, self).__init__(message)
        self.operation = operation
        self.message = message
def get_logger(logger_name, logger_path, log_level):
    '''Returns a properly formatted logger object that uses a rotating file handler'''
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] - %(message)s')

    console_handler = logging.StreamHandler()
    file_handler = logging.handlers.RotatingFileHandler(
        logger_path, maxBytes=1024 * 1024, backupCount=2)

    logger = logging.getLogger(logger_name)
    logger.setLevel(log_level)
    # Console first, then file -- same ordering as before.
    for handler in (console_handler, file_handler):
        handler.setLevel(log_level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
log = get_logger(operation, log_path, log_level)
def execute_command(command_list, raise_exception_on_error=True, available_retry=1, verbose=False):
    '''Executes the specified command using the supplied parameters.

    :param command_list: command string (run with shell=True) or argv list
    :param raise_exception_on_error: accepted for interface compatibility;
        currently unused -- failures are logged, not raised
    :param available_retry: maximum number of attempts
    :param verbose: when True, log the command and its output on every attempt
    :returns: (status, output, error) from the final attempt
    '''
    status = 1
    retrycount = 0
    output = ''
    error = ''
    while (status != 0 and retrycount < available_retry):
        # Popen itself can raise before assignment; initialise so the
        # handlers and finally-block never hit an unbound name.
        process = None
        try:
            process = subprocess.Popen(command_list, bufsize=4096, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                       shell=True)
            while process.poll() is None:
                time.sleep(10)
        except OSError as os_err:
            log.error('{0}:{1}'.format(os_err.errno, os_err.strerror))
            if process is not None:
                process.kill()
        except ValueError:
            log.error('Invalid arguments:{0}'.format(command_list))
            time.sleep(30)
            if process is not None:
                process.kill()
        except IOError as io_err:
            log.error("IO Error: {0}:{1}".format(io_err.errno, io_err.strerror))
            # The original tested `e.strerror` here, but the caught
            # exception is io_err -- `e` was undefined (NameError).
            if io_err.strerror and "dpkg: error: dpkg status database is locked by another process" in io_err.strerror:
                if process is not None:
                    process.kill()
        except Exception as e:
            log.error("Unexpected error:{0}".format(sys.exc_info()[0]))
            # Exception.message does not exist on Python 3; use str(e).
            log.error("error msg: {0}".format(str(e)))
            if process is not None:
                process.kill()
        finally:
            if process is None:
                # Popen never started: report failure with empty streams.
                output, error = '', ''
                status = 1
            else:
                output, error = process.communicate()
                status = process.returncode
            if verbose:
                log.debug(
                    'Command List: {0}{1}Status: {2}{1}Output: {3}{1}Error: {4}{1}'.format(command_list, os.linesep, status,
                                                                                           output.strip(),
                                                                                           error.strip()))
            retrycount += 1
            if status != 0:
                log.error(
                    'Command List: {0}{1}Status: {2}{1}Output: {3}{1}Error: {4}{1}'.format(command_list, os.linesep,
                                                                                           status, output.strip(),
                                                                                           error.strip()))
                # Recover from a held apt/dpkg lock before the caller retries.
                if "Could not get lock /var/lib/dpkg/lock" in error:
                    execute_command([resolve_lock_err_cmd], available_retry=3)
    return status, output, error
class Result(object):
    """Mutable container for the outcome of a parsed CLI invocation."""

    # Class-level defaults; each instance rebinds these in __init__.
    success = False
    error_code = 0
    error_msg = ""
    data = None
    info_msg = ""

    def __init__(self, success=True, error_msg="", info_msg="", data=None):
        self.success = success
        self.error_msg = error_msg
        self.info_msg = info_msg
        # Fresh dict per instance: the original used the mutable default
        # argument `data={}`, so every Result constructed without an
        # explicit dict shared (and mutated) one module-wide dictionary.
        self.data = {} if data is None else data

    def setSuccess(self, success):
        self.success = success

    def IsSuccess(self):
        return self.success

    def setErrorMessage(self, error_msg):
        """Append an error line; any error marks the result as failed."""
        self.error_msg += error_msg + '\n'
        if self.success:
            self.success = False

    def getErrorMessage(self):
        return self.error_msg

    def setInfoMessage(self, info_msg):
        self.info_msg += info_msg + '\n'

    def getInfoMessage(self):
        return self.info_msg

    def addData(self, name, value):
        self.data[name] = value

    def getData(self):
        return self.data

    def toString(self):
        # The original formatted the error section with {0} (the info
        # message), so error_msg was never rendered; use {2}.
        return "Info:\n{0}\nData:\n{1} Error:\n{2}".format(self.info_msg, self.data, self.error_msg)
def parse_output(output):
    """Scan one CLI output stream line by line into a Result.

    Lines beginning with "error" become error messages; lines containing
    "id" contribute a ``data["id"]`` entry.

    NOTE(review): this is Python 2 bytes/str handling (encode/decode of
    the same lines); several spots look suspect -- confirm before reuse:
      * ``line_info[0]`` raises IndexError on blank lines;
      * ``lstrip("error:")`` strips a character *set*, not the prefix;
      * the ``info`` branch appends to the *error* message and strips
        "error:" -- presumably it should use setInfoMessage/"info:".
    """
    result = Result()
    error_token = "error".encode('utf-8')
    info_token = "info".encode('utf-8')
    data_token = "data".encode('utf-8')
    output_lines = output.splitlines()
    num_lines_in_parsed_output = len(output_lines)
    result.setSuccess(True)
    for index in range(0, num_lines_in_parsed_output):
        line_info = output_lines[index].decode('utf-8').split()
        # First word on the line, minus any trailing colon.
        first_token = line_info[0].rstrip(':').encode('utf-8')
        data_pattern = re.compile('(data:)')
        searchStr = output_lines[index].encode('utf-8')
        # Capture `"id": "<value>",` wherever it appears on the line.
        if "id" in searchStr:
            value_pattern = re.compile(".*\"id\": \"(.*)\",")
            value_result = re.search(value_pattern, searchStr)
            if value_result:
                result.addData("id", value_result.groups()[0])
        if error_token in first_token:
            result.setErrorMessage(output_lines[index].lstrip("error:").encode('utf-8'))
        #TODO: Change this to use regex to collect data accurately
        elif data_token in first_token:
            data_pattern = re.compile('(data:)')
            searchStr = output_lines[index].encode('utf-8')
            if "id" in searchStr:
                value_pattern = re.compile(r'"([^"]*)"')
                value_result = re.search(value_pattern, searchStr)
                if value_result:
                    result.addData("id", value_result.groups()[0])
        elif info_token in first_token:
            result.setErrorMessage(output_lines[index].lstrip("error:").encode('utf-8'))
    return result
def parse_result(output, error):
    """Combine parsed stdout and stderr into a single Result.

    Success and the error text come from stderr; the informational text
    and extracted data come from stdout.
    """
    parsed_stdout = parse_output(output=output)
    parsed_stderr = parse_output(output=error)
    return Result(parsed_stderr.IsSuccess(),
                  parsed_stderr.getErrorMessage(),
                  parsed_stdout.getInfoMessage(),
                  parsed_stdout.getData())
def set_subscription(subscription_id):
    """Point the Azure CLI at *subscription_id*.

    :raises OperationFailed: when ``az account set`` reports an error.
    """
    set_azure_subscription = "az account set --subscription {0}".format(subscription_id)
    # (removed the dead local `return_error_code` from the original)
    status, output, error = execute_command(set_azure_subscription)
    parsed_result = parse_result(output, error)
    parsed_result.error_code = status
    if not parsed_result.IsSuccess():
        raise OperationFailed(set_azure_subscription, parsed_result.getErrorMessage())
def create_resource_group(resource_group_name, region):
    """Create (or re-create idempotently) an Azure resource group.

    :raises OperationFailed: when ``az group create`` reports an error.
    """
    create_resource_group = "az group create -n '{0}' -l '{1}'".format(resource_group_name, region)
    # (removed the dead local `return_error_code` from the original)
    status, output, error = execute_command(create_resource_group)
    parsed_result = parse_result(output, error)
    parsed_result.error_code = status
    if not parsed_result.IsSuccess():
        raise OperationFailed(create_resource_group, parsed_result.getErrorMessage())
def create_key_vault(keyvault_name, resource_group_name, region):
    """Create a deployment-enabled key vault and return its resource id.

    If the vault already exists, the "already exists" error is tolerated
    and the existing vault is looked up with ``az keyvault show``.

    NOTE(review): in the already-exists path the id is still read from
    the failed create's parsed result rather than from
    ``show_parsed_result`` -- presumably a bug; confirm before changing.
    """
    create_keyvault = "az keyvault create --name '{0}' --resource-group '{1}' --location '{2}' --enabled-for-deployment '{3}' --enabled-for-template-deployment '{3}'".format(keyvault_name, resource_group_name, region, "true")
    show_keyvault = "az keyvault show --name '{0}'".format(keyvault_name)
    status, output, error = execute_command(create_keyvault)
    parsed_result = parse_result(output, error)
    parsed_result.error_code = status
    if not parsed_result.success:
        # Tolerate "Vault <name> already exists" and fall back to `show`.
        kv_exists_pattern = re.compile('\s[vV]ault\s[\w\d]+\salready\sexists\s.*?')
        kv_exists_result = re.search(kv_exists_pattern, parsed_result.error_msg)
        if(kv_exists_result):
            parsed_result.setSuccess(True)
            status1, output1, error1 = execute_command(show_keyvault)
            show_parsed_result = parse_result(output1, error1)
            parsed_result.error_code = status1
            return parsed_result.data["id"]
        else:
            raise OperationFailed(create_keyvault, parsed_result.error_msg)
    return parsed_result.data["id"]
def upload_secret(resource_group_name, region, keyvault_name, secret, subscription, certificate_name):
    """Store *secret* in the vault under *certificate_name* and return its URL.

    resource_group_name, region and subscription are currently unused --
    kept for interface compatibility.

    :raises OperationFailed: when ``az keyvault secret set`` fails.
    """
    set_keyvault_secret = "az keyvault secret set --vault-name '{0}' --name '{1}' --value '{2}'".format(keyvault_name, certificate_name, secret)
    #enable_keyvault_for_deployment = "az keyvault set-policy --name '{0}' --certificate-permissions '{1}' --key-permissions '{2}' --secret-permissions '{3}'".format(keyvault_name, "create delete get getissuers import list update", "create decrypt delete encrypt get import list sign unwrapKey update wrapKey", "delete get list set")
    status, output, error = execute_command(set_keyvault_secret)
    sourceurl_result = parse_result(output, error)
    sourceurl_result.error_code = status
    # The secret's id (its URL) is extracted from the CLI's JSON output.
    sourceurl = sourceurl_result.data["id"]
    if not sourceurl_result.IsSuccess():
        raise OperationFailed(set_keyvault_secret, sourceurl_result.error_msg)
    #status2, output2, error2 = execute_command(enable_keyvault_for_deployment)
    #parsed_result = parse_result(output2, error2)
    #parsed_result.error_code = status2
    #if 0 != status2:
    #raise OperationFailed(enable_keyvault_for_deployment, error2)
    return sourceurl
def get_certificate_content(certificate_path):
    """Wrap a .pfx file in the base64 JSON blob Azure expects for a
    deployable key-vault secret.

    The original ignored its argument and opened the hard-coded file
    'sfrptestautomation.pfx', and never actually closed the handle
    (``fh.close`` without parentheses).

    :param certificate_path: path to the .pfx file to encode
    :returns: base64-encoded JSON blob (bytes)
    """
    with open(certificate_path, 'rb') as fh:
        cert_base64_str = base64.b64encode(fh.read()).decode('ascii')
    # TODO(review): take the password as a parameter; this placeholder was
    # hard-coded in the original.
    password = '<PASSWORD>'
    json_blob = {
        'data': cert_base64_str,
        'dataType': 'pfx',
        'password': password
    }
    blob_data = json.dumps(json_blob)
    return base64.b64encode(blob_data.encode('ascii'))
def get_file_name(file_path):
    """Return the base name of *file_path* without its final extension."""
    stem, _extension = os.path.splitext(os.path.basename(file_path))
    return stem
class Certificate(object):
    """Base class for uploading a certificate to Azure key vault.

    Subclasses implement manupulate_cert() (produce the missing pem/pfx
    form via openssl) and cleanup() (remove temporary files).
    """
    # Class-level defaults; instance attributes are set in __init__.
    pem_file_path=""
    pfx_file_path=""

    def __init__(self, subscription_id, rg_name, kv_name, location, certificate_name, password, pfx_file_path="", pem_file_path=""):
        self.subscription_id = subscription_id
        self.rg_name = rg_name
        self.kv_name = kv_name
        self.location = location
        self.certificate_name = certificate_name
        self.pfx_file_path = pfx_file_path
        self.pem_file_path = pem_file_path
        self.password = password

    def getPfxFilePath(self):
        return self.pfx_file_path

    def getResourceGroupName(self):
        return self.rg_name

    def getKeyVaultName(self):
        return self.kv_name

    def getLocation(self):
        return self.location

    def getSubscription(self):
        return self.subscription_id

    def getCertificateName(self):
        return self.certificate_name

    def getPassword(self):
        return self.password

    def manupulate_cert(self):
        # Produce the missing pem/pfx counterpart; subclass responsibility.
        raise NotImplementedError("Subclass must implement")

    def cleanup(self):
        # Remove temporary files created by manupulate_cert().
        raise NotImplementedError("Subclass must implement")

    def getContent(self):
        """Return the base64 JSON blob for the pfx file (see upload flow)."""
        fh = open(self.getPfxFilePath(), 'rb')
        try:
            ba = bytearray(fh.read())
            cert_base64_str = base64.b64encode(ba)
            password = self.getPassword()
            json_blob = {
                'data': cert_base64_str,
                'dataType': 'pfx',
                'password': password
            }
            blob_data= json.dumps(json_blob)
            content_bytes= bytearray(blob_data)
            content = base64.b64encode(content_bytes)
            return content
        finally:
            # NOTE(review): missing parentheses -- the handle is never
            # actually closed here, and the call below is unreachable.
            fh.close
        fh.close()

    def extract_thumbprint(self):
        """Return the certificate fingerprint (colons stripped) via openssl."""
        get_thumbprint = "openssl x509 -in {0} -noout -fingerprint".format(self.pem_file_path)
        status, output, error = execute_command(get_thumbprint)
        if (status != 0):
            print "Error: {0}".format(error)
        else:
            pattern = re.compile('Fingerprint=([\w\W\d\D]+)')
            thumbprint = re.search(pattern, output)
            return (thumbprint.groups()[0]).encode('utf-8').replace(":", "")
        return ""

    def upload_cert(self):
        """Full upload flow: normalise, provision, store, fingerprint.

        :returns: (thumbprint, key-vault resource id, secret URL)
        """
        self.manupulate_cert()
        set_subscription(subscription_id=self.subscription_id)
        rg_result = create_resource_group(self.rg_name, self.location)
        kv_result = create_key_vault(self.kv_name, self.rg_name, self.location)
        secret_result = upload_secret(self.rg_name, self.location, self.kv_name, self.getContent(), self.subscription_id, self.certificate_name)
        thumbprint = self.extract_thumbprint()
        self.cleanup()
        return thumbprint, kv_result, secret_result
class pfx_certificate_format(Certificate):
    """Certificate supplied as .pfx; derives a temporary .pem for the
    thumbprint extraction."""

    def manupulate_cert(self):
        # Convert the pfx into an unencrypted pem under /tmp.
        file_name = get_file_name(self.pfx_file_path)
        self.tmp_file_path = "/tmp/{0}.pem".format(file_name)
        prepare_pem_file = "openssl pkcs12 -in {0} -out {1} -nodes -passin pass:'{2}'".format(self.pfx_file_path, self.tmp_file_path, self.password)
        status, output, error = execute_command(prepare_pem_file)
        if(status != 0):
            # NOTE(review): failure is only printed, not raised.
            print error
        else:
            self.pem_file_path = self.tmp_file_path

    def cleanup(self):
        # Remove the temporary pem produced above.
        rm_tmp_pem = "rm {0}".format(self.tmp_file_path)
        status, output, error = execute_command(rm_tmp_pem)
        if (status != 0):
            print error
class pem_certificate_format(Certificate):
    """Certificate supplied as .pem; derives a temporary .pfx for upload."""

    def manupulate_cert(self):
        # Convert the pem (key + cert) into a pfx under /tmp.
        file_name = get_file_name(self.pem_file_path)
        self.tmp_file_path = "/tmp/{0}.pfx".format(file_name)
        prepare_pfx_file = "openssl pkcs12 -export -out {1} -inkey {0} -in {0} -passout pass:'{2}'".format(self.pem_file_path, self.tmp_file_path, self.password)
        status, output, error = execute_command(prepare_pfx_file)
        if (status != 0):
            print error
            # NOTE(review): single-argument raise; OperationFailed.__init__
            # takes (operation, message), so this raises TypeError as written.
            raise OperationFailed("creation of temp pfx failed with error {0}".format(error))
        else:
            self.pfx_file_path = self.tmp_file_path

    def cleanup(self):
        # Remove the temporary pfx produced above.
        rm_tmp_pem = "rm {0}".format(self.tmp_file_path)
        status, output, error = execute_command(rm_tmp_pem)
        if (status != 0):
            print error
# NOTE(review): this definition is shadowed by the argparse-based
# cert_factory(args) defined later in the module, so it is dead code at
# runtime. It also omits the required `password` argument of
# Certificate.__init__, which would raise TypeError if ever called.
def cert_factory(input_params):
    """Dict-driven factory selecting a Certificate subclass by cert_type."""
    if ("pem" in input_params["cert_type"] and "pfx" in input_params["cert_type"]):
        print 'invoke_add_cert.py --ifile <inputcertfile> --resourcegroup <rgname> --keyvault <keyvaultname> --subscriptionid <subscription> --certificatename <certificatename> --location <region> --certtype pem|pfx'
        sys.exit()
    elif "pem" in input_params["cert_type"]:
        return pem_certificate_format( subscription_id= input_params["subscription_id"],
                                       rg_name= input_params["resource_groupname"],
                                       pem_file_path=input_params["cert_file"],
                                       kv_name=input_params["keyvault_name"],
                                       location=input_params["location"],
                                       certificate_name=input_params["certificate_name"])
    elif "pfx" in input_params["cert_type"]:
        return pfx_certificate_format(subscription_id= input_params["subscription_id"],
                                      rg_name= input_params["resource_groupname"],
                                      pfx_file_path=input_params["cert_file"],
                                      kv_name=input_params["keyvault_name"],
                                      location=input_params["location"],
                                      certificate_name=input_params["certificate_name"])
    elif "self_sign" in input_params["cert_type"]:
        return pem_certificate_format( subscription_id= input_params["subscription_id"],
                                       rg_name= input_params["resource_groupname"],
                                       pem_file_path=input_params["cert_file"],
                                       kv_name=input_params["keyvault_name"],
                                       location=input_params["location"],
                                       certificate_name=input_params["certificate_name"])
def prepare_self_sign_certificate(subject, certificate_name):
    """Create a self-signed pem (key + cert in one file) under /tmp.

    :param subject: CN to place in the certificate subject
    :param certificate_name: basename of the generated file
    :returns: path to the generated pem file
    :raises OperationFailed: when the openssl invocation fails. (The
        original raised with the unrelated ``create_resource_group``
        function object as the failed operation.)
    """
    certificate_file = '/tmp/{0}.pem'.format(certificate_name)
    create_certificate = 'openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout {0} -out {0} -subj "/CN={1}"'.format(certificate_file, subject)
    status, output, error = execute_command(create_certificate)
    parsed_result = parse_result(output, error)
    parsed_result.error_code = status
    if not parsed_result.IsSuccess():
        raise OperationFailed(create_certificate, parsed_result.getErrorMessage())
    return certificate_file
def verify_required_params(args):
    """Print a message and exit if any required argument is missing.

    Checks run in the same order as the original hand-written chain and
    exit on the first missing value.
    """
    required = (
        ('subscription_id', 'subscription id is required'),
        ('resource_group_name', 'resource group name is required'),
        ('key_vault_name', 'key vault name is required'),
        ('location', 'location is required'),
        ('certificate_name', 'certificate name is required'),
        ('password', 'password is required'),
    )
    for attribute, message in required:
        if not getattr(args, attribute):
            print(message)
            sys.exit()
def cert_factory(args):
    """Build the right Certificate subclass from parsed CLI arguments.

    args.which is set by the sub-parser ('pem', 'pfx' or 'ss').
    """
    verify_required_params(args)
    if("ss" in args.which):
        if (args.subjectname and args.certificate_name):
            # Generate the self-signed pem, then treat it like a pem input.
            cert_file = prepare_self_sign_certificate(args.subjectname, args.certificate_name)
            return pem_certificate_format(subscription_id=args.subscription_id,
                                          rg_name=args.resource_group_name,
                                          pem_file_path=cert_file,
                                          kv_name=args.key_vault_name,
                                          location=args.location,
                                          certificate_name=args.certificate_name,
                                          password=args.password)
        else:
            print ("selfsign flag cannot be passed witout subjectname and certificatename")
            sys.exit()
    # NOTE(review): `which` is a single sub-command name, so this `and`
    # can never be true -- presumably `or` was intended, making the
    # input-file check dead code; confirm before changing.
    if ("pem" in args.which and "pfx" in args.which):
        if not (args.input_cert_file):
            print "input certificate file path is required"
            sys.exit()
    if "pem" in args.which:
        return pem_certificate_format( subscription_id= args.subscription_id,
                                       rg_name= args.resource_group_name,
                                       pem_file_path=args.input_cert_file,
                                       kv_name=args.key_vault_name,
                                       location=args.location,
                                       certificate_name=args.certificate_name,
                                       password=args.password)
    elif "pfx" in args.which:
        return pfx_certificate_format(subscription_id= args.subscription_id,
                                      rg_name= args.resource_group_name,
                                      pfx_file_path=args.input_cert_file,
                                      kv_name=args.key_vault_name,
                                      location=args.location,
                                      certificate_name=args.certificate_name,
                                      password=args.password)
def get_arg_parser():
    '''Returns an argument parser suitable for interpreting the specified command line arguments.

    Three sub-commands share the same Azure arguments; the original
    declared those six options three times over (and had the pem/pfx
    sub-parser help strings swapped).
    '''
    arg_parser = argparse.ArgumentParser()
    subparsers = arg_parser.add_subparsers(help='commands')

    pem_parser = subparsers.add_parser('pem', help='pem input file commands')
    pem_parser.set_defaults(which="pem")
    pfx_parser = subparsers.add_parser('pfx', help='pfx input file commands')
    pfx_parser.set_defaults(which="pfx")
    ss_parser = subparsers.add_parser('ss', help='self sign certificate commands')
    ss_parser.set_defaults(which="ss")

    # File-based sub-commands take the input certificate path first.
    for sub_parser in (pem_parser, pfx_parser):
        sub_parser.add_argument('-ifile', '--input_cert_file', action='store', help='Input certificate file')
    # Self-sign takes a subject name instead of an input file.
    ss_parser.add_argument('-subj', '--subjectname', action='store', help='subject name for self sign certificate')

    # Arguments shared by every sub-command, in the original order.
    for sub_parser in (pem_parser, pfx_parser, ss_parser):
        sub_parser.add_argument('-sub', '--subscription_id', action='store', help='Path to subscription')
        sub_parser.add_argument('-rgname', '--resource_group_name', action='store', help='name of resource group')
        sub_parser.add_argument('-kv', '--key_vault_name', action='store', help='Key vault name')
        sub_parser.add_argument('-sname', '--certificate_name', action='store', help='Name for secret')
        sub_parser.add_argument('-l', '--location', action='store', help='Location')
        sub_parser.add_argument('-p', '--password', action='store', help='password for certificate')
    return arg_parser
def main():
    """CLI entry point: parse arguments, upload the certificate, print results."""
    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()
    cert = cert_factory(args)
    thumbprint, resourceid, secreturl = cert.upload_cert()
    if("ss" in args.which):
        # For self-signed certs the generated pem stays on disk for the user.
        print "Please copy self signed cert from : {0}".format(cert.pem_file_path)
    print("SourceVault: {1}\nCertificateUrl: {2}\nCertificateThumbprint: {0}\n".format(thumbprint, resourceid, secreturl))

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3357242 | <filename>bin/intensity_normalization/normalize/__init__.py
from . import fcm, gmm, kde, lsq, nyul, whitestripe, zscore | StarcoderdataPython |
3376553 | <reponame>YuanMaSa/aws-well-architected-framework<gh_stars>10-100
"""
This module is the main script to add EMR step
"""
import os
import json
import logging
import time
import boto3
from botocore.exceptions import ClientError
msg_format = '%(asctime)s %(levelname)s %(name)s: %(message)s'
logging.basicConfig(format=msg_format, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger("handler_logger")
logger.setLevel(logging.INFO)
# aws api client
s3 = boto3.client('s3')
emr = boto3.client('emr')
sns = boto3.client('sns')
# load Environment Variables
prefix_name = os.environ["Prefix"]
s3_data_repo = os.environ["S3DataRepoName"]
sns_topic = os.environ["SnsTopic"]
subnet_id = os.environ["SubnetId"]
master_sg = os.environ['MasterSg']
slave_sg = os.environ['SlaveSg']
service_access_sg = os.environ['ServiceAccessSg']
log_uri = os.environ["LogUri"]
lab_bucket = os.environ["LabBucket"]
def lambda_handler(event, context):
    """Lambda entry point: launch the EMR Spark job flow and report its ids.

    Publishes a start notification to SNS, then returns the job-flow arn,
    cluster id and step ids needed by the next stage of the workflow.
    """
    started_at = time.time()
    logger.info(f"event:\n{event}")

    launch_response = emr_run_spark()
    sns.publish(
        Message="Configure EMR cluster to run Spark job flow\n"
                "EMR cluster is starting now",
        TopicArn=sns_topic,
    )

    cluster_info = fetch_emr_info(launch_response["ClusterArn"])
    logger.info("fetch all the required values")
    logger.info(
        "total running time of function : --- %s seconds ---"
        % (time.time() - started_at)
    )

    event["job_status"] = "start to run EMR step"
    return {
        'statusCode': 200,
        'jobFlowId': launch_response["JobFlowId"],
        'clusterArn': launch_response["ClusterArn"],
        'cluster_id': cluster_info["cluster_id"],
        "step_list": cluster_info["step_list"],
    }
def emr_run_spark():
    """
    EMR run pyspark script

    Launches an EMR cluster that bootstraps Spark, copies the job script
    from S3 and submits it as a step.

    Returns
    -------
    dict
        The ``run_job_flow`` response (contains ``JobFlowId`` and ``ClusterArn``).

    Raises
    ------
    botocore.exceptions.ClientError
        If the job flow cannot be created; the error is logged first.
    """
    try:
        response = emr.run_job_flow(
            Name="Lab Spark Cluster",
            LogUri=log_uri,
            ReleaseLabel='emr-5.28.0',
            Instances={
                'MasterInstanceType': 'm5.xlarge',
                'SlaveInstanceType': 'r5.2xlarge',
                'InstanceCount': 4,
                # keep the cluster alive so later lambdas can add steps
                'KeepJobFlowAliveWhenNoSteps': True,
                'TerminationProtected': False,
                'Ec2SubnetId': subnet_id,
                'EmrManagedMasterSecurityGroup': master_sg,
                'EmrManagedSlaveSecurityGroup': slave_sg,
                'ServiceAccessSecurityGroup': service_access_sg
            },
            Applications=[
                {
                    'Name': 'Spark'
                }
            ],
            BootstrapActions=[
                {
                    'Name': 'Maximize Spark Default Config',
                    'ScriptBootstrapAction': {
                        'Path': 's3://support.elasticmapreduce/spark/maximize-spark-default-config',
                    }
                },
                {
                    'Name': 'Install boto3',
                    'ScriptBootstrapAction': {
                        'Path': f's3://{lab_bucket}/spark/conf/install_python_modules.sh',
                    }
                }
            ],
            Steps=[
                {
                    'Name': 'Setup Debugging',
                    'ActionOnFailure': 'TERMINATE_CLUSTER',
                    'HadoopJarStep': {
                        'Jar': 'command-runner.jar',
                        'Args': ['state-pusher-script']
                    }
                },
                {
                    'Name': 'setup - copy files',
                    'ActionOnFailure': 'CANCEL_AND_WAIT',
                    'HadoopJarStep': {
                        'Jar': 'command-runner.jar',
                        'Args': ['aws', 's3', 'cp', f's3://{lab_bucket}/spark/main.py', '/home/hadoop/']
                    }
                },
                {
                    'Name': 'Run Spark',
                    'ActionOnFailure': 'CANCEL_AND_WAIT',
                    'HadoopJarStep': {
                        'Jar': 'command-runner.jar',
                        'Args': ['spark-submit', '/home/hadoop/main.py', lab_bucket, s3_data_repo]
                    }
                }
            ],
            Configurations=[
                {
                    'Classification': 'spark-env',
                    "Configurations": [
                        {
                            "Classification": "export",
                            "Properties": {
                                "PYSPARK_PYTHON": "/usr/bin/python3"
                            }
                        }
                    ]
                }
            ],
            VisibleToAllUsers=True,
            JobFlowRole='EMR_EC2_DefaultRole',
            ServiceRole='EMR_DefaultRole',
            Tags=[
                {
                    'Key': 'Project',
                    'Value': 'Data Lake Quickstart'
                },
                {
                    'Key': 'Prefix',
                    'Value': prefix_name
                }
            ]
        )
        return response
    except ClientError as error:
        logger.error("The error occurred when configure emr to run spark")
        logger.exception(error)
        # Re-raise instead of silently returning None: the caller indexes the
        # response (res["ClusterArn"]) and would otherwise die with an
        # unrelated TypeError.
        raise
def fetch_emr_info(cluster_arn):
    """
    fetch all required info from EMR

    Parameters
    ----------
    cluster_arn : str
        ARN of the cluster created by ``emr_run_spark``.

    Returns
    -------
    dict or None
        ``{"cluster_id": ..., "step_list": [...]}`` for the matching
        cluster, or ``None`` when no visible cluster has that ARN.
    """
    # Paginate: a bare list_clusters() returns only the first page, so a
    # cluster beyond it would never be found.
    paginator = emr.get_paginator('list_clusters')
    for page in paginator.paginate():
        for cluster in page["Clusters"]:
            if cluster["ClusterArn"] == cluster_arn:
                cluster_id = cluster["Id"]
                res = emr.list_steps(ClusterId=cluster_id)
                step_list = [step["Id"] for step in res["Steps"]]
                return {
                    "cluster_id": cluster_id,
                    "step_list": step_list
                }
    return None
| StarcoderdataPython |
1787976 | <reponame>gpauloski/ProxyStore<gh_stars>1-10
"""MapReduce with FuncX and ProxyStore example"""
import argparse
import numpy as np
import proxystore as ps
import time
from funcx.sdk.client import FuncXClient
from typing import List
def app_double(x: np.ndarray) -> np.ndarray:
    """Return a new array with every element of *x* doubled."""
    doubled = x + x
    return doubled
def app_sum(inputs: List[np.ndarray]) -> float:
    """Sums all elements in list of arrays.

    Returns None for an empty input list (preserved from the original
    behaviour). numpy is imported inside the function because the body is
    shipped to a remote FuncX worker.
    """
    import numpy as np
    if len(inputs) == 0:
        return None
    # Copy first: the original did `out = inputs[0]; out += x`, which
    # mutated the caller's first array in place.
    out = inputs[0].copy()
    for x in inputs[1:]:
        out += x
    return np.sum(out)
if __name__ == '__main__':
    # CLI: generate N random (s x s) arrays, double each remotely via FuncX,
    # then sum the results remotely; optionally pass data through a
    # Redis-backed ProxyStore instead of serializing it into the task.
    parser = argparse.ArgumentParser(description='MapReduce with FuncX')
    parser.add_argument(
        '-n',
        '--num-arrays',
        type=int,
        required=True,
        help='Number of arrays to be mapreduced',
    )
    parser.add_argument(
        '-s',
        '--size',
        type=int,
        required=True,
        help='Length of array where each array is s x s',
    )
    parser.add_argument(
        '-f',
        '--funcx-endpoint',
        type=str,
        required=True,
        help='FuncX endpoint ID',
    )
    parser.add_argument(
        '--proxy', action='store_true', help='Use proxy store to pass inputs'
    )
    parser.add_argument(
        '--redis-port',
        type=int,
        default=59465,
        help='If not None, use Redis backend',
    )
    args = parser.parse_args()
    fxc = FuncXClient()
    double_uuid = fxc.register_function(app_double)
    sum_uuid = fxc.register_function(app_sum)
    if args.proxy:
        store = ps.store.init_store(
            'redis', hostname='127.0.0.1', port=args.redis_port
        )
    # Map phase: one doubling task per array, submitted as a single batch.
    batch = fxc.create_batch()
    for i in range(args.num_arrays):
        x = np.random.rand(args.size, args.size)
        if args.proxy:
            x = store.proxy(x)
        batch.add(x, endpoint_id=args.funcx_endpoint, function_id=double_uuid)
    batch_res = fxc.batch_run(batch)
    mapped_results = fxc.get_batch_result(batch_res)
    # NOTE(review): this polls a status snapshot; if `res` is a plain dict it
    # is never refreshed and could spin forever on a pending task -- confirm
    # against the FuncX SDK semantics of get_batch_result.
    for i, res in mapped_results.items():
        while res['pending']:
            time.sleep(0.1)
    mapped_results = [
        fxc.get_result(i) for i, status in mapped_results.items()
    ]
    if args.proxy:
        mapped_results = store.proxy(mapped_results)
    # Reduce phase: one task summing all mapped outputs.
    total = fxc.run(
        mapped_results, endpoint_id=args.funcx_endpoint, function_id=sum_uuid
    )
    while fxc.get_task(total)['pending']:
        time.sleep(0.1)
    print('Sum:', fxc.get_result(total))
| StarcoderdataPython |
1788913 | from .task import *
from .pymp_mapper import *
from .pymp_iterator import *
from .ray_backend import *
from .ray_mapper import *
from .ray_iterator import *
# from .ray_dataloader import *
from ._version import get_versions
# Resolve the package version via versioneer, then drop the helper so it is
# not exported from the package namespace.
__version__ = get_versions()['version']
version = __version__
del get_versions
def init_backend(backend, *args, **kwargs):
    """Initialize the selected parallel backend.

    'pymp' needs no setup; 'ray' forwards *args/**kwargs to the ray backend.
    Raises ValueError for any other backend name.
    """
    if backend == 'ray':
        init_ray_backend(*args, **kwargs)
    elif backend != 'pymp':
        raise ValueError('[backend] should be pymp or ray')
def shutdown_backend(backend, *args, **kwargs):
    """Shut down the selected parallel backend.

    'pymp' needs no teardown; 'ray' forwards *args/**kwargs to the ray backend.
    Raises ValueError for any other backend name.
    """
    if backend == 'ray':
        shutdown_ray_backend(*args, **kwargs)
    elif backend != 'pymp':
        raise ValueError('[backend] should be pymp or ray')
def mapper(backend, num_proc=None, ordered=True, chunk_size=1, report_interval=0, report_newline=False, report_name=None):
    """Create a parallel mapper for the given backend ('pymp' or 'ray').

    All remaining arguments are forwarded positionally to the backend's
    mapper factory. Raises ValueError for an unknown backend.
    """
    if backend == 'pymp':
        return pymp_mapper(num_proc, ordered, chunk_size, report_interval, report_newline, report_name)
    if backend == 'ray':
        return ray_mapper(num_proc, ordered, chunk_size, report_interval, report_newline, report_name)
    # the original string was missing the f-prefix, so {backend} never interpolated
    raise ValueError(f'unknown backend [{backend}], only support [pymp] or [ray]')
def iterator(backend, prefetch_size=1, chunk_size=1):
    """Create a prefetching iterator for the given backend ('pymp' or 'ray').

    Raises ValueError when prefetch_size or chunk_size is < 1, or for an
    unknown backend.
    """
    if prefetch_size < 1:
        raise ValueError(f'requires prefetch_size >= 1, but got {prefetch_size}')
    if chunk_size < 1:
        raise ValueError(f'requires chunk_size >= 1, but got {chunk_size}')
    if backend == 'pymp':
        return pymp_iterator(prefetch_size, chunk_size)
    if backend == 'ray':
        return ray_iterator(prefetch_size, chunk_size)
    # the original string was missing the f-prefix, so {backend} never interpolated
    raise ValueError(f'unknown backend [{backend}], only support [pymp] or [ray]')
| StarcoderdataPython |
29899 | <reponame>openlobby/openlobby-server
from datetime import datetime
def strip_value(data, *path):
    """Replace the value found at *path* inside nested dict *data* with the
    placeholder "__STRIPPED__" and return the original value.

    Intermediate lists are mapped element-wise; anything else at an
    intermediate level raises NotImplementedError.
    """
    key = path[0]
    current = data.get(key)
    if len(path) == 1:
        data[key] = "__STRIPPED__"
        return current
    if isinstance(current, dict):
        return strip_value(current, *path[1:])
    if isinstance(current, list):
        return [strip_value(item, *path[1:]) for item in current]
    raise NotImplementedError()
def dates_to_iso(data):
    """Convert every datetime value in dict *data* to an ISO-8601 string,
    mutating *data* in place, and return it."""
    for key in data:
        value = data[key]
        if isinstance(value, datetime):
            data[key] = value.isoformat()
    return data
| StarcoderdataPython |
3374458 | """
This function will submit a specified number of sbatch jobs. Each sbatch job (I call them applyFunctionDaemons) will traverse the directory tree which starts from root. While traversing the directory tree, a daemon will look for a certain condition that indicates it is in the proper place to apply a function. Once it finds the right place, it will check that another daemon is not already working or has finished working in that place. If no daemon has, it will apply the function. Once it's done, it will move onto another directory until there are none left or it runs out of time.
For simplicity, the function will not require any arguments.
"""
import os
import argparse
DaemonPath = "applyFunctionDaemon.py"
cwd = os.getcwd()
def getPaths(root, file):
    """Collect leaf directories under *root* that do not yet contain *file*.

    A "leaf" is a directory with no subdirectories; *file* must be an exact
    filename (no wildcards).
    """
    result = []
    for dirpath, dirnames, filenames in os.walk(root):
        is_leaf = not dirnames
        if is_leaf and file not in filenames:
            result.append(dirpath)
    return result
def partitionList(array, partitions):
    """Split *array* into up to *partitions* contiguous chunks.

    If partitions > len(array), each element gets its own chunk. Elements
    that do not divide evenly are appended to the last chunk. Returns [] for
    an empty array (the original crashed with IndexError in that case).
    """
    if not array:
        return []
    if partitions > len(array):
        length = 1
        partitions = len(array)
    else:
        length = len(array) // partitions  # base length of each partition
    partitionedList = [array[i - length:i] for i in range(length, length * partitions + 1, length)]
    partitionedList[-1].extend(array[length * partitions:])  # leftovers go to the last chunk
    return partitionedList
def writePathsFile(filename, paths):
    """Write each entry of *paths* to *filename*, one per line."""
    with open(filename, "w") as handle:
        handle.writelines(path + "\n" for path in paths)
# CLI: partition the unfinished leaf directories among N sbatch daemon jobs,
# write each daemon's path list to a file, and submit the jobs.
parse=argparse.ArgumentParser()
parse.add_argument("-r", "--root", dest="root", help="Root directory to start searching from.")
parse.add_argument("-f", "--function", dest="function", help="The path to the function to be run. e.g. $PWD/functions/HelloWorld.py") #This will be what goes in wrap? Whole thing will need to be given in quotes.
parse.add_argument("-j", "--jobs", dest="jobs")
parse.add_argument("-t", "--time", dest="time")
parse.add_argument("-o", "--outputFile", dest="outputFile")
args = parse.parse_args()
if not os.path.exists(args.root):
    print("Invalid path.")
else:
    paths = getPaths(args.root,args.outputFile)
    # NOTE(review): if jobs exceeds the number of partitions returned,
    # partitionedPaths[job] raises IndexError -- confirm intended usage.
    partitionedPaths = partitionList(paths, int(args.jobs))
    for job in range(int(args.jobs)):
        filename = f"applyFunctionDaemonPaths{job}.txt"
        writePathsFile(filename,partitionedPaths[job])
        # NOTE(review): "(unknown)" below looks like a redacted placeholder;
        # it should presumably be the per-job paths file written above -- confirm.
        os.system(f"sbatch -t {args.time} --wrap=\"python3 -u {os.path.join(cwd,DaemonPath)} -f \'{args.function}\' -p (unknown)\"")
        #os.system(f"python {os.path.join(cwd,DaemonPath)} --function \"{args.function}\" --paths (unknown)")
| StarcoderdataPython |
3396825 | <reponame>wcooley/python-gryaml<filename>tests/test__py2neo.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for :mod:`gryaml._py2neo`."""
from __future__ import print_function, absolute_import
import pytest # noqa
@pytest.mark.integration
def test_connect(graphdb):
    """Test :func:`~py2neo_compat.connect`."""
    # `graphdb` is a pytest fixture (defined in conftest); any truthy
    # version string proves the connection to Neo4j works.
    assert graphdb.neo4j_version
| StarcoderdataPython |
1609136 | from setuptools import setup
# Package metadata for lshashing; long description comes from the README.
with open("README.md", "r", encoding="utf8") as rm:
    readme = rm.read()

# splitlines + strip drops blank lines that split('\n') would keep; an empty
# string in install_requires breaks installation.
with open("requirements.txt") as rq:
    requirements = [line.strip() for line in rq.read().splitlines() if line.strip()]

setup(
    name="lshashing",
    version="1.0.0",
    description="Nearest neighbors search using locality-sensitive hashing",
    packages=["lshashing"],
    install_requires=requirements,
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/MNoorFawi/lshashing",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ]
)
| StarcoderdataPython |
1611155 | import json
from django.conf.urls import url
from django.http import HttpResponse
from ecs.docstash.decorators import with_docstash
@with_docstash()
def simple_post_view(request):
    # Persist POSTed form fields into the docstash (provided by the
    # decorator), then echo the current stash contents back as JSON.
    if request.method == 'POST':
        request.docstash.value = request.POST.dict()
        request.docstash.save()
    return HttpResponse(json.dumps(request.docstash.value), content_type='text/json')
# Single test route; the optional trailing path segment is captured as the
# docstash key consumed by @with_docstash.
urlpatterns = (
    url(r'^simple_post/(?:(?P<docstash_key>.*)/)?$', simple_post_view),
)
| StarcoderdataPython |
172156 | <reponame>joshuagryphon/plastid
#!/usr/bin/env python
"""Estimate :term:`sub-codon phasing` in a :term:`ribosome profiling` dataset,
stratified by read length.
Because ribosomes step three nucleotides in each cycle of translation elongation,
in many :term:`ribosome profiling` datasets a triplet periodicity is observable
in the distribution of :term:`ribosome-protected footprints <footprint>`.
In a good dataset, 70-90% of the reads on a codon fall within the first of the
three codon positions. This allows deduction of translation reading frames, if
the reading frame is not known *a priori.* See :cite:`Ingolia2009` for more
details.
Output files
------------
OUTBASE_phasing.txt
Read phasing for each read length
OUTBASE_phasing.svg
Plot of phasing by read length
where `OUTBASE` is supplied by the user.
.. note::
To avoid double-counting of codons, users should either use an *ROI file*
made by the ``generate`` subprogram of the |metagene| script, or supply
an :term:`annotation` file that includes only one transcript isoform per
gene.
"""
import sys
import argparse
import inspect
import warnings
import pandas as pd
import numpy
import matplotlib
matplotlib.use("Agg")
from plastid.util.scriptlib.argparsers import (
AlignmentParser,
AnnotationParser,
PlottingParser,
BaseParser,
)
from plastid.util.io.openers import get_short_name, argsopener, read_pl_table
from plastid.util.io.filters import NameDateWriter
from plastid.util.scriptlib.help_formatters import format_module_docstring
from plastid.util.services.exceptions import DataWarning, ArgumentWarning
from plastid.plotting.plots import phase_plot
from plastid.genomics.roitools import SegmentChain
# Show each distinct warning only once per run.
warnings.simplefilter("once")
# Status-line writer named after the invoking script (outermost stack frame).
printer = NameDateWriter(get_short_name(inspect.stack()[-1][1]))
def roi_row_to_cds(row):
    """Extract the coding portion of a maximal spanning window created by
    the |metagene| ``generate`` subprogram.

    Parameters
    ----------
    row : (int, Series)
        Row from a :class:`pandas.DataFrame` of an ROI file made by the
        |metagene| ``generate`` subprogram

    Returns
    -------
    |SegmentChain|
        Coding portion of maximal spanning window
    """
    series = row[1]
    chain = SegmentChain.from_str(series["region"])
    # the CDS begins `zero_point - alignment_offset` positions into the window
    cds_start = series["zero_point"] - series["alignment_offset"]
    return chain.get_subchain(cds_start, chain.length)
def main(argv=sys.argv[1:]):
    """Command-line program
    Parameters
    ----------
    argv : list, optional
        A list of command-line arguments, which will be processed
        as if the script were called from the command line if
        :py:func:`main` is called directly.
        Default: `sys.argv[1:]`. The command-line arguments, if the script is
        invoked from the command line
    """
    # Assemble the CLI from the shared plastid sub-parsers.
    al = AlignmentParser(
        disabled=["normalize", "big_genome", "spliced_bowtie_files"],
        input_choices=["BAM"],
    )
    an = AnnotationParser()
    pp = PlottingParser()
    bp = BaseParser()
    plotting_parser = pp.get_parser()
    alignment_file_parser = al.get_parser(conflict_handler="resolve")
    annotation_file_parser = an.get_parser(conflict_handler="resolve")
    base_parser = bp.get_parser()
    parser = argparse.ArgumentParser(
        description=format_module_docstring(__doc__),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        conflict_handler="resolve",
        parents=[base_parser, annotation_file_parser, alignment_file_parser, plotting_parser]
    )
    parser.add_argument("roi_file",type=str,nargs="?",default=None,
        help="Optional. ROI file of maximal spanning windows surrounding start codons, "+\
        "from ``metagene generate`` subprogram. Using this instead of `--annotation_files` "+\
        "prevents double-counting of codons when multiple transcript isoforms exist "+\
        "for a gene. See the documentation for `metagene` for more info about ROI files."+\
        "If an ROI file is not given, supply an annotation with ``--annotation_files``")
    parser.add_argument("outbase", type=str, help="Required. Basename for output files")
    parser.add_argument(
        "--codon_buffer",
        type=int,
        default=5,
        help="Codons before and after start codon to ignore (Default: 5)"
    )
    args = parser.parse_args(argv)
    bp.get_base_ops_from_args(args)
    pp.set_style_from_args(args)
    # `gnd` maps aligned reads onto genomic positions.
    gnd = al.get_genome_array_from_args(args, printer=printer)
    read_lengths = list(range(args.min_length, args.max_length + 1))
    codon_buffer = args.codon_buffer
    dtmp = {
        "read_length": numpy.array(read_lengths),
        "reads_counted": numpy.zeros_like(read_lengths, dtype=int),
    }
    # Choose the region source: an ROI file (preferred, avoids isoform
    # double-counting) or a transcript annotation.
    if args.roi_file is not None:
        using_roi = True
        roi_table = read_pl_table(args.roi_file)
        regions = roi_table.iterrows()
        transform_fn = roi_row_to_cds
        back_buffer = -1
        if len(args.annotation_files) > 0:
            warnings.warn(
                "If an ROI file is given, annotation files are ignored. Pulling regions from '%s'. Ignoring '%s'"
                % (args.roi_file, ", ".join(args.annotation_files)), ArgumentWarning
            )
    else:
        using_roi = False
        if len(args.annotation_files) == 0:
            printer.write("Either an ROI file or at least annotation file must be given.")
            sys.exit(1)
        else:
            warnings.warn(
                "Using a transcript annotation file instead of an ROI file can lead to double-counting of codons if the annotation contains multiple transcripts per gene.",
                ArgumentWarning
            )
            regions = an.get_transcripts_from_args(args, printer=printer)
            back_buffer = -codon_buffer
            transform_fn = lambda x: x.get_cds()
    # One 3-element phase accumulator per read length.
    phase_sums = {}
    for k in read_lengths:
        phase_sums[k] = numpy.zeros(3)
    for n, roi in enumerate(regions):
        if n % 1000 == 1:
            printer.write("Counted %s ROIs ..." % n)
        # transformation needed to extract CDS from transcript or from ROI file window
        cds_part = transform_fn(roi)
        # only calculate for coding genes
        if len(cds_part) > 0:
            read_dict = {}
            count_vectors = {}
            for k in read_lengths:
                read_dict[k] = []
                count_vectors[k] = []
            # for each seg, fetch reads, sort them, and create individual count vectors
            for seg in cds_part:
                reads = gnd.get_reads(seg)
                for read in filter(lambda x: len(x.positions) in read_dict, reads):
                    read_dict[len(read.positions)].append(read)
                # map and sort by length
                for read_length in read_dict:
                    count_vector = list(gnd.map_fn(read_dict[read_length], seg)[1])
                    count_vectors[read_length].extend(count_vector)
            # add each count vector for each length to total
            for k, vec in count_vectors.items():
                counts = numpy.array(vec)
                # reverse minus-strand vectors so position 0 is the CDS start
                if cds_part.strand == "-":
                    counts = counts[::-1]
                if len(counts) % 3 == 0:
                    counts = counts.reshape((int(len(counts) / 3), 3))
                else:
                    if using_roi == False:
                        message = "Length of '%s' coding region (%s nt) is not divisible by 3. Ignoring last partial codon." % (
                            roi.get_name(), len(counts)
                        )
                        warnings.warn(message, DataWarning)
                    newlen = int(len(counts) // 3)
                    counts = counts[:3 * newlen]
                    counts = counts.reshape(newlen, 3)
                # sum per-codon-position counts, skipping buffer codons at both ends
                phase_sums[k] += counts[codon_buffer:back_buffer, :].sum(0)
    printer.write("Counted %s ROIs total." % (n + 1))
    for k in dtmp:
        dtmp[k] = numpy.array(dtmp[k])
    # total reads counted for each size
    for k in read_lengths:
        dtmp["reads_counted"][dtmp["read_length"] == k] = phase_sums[k].sum()
    # read length distribution
    dtmp["fraction_reads_counted"
         ] = dtmp["reads_counted"].astype(float) / dtmp["reads_counted"].sum()
    # phase vectors
    phase_vectors = {K: V.astype(float) / V.astype(float).sum() for K, V in phase_sums.items()}
    for i in range(3):
        dtmp["phase%s" % i] = numpy.zeros(len(dtmp["read_length"]))
    for k, vec in phase_vectors.items():
        for i in range(3):
            dtmp["phase%s" % i][dtmp["read_length"] == k] = vec[i]
    # phase table
    fn = "%s_phasing.txt" % args.outbase
    printer.write("Saving phasing table to %s ..." % fn)
    dtmp = pd.DataFrame(dtmp)
    with argsopener(fn, args) as fh:
        dtmp.to_csv(
            fh,
            columns=[
                "read_length",
                "reads_counted",
                "fraction_reads_counted",
                "phase0",
                "phase1",
                "phase2",
            ],
            float_format="%.6f",
            na_rep="nan",
            sep="\t",
            index=False,
            header=True
        )
        # redundant inside the `with` block, but harmless
        fh.close()
    # Plot phasing, one stacked bar group per read length.
    fig = {}
    if args.figsize is not None:
        fig["figsize"] = tuple(args.figsize)
    colors = pp.get_colors_from_args(args, len(read_lengths))
    fn = "%s_phasing.%s" % (args.outbase, args.figformat)
    printer.write("Plotting to %s ..." % fn)
    plot_counts = numpy.vstack([V for (_, V) in sorted(phase_sums.items())])
    fig, (ax1,_) = phase_plot(plot_counts,labels=read_lengths,lighten_by=0.3,
                              cmap=None,color=colors,fig=fig)
    if args.title is not None:
        ax1.set_title(args.title)
    else:
        ax1.set_title("Phasing stats for %s" % args.outbase)
    fig.savefig(fn, dpi=args.dpi, bbox_inches="tight")
| StarcoderdataPython |
58019 | <gh_stars>100-1000
import click
import json, cPickle
import requests, zipfile
import os, glob
from music21 import analysis, converter, corpus, meter
from music21.note import Note
from constants import *
# Click command group; subcommands are attached at the bottom of the module
# via map(datasets.add_command, [...]).
@click.group()
def datasets():
    """Constructs various datasets."""
    pass
@click.command()
@click.option('--keep-fermatas', type=bool, default=True)
@click.option('--subset', type=bool, default=False)
@click.option('--mono', type=bool, default=False, help='Extract only monophonic Soprano part')
@click.option('--parts_to_mask', '-m', multiple=True, type=str)
def prepare(keep_fermatas, subset, mono, parts_to_mask=[]):
"""
Prepares polyphonic scores using a chord tuple representation.
Each score is transformed into a sequence of tuples with a constant
timestep of (1/`FRAMES_PER_CROTCHET`) crotchets between consecutive chords.
Each encoded chord has the following format:
Notes : List[(
Midi: Int,
Tied : Bool (true if note is continuation of previous note)
)]
"""
txt_to_utf, utf_to_txt = build_vocabulary()
txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
it = iter_standardized_chorales()
if subset:
it = [next(it) for _ in range(5)]
for score in it:
bwv_id = score.metadata.title
print('Processing BWV {0}'.format(bwv_id))
# remove all except 'Soprano' part if --mono
if mono:
for part in score.parts:
if part.id != 'Soprano':
score.remove(part)
#key = score.analyze('key') # TODO: filter to only majors for task?
encoded_score = encode_score(score, keep_fermatas=keep_fermatas, parts_to_mask=parts_to_mask)
encoded_score_txt = to_text(encoded_score)
fname = 'BWV-{0}'.format(bwv_id)
if mono:
fname += '-mono'
if parts_to_mask:
fname += '-mask-{0}'.format('-'.join(parts_to_mask))
else:
fname += '-nomask'
if keep_fermatas:
fname += '-fermatas'
else:
fname += '-nofermatas'
out_path = SCRATCH_DIR + '/{0}'.format(fname)
print 'Writing {0}'.format(out_path)
with open(out_path + '.txt', 'w') as fd:
fd.write('\n'.join(encoded_score_txt))
with open(out_path + '.utf', 'w') as fd:
fd.write(to_utf(txt_to_utf, encoded_score_txt))
@click.command()
@click.argument('files', nargs=-1, required=True)
@click.option('-o', '--output', type=click.File('wb'), default=SCRATCH_DIR + '/concat_corpus.txt')
def concatenate_corpus(files, output):
    """Concatenates individual files together into single corpus.
    Try `bachbot concatenate_corpus scratch/*.utf`.
    """
    # Newlines are stripped so the corpus becomes one continuous token stream.
    print 'Writing concatenated corpus to {0}'.format(output.name)
    for fp in files:
        with open(fp, 'rb') as fd:
            output.write(''.join(filter(lambda x: x != '\n', fd.read())))
@click.command()
@click.option('--utf-to-txt-json', type=click.File('rb'), default=SCRATCH_DIR + '/utf_to_txt.json')
@click.argument('in-file', type=click.File('rb'))
@click.argument('out-file', type=click.File('wb'))
def encode_text(utf_to_txt_json, in_file, out_file):
    # Load the utf->txt vocabulary, invert it, and re-encode the plain-text
    # token stream into the UTF corpus encoding.
    mapping = json.load(utf_to_txt_json)
    inverse = {txt: utf for utf, txt in mapping.items()}
    out_file.write(to_utf(inverse, in_file))
def standardize_key(score):
    """Converts into the key of C major or A minor.
    Adapted from https://gist.github.com/aldous-rey/68c6c43450517aa47474
    """
    # major conversions
    # Maps tonic name -> number of half steps to transpose to reach C / A.
    majors = dict([("A-", 4),("A", 3),("B-", 2),("B", 1),("C", 0),("C#",-1),("D-", -1),("D", -2),("E-", -3),("E", -4),("F", -5),("F#",6),("G-", 6),("G", 5)])
    minors = dict([("A-", 1),("A", 0),("B-", -1),("B", -2),("C", -3),("C#",-4),("D-", -4),("D", -5),("E-", 6),("E", 5),("F", 4),("F#",3),("G-", 3),("G", 2)])
    # transpose score
    key = score.analyze('key')
    if key.mode == "major":
        halfSteps = majors[key.tonic.name]
    elif key.mode == "minor":
        halfSteps = minors[key.tonic.name]
    tScore = score.transpose(halfSteps)
    # transpose key signature
    for ks in tScore.flat.getKeySignatures():
        ks.transpose(halfSteps, inPlace=True)
    return tScore
def extract_SATB(score):
    """
    Extracts the Soprano, Alto, Tenor, and Bass parts from a piece. The returned score is guaranteed
    to have parts with names 'Soprano', 'Alto', 'Tenor', and 'Bass'.
    This method mutates its arguments.
    """
    # Alternative spellings of each voice id found across the corpus.
    ids = dict()
    ids['Soprano'] = {
        'Soprano',
        'S.',
        'Soprano 1', # NOTE: soprano1 or soprano2?
        'Soprano\rOboe 1\rViolin1'}
    ids['Alto'] = { 'Alto', 'A.'}
    ids['Tenor'] = { 'Tenor', 'T.'}
    ids['Bass'] = { 'Bass', 'B.'}
    id_to_name = {id:name for name in ids for id in ids[name] }
    # Iterate a snapshot: removing parts while iterating `score.parts`
    # directly can silently skip elements.
    for part in list(score.parts):
        if part.id in id_to_name:
            part.id = id_to_name[part.id]
        else:
            score.remove(part)
    return score
def build_vocabulary():
    """Load the token vocabulary from SCRATCH_DIR if present, otherwise build
    it (all 128 MIDI pitches x tied/untied, plus delimiters) and persist it.

    Returns (txt_to_utf, utf_to_txt) dicts.
    """
    if os.path.exists(SCRATCH_DIR + '/utf_to_txt.json'):
        with open(SCRATCH_DIR + '/utf_to_txt.json', 'r') as f:
            utf_to_txt = json.load(f)
            txt_to_utf = {v:k for k,v in utf_to_txt.items()}
    else:
        vocabulary = set([str((midi, tie)) for tie in [True, False] for midi in range(128)]) # all MIDI notes and tie/notie
        vocabulary.update(set([CHORD_BOUNDARY_DELIM, FERMATA_SYM]))
        # NOTE(review): set iteration order is arbitrary, so the txt<->utf
        # code assignment can differ between fresh builds; only the JSON file
        # keeps it stable across runs -- confirm this is intended.
        txt_to_utf = dict(map(lambda x: (x[1], unichr(x[0])), enumerate(vocabulary)))
        txt_to_utf['START'] = START_DELIM
        txt_to_utf['END'] = END_DELIM
        utf_to_txt = {utf:txt for txt,utf in txt_to_utf.items()}
        # save vocabulary
        with open(SCRATCH_DIR + '/utf_to_txt.json', 'w') as fd:
            print 'Writing vocabulary to ' + SCRATCH_DIR + '/utf_to_txt.json'
            json.dump(utf_to_txt, fd)
    return txt_to_utf, utf_to_txt
def iter_standardized_chorales():
    "Iterator over 4/4 Bach chorales standardized to Cmaj/Amin with SATB parts extracted."
    for score in corpus.chorales.Iterator(
            numberingSystem='bwv',
            returnType='stream'):
        # only the first time signature is checked; mixed-meter scores pass
        # if they open in 4/4
        if score.getTimeSignatures()[0].ratioString == '4/4': # only consider 4/4
            yield extract_SATB(standardize_key(score))
@click.command()
@click.argument('in_path', type=click.Path(exists=True))
@click.argument('outfile', type=click.File('w'))
def prepare_harm_input(in_path, outfile):
    "Prepares and encodes a musicXML file containing a monophonic melody line as a Soprano voice to harmonize."
    txt_to_utf, utf_to_txt = build_vocabulary()
    txt_to_utf[BLANK_MASK_TXT] = BLANK_MASK_UTF # don't add to `utf_to_txt` because samples should never contain BLANK_MASK
    sc = converter.parseFile(in_path)
    encoded_score = []
    for note in sc.flat.notesAndRests:
        if note.isRest:
            # a rest expands to one empty frame per sub-crotchet timestep
            encoded_score.extend((int(note.quarterLength * FRAMES_PER_CROTCHET)) * [[]])
        else:
            has_fermata = any(map(lambda e: e.isClassOrSubclass(('Fermata',)), note.expressions))
            has_tie = note.tie is not None and note.tie.type != 'start'
            # melody note plus three BLANK_MASK slots for the A/T/B voices to be harmonized
            encoded_chord = [(note.pitch.midi, has_tie)] + ([BLANK_MASK_TXT for _ in range(3)])
            encoded_score.append((has_fermata, encoded_chord))
            # remaining frames of the note's duration repeat the pitch as tied
            encoded_score.extend((int(note.quarterLength * FRAMES_PER_CROTCHET) - 1) * [
                (has_fermata,
                    map(lambda note: BLANK_MASK_TXT if note == BLANK_MASK_TXT else (note[0], True), encoded_chord))
            ])
    outfile.write(to_utf(txt_to_utf, to_text(encoded_score)))
def encode_score(score, keep_fermatas=True, parts_to_mask=[]):
    """
    Encodes a music21 score into a List of chords, where each chord is represented with
    a (Fermata :: Bool, List[(Note :: Integer, Tie :: Bool)]).
    If `keep_fermatas` is True, all `has_fermata`s will be False.
    All tokens from parts in `parts_to_mask` will have output tokens `BLANK_MASK_TXT`.
    Time is discretized such that each crotchet occupies `FRAMES_PER_CROTCHET` frames.

    Note: this file targets Python 2, where the `map(...)` below is eager and
    yields a list; `parts_to_mask=[]` is a mutable default but is never
    mutated here.
    """
    encoded_score = []
    for chord in (score
        .quantize((FRAMES_PER_CROTCHET,))
        .chordify(addPartIdAsGroup=bool(parts_to_mask))
        .flat
        .notesAndRests): # aggregate parts, remove markup
        # expand chord/rest s.t. constant timestep between frames
        if chord.isRest:
            encoded_score.extend((int(chord.quarterLength * FRAMES_PER_CROTCHET)) * [[]])
        else:
            has_fermata = (keep_fermatas) and any(map(lambda e: e.isClassOrSubclass(('Fermata',)), chord.expressions))
            encoded_chord = []
            # TODO: sorts Soprano, Bass, Alto, Tenor without breaking ties
            # c = chord.sortAscending()
            # sorted_notes = [c[-1], c[0]] + c[1:-1]
            # for note in sorted_notes:
            for note in chord:
                # the part id was attached as the pitch's first group by chordify
                if parts_to_mask and note.pitch.groups[0] in parts_to_mask:
                    encoded_chord.append(BLANK_MASK_TXT)
                else:
                    has_tie = note.tie is not None and note.tie.type != 'start'
                    encoded_chord.append((note.pitch.midi, has_tie))
            encoded_score.append((has_fermata, encoded_chord))
            # repeat pitches to expand chord into multiple frames
            # all repeated frames when expanding a chord should be tied
            encoded_score.extend((int(chord.quarterLength * FRAMES_PER_CROTCHET) - 1) * [
                (has_fermata,
                 map(lambda note: BLANK_MASK_TXT if note == BLANK_MASK_TXT else (note[0], True), encoded_chord))
            ])
    return encoded_score
def to_utf(txt_to_utf, score_txt):
    """
    Converts a text-encoded score into UTF encoding (appending start/end delimiters).
    Throws `KeyError` when out-of-vocabulary token is encountered
    """
    body = ''.join(txt_to_utf[token.strip()] for token in score_txt)
    return START_DELIM + body + END_DELIM
def to_text(encoded_score):
    """Convert a Python-encoded score into a flat list of plain-text tokens,
    separating consecutive frames with CHORD_BOUNDARY_DELIM."""
    tokens = []
    for index, chord_pair in enumerate(encoded_score):
        if index > 0:
            tokens.append(CHORD_BOUNDARY_DELIM) # chord boundary delimiter
        if not chord_pair:
            continue  # empty frame == rest
        is_fermata, chord = chord_pair
        if is_fermata:
            tokens.append(FERMATA_SYM)
        for note in chord:
            tokens.append(str(note))
    return tokens
# Register subcommands on the `datasets` group. NOTE: this relies on
# Python 2's eager `map`; under Python 3 the lazy map would never execute
# (this file is Python 2 -- see the print statements and `unichr` above).
map(datasets.add_command, [
    prepare,
    prepare_harm_input,
    encode_text,
    concatenate_corpus,
])
| StarcoderdataPython |
189655 | from chef.base import ChefObject
class Environment(ChefObject):
    """A Chef environment object.
    .. versionadded:: 0.2
    """

    # Chef server API endpoint for this object type
    url = '/environments'
    # minimum Chef server API version that supports environments
    api_version = '0.10'
    # declared attributes and the type each is coerced to by ChefObject
    attributes = {
        'description': str,
        'cookbook_versions': dict,
        'default_attributes': dict,
        'override_attributes': dict,
    }
1763388 | <reponame>rikiesxiao/android_testing_with_python
#coding=utf-8
import os
import re
import subprocess
import time

import log
class AdbException(Exception):
    """Raised for adb installation/connection/transfer failures.

    A trailing newline is appended so raw adb output prints cleanly.
    """
    def __init__(self, message):
        Exception.__init__(self, message + '\n')
# NOTE(review): RETRY_CONNECTION_TIMES is not referenced in the visible code
# (retry_connection loops forever) -- confirm whether it should bound the loop.
RETRY_CONNECTION_TIMES = 5
# seconds to sleep between reconnection attempts
RETRY_CONNECTION_BETWEEN = 10
class ADB(object):
def __init__(self, id):
self.id = id.rstrip()
log.debug('target devices: %s' %self.id)
self.adbd = 'adb'
#check adb is installed
try:
subprocess.Popen( [ self.adbd, 'devices' ], stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read(1)
self.adb_command = [self.adbd]
except:
raise AdbException("Error: is `adb` installed ??")
self.__isNetworkConnection()
self.adb_command = [ self.adbd, '-s', self.id ]
self.adbsh_command = [ self.adbd, '-s', self.id, 'shell' ]
self.retry_connection()
def retry_connection(self):
while not self._connection():
print 'Retry Connection'
time.sleep(RETRY_CONNECTION_BETWEEN)
def _connection(self):
devices = self.__adb('devices')
if not str(devices).__contains__(self.id):
# self.adb('disconnect')
r = self.__adb('connect %s' %self.id.split(":")[0] if ":" in self.id else self.id)
if str(r).__contains__('unable to connect to'):
print 'unable to connect to %s' %self.id
return False
time.sleep(1)
r = self.adb('root')
log.debug('root devices:\n %s' %r)
if 'adbd is already running as root' not in r:
time.sleep(2)
self.__adb('connect %s' %self.id.split(":")[0] if ":" in self.id else self.id)
time.sleep(1)
self.adb('remount')
log.debug('remount devices:\n %s' %r)
return True
def __adb(self, command):
if not isinstance(command, list): command = command.split()
log.debug(str([self.adbd] + command))
return subprocess.Popen([self.adbd]+command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
def adb(self, command):
""" run `adb -s serial` command """
if not isinstance(command, list): command = command.split()
log.debug(str(self.adb_command + command))
return subprocess.Popen(self.adb_command + command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
def adbshell(self, command):
""" run `adb -s serial shell` command """
if not isinstance(command, list): command = command.split()
log.debug(str(self.adbsh_command + command))
return subprocess.Popen(self.adbsh_command + command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()[0]
def popen(self, cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT):
"""run adb shell command as nohup, return subprocess.Popen instance"""
if not isinstance(cmd, list): cmd = [cmd]
log.debug("popen command: "+str(self.adbsh_command+cmd))
return subprocess.Popen(self.adbsh_command+cmd, stdout=stdout,stderr=stderr)
def screenshot(self, filename, path="/data/local/tmp"):
self.adbshell("screencap -p %s" %os.path.join(path,filename, ".png"))
def push(self, jar, path="/data/local/tmp"):
    """Push *jar* to *path* on the device, retrying up to five times.

    Raises AdbException with the last adb output if no attempt reports
    the "NN KB/s (NN bytes in N.NNNs)" success line.
    """
    #95 KB/s (6332 bytes in 0.064s)
    success = re.compile(r'\d+ KB/s \(\d+ bytes in .*s\)')
    r = self.adb("push %s %s/" % (jar, path))
    pushed = False
    for _attempt in range(1, 6):
        if success.match(r):
            pushed = True
            break
        print(r)
        self.retry_connection()
        r = self.adb("push %s %s/" % (jar, path))
    if not pushed:
        raise AdbException(r)
def install(self, apkname):
    """Install *apkname* via `adb install -r`, retrying up to five times.

    Raises AdbException with the last adb output when every attempt fails.
    """
    #2685 KB/s (2433480 bytes in 0.884s)
    #        pkg: /data/local/tmp/Settings.apk
    #Success
    r = self.adb("install -r %s" % apkname)
    installed = False
    for _attempt in range(1, 6):
        if 'Success' in str(r):
            installed = True
            break
        print(r)
        self.retry_connection()
        # BUGFIX: the retry used to re-run a copy-pasted `push` command with
        # undefined `jar`/`path` names (NameError at runtime); re-run the
        # install command instead.
        r = self.adb("install -r %s" % apkname)
    if not installed:
        raise AdbException(r)
import sys
from app import app
from app import get_attempts_data as presenter
from app import topic_hlr_train as model_functions
from app import kafka_config
import json
from kafka import KafkaConsumer
from kafka.structs import OffsetAndMetadata, TopicPartition
from datetime import datetime, time, timedelta
from collections import defaultdict
def __run_inference(user_id, attempts_df, todays_attempts):
    """Run HLR inference over *attempts_df* and persist the results.

    When the frame is empty, only the (None) results are written so the
    index still records the pass.
    """
    entity_types = ['subject', 'chapter']
    results = None
    if len(attempts_df) > 0:
        if todays_attempts:
            last_practiced_map = presenter.get_last_practiced(user_id)
        else:
            last_practiced_map = defaultdict(list)
        results = model_functions.run_inference(attempts_df, entity_types, last_practiced_map)
    presenter.write_to_hlr_index(user_id, results, todays_attempts, entity_types)
def get_attempts_and_run_inference(user_id, t_start, today_start):
    """Fetch attempts since *t_start* and infer, splitting past vs. today.

    When t_start == today_start only today's attempts are processed;
    otherwise the historical slice (before today_start) is inferred first.
    """
    only_today = t_start == today_start
    attempts_df = presenter.get_attempts_of_user(user_id, t_start)
    if len(attempts_df) == 0:
        # Still record the (empty) historical pass when fetching history.
        if not only_today:
            __run_inference(user_id, attempts_df, False)
        return
    if only_today:
        __run_inference(user_id, attempts_df, True)
    else:
        past = attempts_df[attempts_df['attempttime'] < today_start]
        __run_inference(user_id, past, False)
        __run_inference(user_id, attempts_df[attempts_df['attempttime'] >= today_start], True)
def infer_on_attempts(user_id):
    """Run inference for *user_id*.

    First ever run pulls MAX_HL days of history; later runs only pull
    attempts from the start of today (times in epoch milliseconds).
    """
    today_start_ms = int(datetime.combine(datetime.today(), time.min).timestamp() * 1000)
    if presenter.past_attempts_fetched(user_id):
        start_time = today_start_ms
    else:
        window_start = datetime.now() - timedelta(days=model_functions.MAX_HL)
        start_time = int(window_start.timestamp() * 1000)
    get_attempts_and_run_inference(user_id, start_time, today_start_ms)
def start_consumer():
    """Consume attempt events from Kafka and run HLR inference per user.

    Offsets are committed manually after each message is processed, so a
    crash replays unprocessed messages instead of losing them.
    """
    print("Starting consumer...")
    consumer = KafkaConsumer(bootstrap_servers=[kafka_config.HOST],
                             key_deserializer=lambda m: m.decode('utf8'),
                             value_deserializer=lambda m: json.loads(m.decode('utf8')),
                             auto_offset_reset="latest",
                             max_poll_records=50,
                             group_id=kafka_config.GROUP_ID)
    consumer.subscribe([kafka_config.TOPIC])
    for msg in consumer:
        infer_on_attempts(msg.value['userid'])
        print("Consumer: {}".format(msg), file=sys.stdout)
        tp = TopicPartition(msg.topic, msg.partition)
        # BUGFIX: commit the *next* offset to consume (msg.offset + 1);
        # committing msg.offset itself re-delivers the last processed
        # message after a restart.
        offsets = {tp: OffsetAndMetadata(msg.offset + 1, None)}
        try:
            consumer.commit(offsets=offsets)
        except Exception as e:
            # Best-effort commit: keep consuming; the offset is retried
            # on the next message's commit.
            print(e)
# source repository: paulondc/ushotgun
import os
import json
import shotgun_api3
class SessionConfigError(Exception):
    """
    Session Config Error.

    Raised when the session-config environment variable is unset or no
    matching "<name>.json" configuration file can be located.
    """
class Session(object):
    """
    Singleton shotgun session.

    The connection is created lazily on the first call to get(); later
    calls return the cached instance (their ``config`` argument is ignored).
    """

    __sessionSingleton = None
    # Env var holding a ':'-separated list of directories with "<name>.json" configs.
    __sessionConfigEnv = 'USHOTGUN_SESSION_CONFIG_PATH'
    # Env var holding the shotgun server URL.
    __shotgunUrlEnv = 'UMEDIA_SHOTGUN_URL'

    @classmethod
    def get(cls, config='default'):
        """
        Return the current session.

        config -- base name of the "<config>.json" file used to build the
        connection on first call; ignored once the singleton exists.
        """
        if cls.__sessionSingleton is None:
            config = cls.__loadConfig(config)

            # creating a singleton session object
            cls.__sessionSingleton = shotgun_api3.Shotgun(
                os.environ[cls.__shotgunUrlEnv],
                script_name=config['auth']['scriptName'],
                api_key=config['auth']['apiKey']
            )

        return cls.__sessionSingleton

    @classmethod
    def __loadConfig(cls, name):
        """
        Return a dict with the session configuration.

        Raises SessionConfigError when the env var is unset or no
        "<name>.json" exists in any of the listed directories.
        """
        configDir = os.environ.get(cls.__sessionConfigEnv, '')
        if not configDir:
            raise SessionConfigError(
                'Environment "{}" is not defined!'.format(
                    cls.__sessionConfigEnv
                )
            )

        # looking for the configuration under the config path
        configFile = None
        for configDirectory in filter(os.path.exists, configDir.split(':')):
            # BUGFIX: join against the current directory being scanned,
            # not the whole ':'-separated env value (which broke lookups
            # whenever more than one directory was configured).
            targetConfig = os.path.join(configDirectory, '{}.json'.format(name))
            if os.path.exists(targetConfig):
                configFile = targetConfig
                break

        # making sure the configuration file exists
        if not configFile:
            raise SessionConfigError(
                'Could not find configuration "{}.json" under {} environment'.format(
                    name,
                    cls.__sessionConfigEnv
                )
            )

        # loading configuration json file
        result = {}
        with open(configFile) as jsonFile:
            result = json.load(jsonFile)

        return result
#! /usr/bin/python
# -*- coding: utf-8 -*-
import re;
import urllib;
import urllib2;
import sys;
import os;
def write_xml_tocache(xml, path):
    """Persist raw XML text to *path* (on-disk cache entry)."""
    with open(path, 'w') as cache_file:
        cache_file.write(xml)
def crawl_xml_incache(word):
    """Return the youdao translation XML for *word*, caching replies on disk."""
    query = fix_query_string(word.lower())
    # NOTE(review): cache directory is hard-coded to one user's home path.
    cache_path = "/home/lizhi/gnu/plugins/dict/words/" + query + ".xml"
    if os.path.exists(cache_path):
        with open(cache_path) as cached:
            return cached.read()
    response = urllib2.urlopen("http://fanyi.youdao.com/openapi.do?"
        + "keyfrom=3zsocom&key=1620414302&type=data&doctype=xml&version=1.1&q="
        + urllib.quote_plus(query))
    xml = response.read()
    response.close()
    write_xml_tocache(xml, cache_path)
    return xml
def debug():
    """Ad-hoc debugging helper: dump parses of a local word.xml sample."""
    xml = open("word.xml").read()
    print(get_text(xml))
    print(get_elements_by_path(xml, "custom-translation/content"))
    #print_translations(xml, False, False);
def get_elements_by_path(xml, elem):
    """Recursively collect elements addressed by a '/'-separated path.

    xml  -- a fragment string or a list of fragment strings.
    elem -- a path such as "custom-translation/content", or a list of
            already-split path parts.
    Returns a flat list of the matching inner-text fragments.
    """
    # Normalize both arguments to lists (idiomatic isinstance instead of
    # the original `type(x) == type('')` comparison).
    if isinstance(xml, str):
        xml = [xml]
    if isinstance(elem, str):
        elem = elem.split('/')
    if len(xml) == 0:
        return []
    elif len(elem) == 0:
        return xml
    elif len(elem) == 1:
        # Leaf level: gather matches from every fragment.
        result = []
        for item in xml:
            result += get_elements(item, elem[0])
        return result
    else:
        # Descend one path level and recurse on the collected children.
        subitems = []
        for item in xml:
            subitems += get_elements(item, elem[0])
        return get_elements_by_path(subitems, elem[1:])
# Extracts the payload of a CDATA section: ![CDATA[ ... ]]
textre = re.compile(r"\!\[CDATA\[(.*?)\]\]", re.DOTALL)

def get_text(xml):
    """Return the CDATA payload of *xml*, or *xml* unchanged if none."""
    found = textre.search(xml)
    return found.group(1) if found else xml
def get_elements(xml, elem):
    """Return the inner text of every <elem>...</elem> occurrence in *xml*."""
    pattern = re.compile("<%s>(.*?)</%s>" % (elem, elem), re.DOTALL)
    return [found.group(1) for found in pattern.finditer(xml)]
# ANSI escape sequences used to colorize terminal output; blanked out by
# print_translations() when color is disabled.
GREEN = "\033[1;32m";
DEFAULT = "\033[0;49m";
BOLD = "\033[1m";
UNDERLINE = "\033[4m";
NORMAL = "\033[m";
RED = "\033[1;31m";
# NOTE(review): BOLD is re-assigned here with the same value as above.
BOLD = "\033[1m";
# Matches any character that is not a-z, A-Z or a CJK ideograph
# (U+4E00-U+9FA5); used to blank out unsupported query characters.
queryre = re.compile(u"[^a-zA-Z\u4e00-\u9fa5]", re.DOTALL);
def fix_query_string(queryword):
    # Replace characters outside the supported alphabet with spaces.
    # Python-2 semantics: assumes `queryword` is a utf-8 encoded byte
    # string -- TODO confirm all callers pass utf-8 bytes.
    match = re.search(queryre, queryword.decode("utf-8"));
    if not match:
        return queryword;
    # NOTE(review): the substitution runs on the *undecoded* byte string
    # while the search above ran on the decoded one -- verify the intended
    # behavior for multi-byte (CJK) input.
    ret = queryre.sub(" ", queryword);
    return ret;
def crawl_xml(queryword):
    """Fetch the youdao translation XML for *queryword* (no caching)."""
    query = fix_query_string(queryword)
    url = ("http://fanyi.youdao.com/openapi.do?"
        + "keyfrom=3zsocom&key=1620414302&type=data&doctype=xml&version=1.1&q="
        + urllib.quote_plus(query))
    return urllib2.urlopen(url).read()
def print_translations(xml, with_color, detailed):
    """Pretty-print a youdao translation XML reply to stdout.

    with_color -- when False, the module-level ANSI color constants are
                  blanked (NOTE(review): this mutates the globals for the
                  rest of the process, not just this call).
    detailed   -- currently unused except by commented-out code below.
    """
    #print xml;
    global GREEN;
    global DEFAULT;
    global BOLD;
    global UNDERLINE;
    global NORMAL;
    global RED;
    if not with_color:
        GREEN = "";
        DEFAULT = "";
        BOLD = "";
        UNDERLINE = "";
        NORMAL = "";
        RED = "";
    # Header line: the queried word, its phonetic symbol (if any) and the
    # "paragraph" field from the reply.
    original_query = get_elements(xml, "query");
    queryword = get_text(original_query[0]);
    custom_translations = get_elements(xml, "basic");
    translated = False;
    el_phonetic = get_elements(xml, "phonetic");
    phonetic_symbol = " ";
    if not len(el_phonetic) <= 0:
        phonetic_symbol = " [" + get_text(get_elements(xml, "phonetic")[0]) + "] ";
    paragraph_symbol = get_text(get_elements(xml, "paragraph")[0]);
    print BOLD + UNDERLINE + queryword + NORMAL + phonetic_symbol + paragraph_symbol;
    #sys.stdout.buffer.write(phonetic_symbol.encode("utf-8"));
    #print phonetic_symbol;
    # Dictionary ("basic") translations: up to five explanation lines.
    for cus in custom_translations:
        source = "youdao:"; #get_elements_by_path(cus, "basic/explains");
        print RED + "Translations from " + source + DEFAULT;
        contents = get_elements_by_path(cus, "explains/ex");
        for content in contents[0:5]:
            print "	" + GREEN + get_text(content) + DEFAULT;
        translated = True;
    # Web-phrase translations: key plus up to five ';'-joined values.
    yodao_translations = get_elements(xml, "web");
    printed = False;
    for trans in yodao_translations:
        webtrans = get_elements(trans, "explain");
        for web in webtrans[0:5]:
            if not printed:
                print RED + "Translations from web:" + DEFAULT;
                printed = True;
            keys = get_elements(web, "key");
            values = get_elements_by_path(web, "value/ex");
            #summaries = get_elements_by_path(web, "trans/summary");
            key = keys[0].strip();
            # NOTE(review): `value` and `translated` are assigned but never used.
            value = values[0].strip();
            web_value = "";
            for v in values[0:5]:
                web_value += get_text(v.strip()) + ";";
            #summary = summaries[0].strip();
            #lines = get_elements(summary, "line");
            print " " + BOLD + get_text(key) + ":\t" +DEFAULT + GREEN + get_text(web_value) + NORMAL;
            #for line in lines:
            #    print GREEN + get_text(line) + DEFAULT;
            #print get_text(summary) + DEFAULT;
            #translated = True;
            #if not detailed:
            #    break
def usage():
    """Print the command-line usage string."""
    print("usage: dict.py word_to_translate")
def main(argv):
    """Entry point: translate argv[0]; optional argv[1] toggles color ("True")."""
    if not argv:
        usage()
        #debug();
        sys.exit(1)
    xml = crawl_xml_incache(argv[0])
    is_color = argv[1] if len(argv) == 2 else "True"
    print_translations(xml, is_color == "True", False)
# Script entry point: forward command-line arguments (sans program name).
if __name__ == "__main__":
    main(sys.argv[1:]);
"""
This file contains code to measure the performance of the system
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import logging
LOGGER = logging.getLogger(__name__)
import time
import re
# CATAN
import catan.db
import catan.globals as G
def log_database_counts(node_dict):
    """
    This function writes the number of persons in the database that
    originated from each node to a log file (appended with a timestamp).

    Primarily used for testing synchronization of the node databases.
    """
    timestamp = int(round(time.time()))
    with open(G.METRICS_DBSYNC_LOG_FILENAME, 'a') as log:
        log.write("Time = %d, Nodes = %s\n" % (timestamp, str(node_dict)))
def log_person_update(node_message):
    """
    Log the propagation delay of a person update carried in *node_message*,
    based on the <<timestamp>> marker embedded in its description/message
    text. Called from catan_services.
    """
    db_obj = catan.db.CatanDatabaseObject(node_message.data)

    diff_time = None

    if db_obj.person_description:
        if db_obj.person_description.person_description:
            description = db_obj.person_description.person_description
            diff_time = extract_time_diff(description)
            msg_type = "Person"
    elif db_obj.person_message:
        if db_obj.person_message.person_message:
            message = db_obj.person_message.person_message
            diff_time = extract_time_diff(message)
            msg_type = "Message"

    # BUGFIX: compare against None explicitly -- a legitimate 0-second
    # delay is falsy and used to be silently dropped.
    if diff_time is not None:
        with open(G.METRICS_UPDATE_LOG_FILENAME, 'a') as log:
            log.write("Time = %d, Delay = %d, Node = %d, Type = %s\n"
                      % (int(round(time.time())), diff_time,
                         node_message.source, msg_type))
def extract_time_diff(text):
    """
    Returns the difference between the current time and the time
    in the embedded << >> tag (encoded in POSIX time),
    or None if a matching tag cannot be found.
    """
    match = re.search(r'<<(.*)>>', text)
    if match is None:
        return None
    embedded_time = float(match.group(1))
    return round(time.time() - embedded_time)