seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
551020468 | # Copyright (C) 2021 Intel Corporation.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import lxml
def add_child(element, tag, text=None, **kwargs):
    """Create an element named ``tag``, append it under ``element`` and return it.

    Args:
        element: parent lxml element the new child is appended to.
        tag: tag name for the new child element.
        text: optional text content for the child.
        **kwargs: attribute name/value pairs set on the child (values must
            be strings, per the lxml ``set`` API).

    Returns:
        The newly created child element.
    """
    # BUG FIX: a bare ``import lxml`` does not load the ``etree`` submodule,
    # so ``lxml.etree.Element`` can raise AttributeError unless some other
    # module happened to import it first.  Import it explicitly here.
    import lxml.etree
    child = lxml.etree.Element(tag)
    child.text = text
    for k, v in kwargs.items():
        child.set(k, v)
    element.append(child)
    return child
def get_node(etree, xpath):
    """Evaluate ``xpath`` against ``etree`` and return the single matching node.

    Returns None when there is no match.  Asserts that at most one node
    matched, since callers expect a unique result.
    """
    matches = etree.xpath(xpath)
    assert len(matches) <= 1, \
        "Internal error: cannot get texts from multiple nodes at a time. " \
        "Rerun the Board Inspector with `--loglevel debug`. If this issue persists, " \
        "log a new issue at https://github.com/projectacrn/acrn-hypervisor/issues and attach the full logs."
    if not matches:
        return None
    return matches[0]
| null | misc/config_tools/board_inspector/extractors/helpers.py | helpers.py | py | 723 | python | en | code | null | code-starcoder2 | 51 |
626187006 |
'''
Part of the code is drawn from
https://github.com/lixucuhk/adversarial-attack-on-GMM-i-vector-based-speaker-verification-systems
Paper:
Adversarial Attacks on GMM i-vector based Speaker Verification Systems
'''
import torch
import kaldi_io
class PLDA(object):
def __init__(self, mdlfile, random=False, device="cpu"):
    """Load a text-format Kaldi PLDA model, or build a dummy all-ones model.

    Args:
        mdlfile: path to a text-format PLDA model; parsed positionally as
            mean vector, transform matrix, then psi vector.  Ignored when
            ``random`` is True.
        random: when True, skip file parsing and fill mean/transform/psi
            with ones at a fixed dim of 600 (useful for testing).
        device: torch device string all model tensors are placed on.
    """
    self.device = device
    if random == True:
        self.dim = 600
        self.mean = torch.ones(self.dim, device=self.device)
        self.transform = torch.ones(self.dim, self.dim, device=self.device)
        self.psi = torch.ones(self.dim, device=self.device)
    else:
        rdfile = open(mdlfile, 'r')
        line = rdfile.readline()
        # First line holds the mean vector; [2:-1] drops the leading tag
        # tokens and the trailing bracket.  The model dim is inferred from
        # the number of mean entries.
        data = line.split()[2:-1]
        self.dim = len(data)
        for i in range(self.dim):
            data[i] = float(data[i])
        self.mean = torch.tensor(data, device=self.device)
        line = rdfile.readline()
        line = rdfile.readline()
        # Next self.dim lines: rows of the diagonalizing transform matrix.
        # NOTE(review): assumes exactly two leading tokens per row and a
        # trailing token -- confirm against the Kaldi text PLDA format.
        transform_matrix = []
        for i in range(self.dim):
            data = line.split(' ')[2:-1]
            for j in range(self.dim):
                data[j] = float(data[j])
            transform_matrix.append(data)
            line = rdfile.readline()
        self.transform = torch.tensor(transform_matrix, device=self.device)
        # Final line: the psi (between-class variance) diagonal.
        data = line.split()[1:-1]
        for i in range(self.dim):
            data[i] = float(data[i])
        self.psi = torch.tensor(data, device=self.device)
        rdfile.close()
def ReadIvectors(self, ivectorfile):
    """Read every i-vector from a Kaldi scp file.

    Returns:
        A pair ``(keys, data)``: utterance keys and the matching i-vectors
        as plain Python lists of floats.
    """
    keys = []
    data = []
    count = 0
    for utt_key, vec in kaldi_io.read_vec_flt_scp(ivectorfile):
        count += 1
        keys.append(utt_key)
        data.append(vec.tolist())
    print('totally %d ivectors' % (count,))
    return keys, data
def TransformIvector(self, ivector, num_examples, simple_length_norm, normalize_length):
    """Project an i-vector into the PLDA space and optionally length-normalize it.

    Args:
        ivector: raw i-vector tensor of shape (dim,).
        num_examples: number of utterances averaged into this i-vector.
        simple_length_norm: when True, rescale to norm sqrt(dim).
        normalize_length: when True (and simple_length_norm is False), use
            the model-based normalization factor instead.

    Returns:
        The transformed (and possibly rescaled) i-vector tensor.
    """
    trans_ivector = torch.matmul(self.transform, ivector - self.mean)
    factor = 1.0
    if simple_length_norm == True:
        # BUG FIX: the original called torch.sqrt(self.dim) with a plain
        # int, which torch.sqrt rejects (it requires a Tensor).  Compute
        # the scalar square root directly.
        factor = (self.dim ** 0.5) / torch.norm(trans_ivector, 2)
    elif normalize_length == True:
        factor = self.GetNormalizaionFactor(trans_ivector, num_examples)
    trans_ivector = trans_ivector * factor
    return trans_ivector
def GetNormalizaionFactor(self, trans_ivector, num_examples):
    """Return the Kaldi-style length-normalization scale for a transformed i-vector."""
    squared = torch.pow(trans_ivector, 2)
    inv_covar = (self.psi + 1.0 / num_examples).reciprocal()
    return torch.sqrt(self.dim / torch.dot(inv_covar, squared))
def ComputeScores_loop(self, trans_trainivector, num_examples, trans_testivector):
    """Reference (per-dimension loop) PLDA log-likelihood-ratio scoring.

    Both i-vectors must already be transformed via TransformIvector.
    Returns ``log p(test | same speaker) - log p(test | any speaker)``.
    """
    #### work out loglike_given_class
    mean = torch.zeros(self.dim, device=self.device)
    variance = torch.zeros(self.dim, device=self.device)
    # Predictive mean/variance of the test vector under the same-speaker
    # hypothesis, given the (averaged) train i-vector.
    for i in range(self.dim):
        mean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]
        variance[i] = 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)
    logdet = torch.sum(torch.log(variance))
    sqdiff = torch.pow(trans_testivector-mean, 2)
    variance = 1.0/variance
    # Diagonal-Gaussian log-density of the test vector (same-speaker case).
    loglike_given_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance))
    ### work out loglike_without_class
    sqdiff = torch.pow(trans_testivector, 2)
    variance = self.psi + 1.0
    logdet = torch.sum(torch.log(variance))
    variance = 1.0/variance
    loglike_without_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance))
    loglike_ratio = loglike_given_class - loglike_without_class
    return loglike_ratio
## no for loop and batch train_ivectors version
def ComputeScores(self, trans_trainivector, num_examples, trans_testivector):
    """Vectorized PLDA LLR scoring over a batch of train i-vectors.

    Args:
        trans_trainivector: (n, dim) batch of transformed train i-vectors.
        num_examples: must be 1 (asserted below).
        trans_testivector: (dim,) transformed test i-vector.

    Returns:
        (n,) tensor of log-likelihood ratios, one per train i-vector.
    """
    #### work out loglike_given_class
    n_train_ivectors = trans_trainivector.shape[0]
    assert num_examples == 1
    # NOTE(review): these two allocations are immediately overwritten
    # below -- dead work kept from the loop version.
    mean = torch.zeros((n_train_ivectors, self.dim), device=self.device)
    variance = torch.zeros((n_train_ivectors, self.dim), device=self.device)
    # Broadcast the per-dimension predictive posterior over the batch.
    mean = num_examples * self.psi / (num_examples * self.psi + 1.0) * trans_trainivector # (n, dim)
    variance = (1.0 + self.psi / (num_examples * self.psi + 1.0)).expand(n_train_ivectors, self.dim) # (n, dim)
    logdet = torch.sum(torch.log(variance), dim=1) # (n, )
    sqdiff = torch.pow(trans_testivector-mean, 2) # (n, dim)
    variance = 1.0/variance # (n, dim)
    loglike_given_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.sum(sqdiff * variance, axis=1)) # (n, )
    ### work out loglike_without_class
    sqdiff = torch.pow(trans_testivector, 2) # (dim, )
    variance = self.psi + 1.0 # (dim, )
    logdet = torch.sum(torch.log(variance)) # scalar
    variance = 1.0/variance # (dim, )
    loglike_without_class = -0.5*(logdet + torch.log(2*torch.tensor(3.1415926, device=self.device))*self.dim + torch.dot(sqdiff, variance)) # scalar
    loglike_ratio = loglike_given_class - loglike_without_class # (n,)
    return loglike_ratio
def DRV_TransformIvector(self, ivector, num_examples, simple_length_norm, normalize_length):
    """Jacobian of TransformIvector with respect to the raw i-vector.

    Returns a (dim, dim) matrix.  Only the simple_length_norm == False
    cases are implemented (see comment below).
    """
    ############ Currently we only consider simple_length_norm = False situation.
    if normalize_length == True:
        trans_ivector = torch.matmul(self.transform, ivector-self.mean)
        factor = 1.0
        factor = self.GetNormalizaionFactor(trans_ivector, num_examples)
        norm_drv = torch.zeros(self.dim, self.dim, device=self.device)
        trans_ivector_sq = torch.pow(trans_ivector, 2)
        # Shared per-column contribution from d(factor)/d(trans_ivector).
        common_vector = torch.matmul(torch.diag(num_examples/(num_examples*self.psi+1)), \
            -1*trans_ivector_sq*torch.pow(factor, 3)/self.dim)
        for i in range(self.dim):
            norm_drv[:,i] += common_vector
            norm_drv[i][i] += factor
        # Chain rule through the linear transform.
        transform_drv = torch.matmul(self.transform.t(), norm_drv)
    else:
        transform_drv = self.transform.t()
    return transform_drv
def DRV_Scores(self, trans_trainivector, num_examples, trans_testivector):
    """Derivative of the PLDA LLR score w.r.t. the transformed test i-vector.

    Returns:
        (dim,) tensor with the per-dimension gradient of the score.
    """
    # BUG FIX: these work tensors were implicitly allocated on the CPU,
    # which crashes when the model tensors live on a GPU.  Allocate them
    # on self.device like every other method in this class.
    mean = torch.zeros(self.dim, device=self.device)
    v1 = torch.zeros(self.dim, device=self.device)
    for i in range(self.dim):
        mean[i] = num_examples*self.psi[i]/(num_examples*self.psi[i]+1.0)*trans_trainivector[i]
        v1[i] = 1.0+self.psi[i]/(num_examples*self.psi[i]+1.0)
    v1 = 1.0/v1
    v2 = 1.0/(1+self.psi)
    score_drv = torch.matmul(torch.diag(trans_testivector), v2)-torch.matmul(torch.diag(trans_testivector-mean), v1)
    return score_drv
def to(self, device):
    """Move every model tensor to ``device`` (no-op when already there)."""
    if device == self.device:
        return
    self.device = device
    self.mean = self.mean.to(self.device)
    self.transform = self.transform.to(self.device)
    self.psi = self.psi.to(self.device)
362551611 | import boto3
import csv
import logging
import os
import re
import time
from datetime import datetime, timedelta, timezone
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from usaspending_api.awards.models import TransactionFPDS, TransactionNormalized, Award
from usaspending_api.broker.helpers.award_category_helper import award_types
from usaspending_api.broker.helpers.find_related_awards import find_related_awards
from usaspending_api.broker.helpers.get_business_categories import get_business_categories
from usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date
from usaspending_api.broker.helpers.set_legal_entity_boolean_fields import set_legal_entity_boolean_fields
from usaspending_api.common.helpers.dict_helpers import upper_case_dict_values
from usaspending_api.common.helpers.etl_helpers import update_c_to_d_linkages
from usaspending_api.common.helpers.date_helper import fy
from usaspending_api.common.helpers.timing_helpers import timer
from usaspending_api.etl.award_helpers import update_awards, update_contract_awards
from usaspending_api.etl.broker_etl_helpers import dictfetchall
from usaspending_api.etl.management.load_base import load_data_into_model, format_date, create_location
from usaspending_api.references.models import LegalEntity, Agency
from usaspending_api.common.retrieve_file_from_uri import RetrieveFileFromUri
logger = logging.getLogger("console")
AWARD_UPDATE_ID_LIST = []
BATCH_FETCH_SIZE = 25000
def read_afa_ids_from_file(afa_id_file_path):
    """Return the set of non-empty, right-stripped ID lines from the file at the given URI."""
    with RetrieveFileFromUri(afa_id_file_path).get_file_object() as f:
        return {line.decode("utf-8").rstrip() for line in f if line}
class Command(BaseCommand):
help = "Sync USAspending DB FPDS data using Broker for new or modified records and S3 for deleted IDs"
@staticmethod
def get_deleted_fpds_data_from_s3(date):
    """Collect IDs of FPDS records deleted on or after ``date``.

    Reads "*_delete_records_(IDV|award)*" CSVs whose filename date prefix
    (MM-DD-YYYY) is >= ``date`` -- from a local directory when
    settings.IS_LOCAL, otherwise from the configured S3 bucket.  Returns a
    list of IDs (first CSV column, header skipped); duplicates possible.
    """
    ids_to_delete = []
    regex_str = ".*_delete_records_(IDV|award).*"
    if settings.IS_LOCAL:
        for file in os.listdir(settings.CSV_LOCAL_PATH):
            if re.search(regex_str, file) and datetime.strptime(file[: file.find("_")], "%m-%d-%Y").date() >= date:
                with open(settings.CSV_LOCAL_PATH + file, "r") as current_file:
                    # open file, split string to array, skip the header
                    reader = csv.reader(current_file.read().splitlines())
                    next(reader)
                    unique_key_list = [rows[0] for rows in reader]
                    ids_to_delete += unique_key_list
    else:
        # Connect to AWS
        aws_region = settings.USASPENDING_AWS_REGION
        fpds_bucket_name = settings.FPDS_BUCKET_NAME
        if not (aws_region and fpds_bucket_name):
            raise Exception("Missing required environment variables: USASPENDING_AWS_REGION, FPDS_BUCKET_NAME")
        s3client = boto3.client("s3", region_name=aws_region)
        s3resource = boto3.resource("s3", region_name=aws_region)
        s3_bucket = s3resource.Bucket(fpds_bucket_name)
        # make an array of all the keys in the bucket
        file_list = [item.key for item in s3_bucket.objects.all()]
        # Only use files that match the date we're currently checking
        for item in file_list:
            # if the date on the file is the same day as we're checking
            # ("/" not in item excludes objects inside sub-folders)
            if (
                re.search(regex_str, item)
                and "/" not in item
                and datetime.strptime(item[: item.find("_")], "%m-%d-%Y").date() >= date
            ):
                s3_item = s3client.get_object(Bucket=fpds_bucket_name, Key=item)
                reader = csv.reader(s3_item["Body"].read().decode("utf-8").splitlines())
                # skip the header, the reader doesn't ignore it for some reason
                next(reader)
                # make an array of all the detached_award_procurement_ids
                unique_key_list = [rows[0] for rows in reader]
                ids_to_delete += unique_key_list
    logger.info("Number of records to delete: %s" % str(len(ids_to_delete)))
    return ids_to_delete
@staticmethod
def get_fpds_transaction_ids(date):
    """Return Broker detached_award_procurement_ids modified on or after ``date``."""
    cursor = connections["data_broker"].cursor()
    sql = "SELECT detached_award_procurement_id FROM detached_award_procurement WHERE updated_at >= %s;"
    cursor.execute(sql, [date])
    changed_ids = [row[0] for row in cursor.fetchall()]
    logger.info("Number of records to insert/update: %s" % str(len(changed_ids)))
    return changed_ids
@staticmethod
def fetch_fpds_data_generator(dap_uid_list):
    """Yield full Broker FPDS rows for the given IDs in batches of BATCH_FETCH_SIZE.

    Each yielded value is a list of row dicts.  The IDs are interpolated
    into the IN (...) clause as text; they originate as integers upstream
    (DB query / argparse type=int), which keeps this safe -- confirm if a
    new caller passes arbitrary strings.
    """
    start_time = datetime.now()
    db_cursor = connections["data_broker"].cursor()
    db_query = "SELECT * FROM detached_award_procurement WHERE detached_award_procurement_id IN ({});"
    total_uid_count = len(dap_uid_list)
    for i in range(0, total_uid_count, BATCH_FETCH_SIZE):
        max_index = i + BATCH_FETCH_SIZE if i + BATCH_FETCH_SIZE < total_uid_count else total_uid_count
        fpds_ids_batch = dap_uid_list[i:max_index]
        log_msg = "[{}] Fetching {}-{} out of {} records from broker"
        logger.info(log_msg.format(datetime.now() - start_time, i + 1, max_index, total_uid_count))
        db_cursor.execute(db_query.format(",".join(str(id) for id in fpds_ids_batch)))
        yield dictfetchall(db_cursor)  # this returns an OrderedDict
@staticmethod
def delete_stale_fpds(ids_to_delete):
    """Remove transactions (and orphaned awards) tied to deleted FPDS records.

    Builds raw-SQL deletes/updates so FK constraints do not block removal.
    Awards that still have other transactions are queued in
    AWARD_UPDATE_ID_LIST for recalculation instead of being deleted.
    """
    logger.info("Starting deletion of stale FPDS data")
    transactions = TransactionNormalized.objects.filter(
        contract_data__detached_award_procurement_id__in=ids_to_delete
    )
    update_award_ids, delete_award_ids = find_related_awards(transactions)
    delete_transaction_ids = [delete_result[0] for delete_result in transactions.values_list("id")]
    delete_transaction_str_ids = ",".join([str(deleted_result) for deleted_result in delete_transaction_ids])
    update_award_str_ids = ",".join([str(update_result) for update_result in update_award_ids])
    delete_award_str_ids = ",".join([str(deleted_result) for deleted_result in delete_award_ids])
    db_cursor = connections[DEFAULT_DB_ALIAS].cursor()
    queries = []
    if delete_transaction_ids:
        # Child FPDS rows first, then normalized rows, then the delta table.
        fpds = "DELETE FROM transaction_fpds tf WHERE tf.transaction_id IN ({});".format(delete_transaction_str_ids)
        tn = "DELETE FROM transaction_normalized tn WHERE tn.id IN ({});".format(delete_transaction_str_ids)
        td = "DELETE FROM transaction_delta td WHERE td.transaction_id in ({});".format(delete_transaction_str_ids)
        queries.extend([fpds, tn, td])
    # Update Awards
    if update_award_ids:
        # Removing FK values from awards so constraints don't cause script to fail
        # Adding to AWARD_UPDATE_ID_LIST so the transaction FKs will be recalculated
        AWARD_UPDATE_ID_LIST.extend(update_award_ids)
        query_str = (
            "UPDATE awards SET latest_transaction_id = null, earliest_transaction_id = null WHERE id IN ({});"
        )
        update_awards_query = query_str.format(update_award_str_ids)
        queries.append(update_awards_query)
    if delete_award_ids:
        # Financial Accounts by Awards
        query_str = "UPDATE financial_accounts_by_awards SET award_id = null WHERE award_id IN ({});"
        fa = query_str.format(delete_award_str_ids)
        # Subawards
        sub = "UPDATE subaward SET award_id = null WHERE award_id IN ({});".format(delete_award_str_ids)
        # Parent Awards
        pa_updates = "UPDATE parent_award SET parent_award_id = null WHERE parent_award_id IN ({});".format(
            delete_award_str_ids
        )
        pa_deletes = "DELETE FROM parent_award WHERE award_id IN ({});".format(delete_award_str_ids)
        # Delete Subawards
        delete_awards_query = "DELETE FROM awards a WHERE a.id IN ({});".format(delete_award_str_ids)
        queries.extend([fa, sub, pa_updates, pa_deletes, delete_awards_query])
    if queries:
        # Single round trip: statements run in the order built above.
        db_query = "".join(queries)
        db_cursor.execute(db_query, [])
def insert_all_new_fpds(self, total_insert):
    """Fetch Broker FPDS rows in batches and insert each batch, logging the timing."""
    for batch in self.fetch_fpds_data_generator(total_insert):
        batch_started = time.perf_counter()
        self.insert_new_fpds(to_insert=batch, total_rows=len(batch))
        logger.info("Insertion took {:.2f}s".format(time.perf_counter() - batch_started))
def insert_new_fpds(self, to_insert, total_rows):
    """Create or update transaction records from a batch of Broker FPDS row dicts.

    For each row: builds recipient/POP locations and a LegalEntity, resolves
    awarding/funding agencies, gets-or-creates the summary Award, then either
    updates the existing TransactionNormalized + TransactionFPDS pair (keyed
    on detached_award_proc_unique) or creates a new pair.  Every touched
    award id is appended to the module-level AWARD_UPDATE_ID_LIST.

    Args:
        to_insert: list of Broker row dicts for this batch.
        total_rows: size of the batch (currently unused in the body).
    """
    # Broker column -> location-model field name maps fed to create_location().
    place_of_performance_field_map = {
        "location_country_code": "place_of_perform_country_c",
        "country_name": "place_of_perf_country_desc",
        "state_code": "place_of_performance_state",
        "state_name": "place_of_perfor_state_desc",
        "city_name": "place_of_perform_city_name",
        "county_name": "place_of_perform_county_na",
        "county_code": "place_of_perform_county_co",
        "zip_4a": "place_of_performance_zip4a",
        "congressional_code": "place_of_performance_congr",
        "zip_last4": "place_of_perform_zip_last4",
        "zip5": "place_of_performance_zip5",
    }
    legal_entity_location_field_map = {
        "location_country_code": "legal_entity_country_code",
        "country_name": "legal_entity_country_name",
        "state_code": "legal_entity_state_code",
        "state_name": "legal_entity_state_descrip",
        "city_name": "legal_entity_city_name",
        "county_name": "legal_entity_county_name",
        "county_code": "legal_entity_county_code",
        "address_line1": "legal_entity_address_line1",
        "address_line2": "legal_entity_address_line2",
        "address_line3": "legal_entity_address_line3",
        "zip4": "legal_entity_zip4",
        "congressional_code": "legal_entity_congressional",
        "zip_last4": "legal_entity_zip_last4",
        "zip5": "legal_entity_zip5",
    }
    fpds_normalized_field_map = {"type": "contract_award_type", "description": "award_description"}
    # Executive-compensation officer columns for the FPDS child record.
    fpds_field_map = {
        "officer_1_name": "high_comp_officer1_full_na",
        "officer_1_amount": "high_comp_officer1_amount",
        "officer_2_name": "high_comp_officer2_full_na",
        "officer_2_amount": "high_comp_officer2_amount",
        "officer_3_name": "high_comp_officer3_full_na",
        "officer_3_amount": "high_comp_officer3_amount",
        "officer_4_name": "high_comp_officer4_full_na",
        "officer_4_amount": "high_comp_officer4_amount",
        "officer_5_name": "high_comp_officer5_full_na",
        "officer_5_amount": "high_comp_officer5_amount",
    }
    for index, row in enumerate(to_insert, 1):
        upper_case_dict_values(row)
        # Create new LegalEntityLocation and LegalEntity from the row data
        legal_entity_location = create_location(
            legal_entity_location_field_map, row, {"recipient_flag": True, "is_fpds": True}
        )
        recipient_name = row["awardee_or_recipient_legal"]
        # NOTE(review): a new LegalEntity is created per row (no dedup here).
        legal_entity = LegalEntity.objects.create(
            recipient_unique_id=row["awardee_or_recipient_uniqu"],
            recipient_name=recipient_name if recipient_name is not None else "",
        )
        legal_entity_value_map = {"location": legal_entity_location, "is_fpds": True}
        set_legal_entity_boolean_fields(row)
        legal_entity = load_data_into_model(legal_entity, row, value_map=legal_entity_value_map, save=True)
        # Create the place of performance location
        pop_location = create_location(place_of_performance_field_map, row, {"place_of_performance_flag": True})
        # Find the toptier awards from the subtier awards
        awarding_agency = Agency.get_by_subtier_only(row["awarding_sub_tier_agency_c"])
        funding_agency = Agency.get_by_subtier_only(row["funding_sub_tier_agency_co"])
        # Create the summary Award
        (created, award) = Award.get_or_create_summary_award(
            generated_unique_award_id=row["unique_award_key"], piid=row["piid"]
        )
        award.parent_award_piid = row.get("parent_award_id")
        award.save()
        # Append row to list of Awards updated
        AWARD_UPDATE_ID_LIST.append(award.id)
        # Pick the datetime format by string length; fall back to the
        # fractional-seconds format for everything else.
        if row["last_modified"] and len(str(row["last_modified"])) == len("YYYY-MM-DD HH:MM:SS"):  # 19 characters
            dt_fmt = "%Y-%m-%d %H:%M:%S"
        else:
            dt_fmt = "%Y-%m-%d %H:%M:%S.%f"  # try using this even if last_modified isn't a valid string
        try:
            last_mod_date = datetime.strptime(str(row["last_modified"]), dt_fmt).date()
        except ValueError:  # handle odd-string formats and NULLs from the upstream FPDS-NG system
            info_message = "Invalid value '{}' does not match: '{}'".format(row["last_modified"], dt_fmt)
            logger.info(info_message)
            last_mod_date = None
        award_type, award_type_desc = award_types(row)
        parent_txn_value_map = {
            "award": award,
            "awarding_agency": awarding_agency,
            "funding_agency": funding_agency,
            "recipient": legal_entity,
            "place_of_performance": pop_location,
            "period_of_performance_start_date": format_date(row["period_of_performance_star"]),
            "period_of_performance_current_end_date": format_date(row["period_of_performance_curr"]),
            "action_date": format_date(row["action_date"]),
            "last_modified_date": last_mod_date,
            "transaction_unique_id": row["detached_award_proc_unique"],
            "is_fpds": True,
            "type": award_type,
            "type_description": award_type_desc,
            "business_categories": get_business_categories(row=row, data_type="fpds"),
        }
        transaction_normalized_dict = load_data_into_model(
            TransactionNormalized(),  # thrown away
            row,
            field_map=fpds_normalized_field_map,
            value_map=parent_txn_value_map,
            as_dict=True,
        )
        contract_instance = load_data_into_model(
            # TransactionFPDS() is "thrown" away
            TransactionFPDS(),
            row,
            field_map=fpds_field_map,
            as_dict=True,
        )
        detached_award_proc_unique = contract_instance["detached_award_proc_unique"]
        unique_fpds = TransactionFPDS.objects.filter(detached_award_proc_unique=detached_award_proc_unique)
        if unique_fpds.first():
            # Record already exists: refresh both tables in place.
            transaction_normalized_dict["update_date"] = datetime.now(timezone.utc)
            transaction_normalized_dict["fiscal_year"] = fy(transaction_normalized_dict["action_date"])
            # update TransactionNormalized
            TransactionNormalized.objects.filter(id=unique_fpds.first().transaction.id).update(
                **transaction_normalized_dict
            )
            # update TransactionFPDS
            unique_fpds.update(**contract_instance)
        else:
            # create TransactionNormalized
            transaction = TransactionNormalized(**transaction_normalized_dict)
            transaction.save()
            # create TransactionFPDS
            transaction_fpds = TransactionFPDS(transaction=transaction, **contract_instance)
            transaction_fpds.save()
        # Update legal entity to map back to transaction
        legal_entity.transaction_unique_id = detached_award_proc_unique
        legal_entity.save()
def perform_load(self, ids_to_delete, ids_to_insert):
    """Delete stale FPDS records, then insert/update new ones and refresh awards.

    Args:
        ids_to_delete: detached_award_procurement_ids to purge.
        ids_to_insert: detached_award_procurement_ids to (re)load from Broker.
    """
    if len(ids_to_delete) > 0:
        with timer("deletion of all stale FPDS data", logger.info):
            self.delete_stale_fpds(ids_to_delete=ids_to_delete)
    else:
        logger.info("No FPDS records to delete at this juncture")
    if len(ids_to_insert) > 0:
        # Add FPDS records
        with timer("insertion of new FPDS data in batches", logger.info):
            self.insert_all_new_fpds(ids_to_insert)
        # Update Awards based on changed FPDS records
        with timer("updating awards to reflect their latest associated transaction info", logger.info):
            award_record_count = update_awards(tuple(AWARD_UPDATE_ID_LIST))
            logger.info("{} awards updated from their transactional data".format(award_record_count))
        # Update FPDS-specific Awards based on the info in child transactions
        with timer("updating contract-specific awards to reflect their latest transaction info", logger.info):
            award_record_count = update_contract_awards(tuple(AWARD_UPDATE_ID_LIST))
            logger.info("{} awards updated FPDS-specific and exec comp data".format(award_record_count))
        # Check the linkages from file C to FPDS records and update any that are missing
        with timer("updating C->D linkages", logger.info):
            update_c_to_d_linkages("contract")
    else:
        logger.info("No FPDS records to insert or modify at this juncture")
def nightly_loader(self, start_date):
    """Run the incremental load for records changed since ``start_date``.

    When ``start_date`` is falsy, resume from the stored last-load date
    (defaulting to 24 hours ago on the first ever run).
    """
    logger.info("==== Starting FPDS nightly data load ====")
    if start_date:
        date = start_date
        date = datetime.strptime(date, "%Y-%m-%d").date()
    else:
        default_last_load_date = datetime.now(timezone.utc) - timedelta(days=1)
        date = get_last_load_date("fpds", default=default_last_load_date).date()
    # Captured BEFORE processing so records changed mid-run are re-picked-up
    # on the next run rather than skipped.
    processing_start_datetime = datetime.now(timezone.utc)
    logger.info("Processing data for FPDS starting from %s" % date)
    with timer("retrieval of new/modified FPDS data ID list", logger.info):
        ids_to_insert = self.get_fpds_transaction_ids(date=date)
    with timer("retrieval of deleted FPDS IDs", logger.info):
        ids_to_delete = self.get_deleted_fpds_data_from_s3(date=date)
    self.perform_load(ids_to_delete, ids_to_insert)
    # Update the date for the last time the data load was run
    update_last_load_date("fpds", processing_start_datetime)
    logger.info("FPDS NIGHTLY UPDATE COMPLETE")
def load_specific_transactions(self, detached_award_procurement_ids):
    """Force-reload specific FPDS transactions: delete then re-insert the given IDs."""
    logger.info("==== Starting FPDS (re)load of specific transactions ====")
    # Passing the same IDs to both arguments deletes any existing copies
    # and re-fetches fresh data from Broker.
    self.perform_load(detached_award_procurement_ids, detached_award_procurement_ids)
    logger.info("FPDS SPECIFIC (RE)LOAD COMPLETE")
def add_arguments(self, parser):
    """Register CLI flags; --date and --detached-award-procurement-ids are mutually exclusive."""
    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        "--date",
        dest="date",
        type=str,
        help="(OPTIONAL) Date from which to start the nightly loader. Expected format: YYYY-MM-DD",
    )
    group.add_argument(
        "--detached-award-procurement-ids",
        nargs="+",
        type=int,
        help="(OPTIONAL) detached_award_procurement_ids of FPDS transactions to load/reload from Broker",
    )
    # --id-file may be combined with either of the flags above.
    parser.add_argument(
        "--id-file",
        metavar="FILEPATH",
        type=str,
        help="A file containing only transaction IDs (detached_award_procurement_id) "
        "to reload, one ID per line. Nonexistent IDs will be ignored.",
    )
@transaction.atomic
def handle(self, *args, **options):
    """Entry point: reload specific IDs when given, otherwise run the nightly load.

    Runs inside a single DB transaction so a failure rolls everything back.
    """
    if any([options["detached_award_procurement_ids"], options["id_file"]]):
        ids_from_file = read_afa_ids_from_file(options["id_file"]) if options["id_file"] else set()
        explicit_ids = (
            set(options["detached_award_procurement_ids"]) if options["detached_award_procurement_ids"] else set()
        )
        # NOTE(review): file IDs are str while --detached-award-procurement-ids
        # are int, so this union can mix types -- confirm downstream handling.
        detached_award_procurement_ids = list(explicit_ids | ids_from_file)
        self.load_specific_transactions(detached_award_procurement_ids)
    else:
        self.nightly_loader(options["date"])
| null | usaspending_api/broker/management/commands/fpds_nightly_loader.py | fpds_nightly_loader.py | py | 20,780 | python | en | code | null | code-starcoder2 | 51 |
651153797 | class Solution:
def singleNumber(self, nums: list[int]) -> int:
    """Return the element that appears exactly once; all others appear twice.

    BUG FIX: the original annotation used ``List[int]`` without importing
    ``typing.List``, which raises NameError when the class is defined; the
    builtin generic ``list[int]`` needs no import.  Also replaces the
    set-based approach (O(n) extra space) with XOR folding: pairs cancel
    (x ^ x == 0), leaving the unique value -- O(n) time, O(1) space.

    Parameters
    ----------
    nums : list[int]
        Non-empty list in which exactly one value is unpaired.

    Returns
    -------
    int
        The unpaired value.
    """
    result = 0
    for num in nums:
        result ^= num
    return result
| null | hashtable/SingleNumber/main.py | main.py | py | 524 | python | en | code | null | code-starcoder2 | 51 |
214042706 | """empty message
Revision ID: 4c6632617022
Revises: f719fe7c700a
Create Date: 2017-02-15 22:00:21.421420
"""
# revision identifiers, used by Alembic.
revision = '4c6632617022'
down_revision = 'f719fe7c700a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Relax NOT NULL on production_needs.product and production_needs.product_in."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('product', 'product_in'):
        op.alter_column(
            'production_needs',
            column_name,
            existing_type=sa.INTEGER(),
            nullable=True,
        )
    # ### end Alembic commands ###
def downgrade():
    """Restore NOT NULL on production_needs.product_in and production_needs.product."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('product_in', 'product'):
        op.alter_column(
            'production_needs',
            column_name,
            existing_type=sa.INTEGER(),
            nullable=False,
        )
    # ### end Alembic commands ###
| null | migrations/versions/4c6632617022_.py | 4c6632617022_.py | py | 1,000 | python | en | code | null | code-starcoder2 | 51 |
12212919 | import requests
import calendar
import time
import json
import dateutil.parser
import datetime
import hashlib
from docassemble.base.util import *
from azure.storage.blob import BlockBlobService
AD_URL = "https://login.microsoftonline.com/a2pca.onmicrosoft.com/oauth2/token"
CITATION_LOOKUP_URL = 'https://a2papi.azurewebsites.net/api/case/citation'
CASE_LOOKUP_URL = 'https://a2papi.azurewebsites.net/api/case/cases'
SUBMIT_URL = 'https://a2papi.azurewebsites.net/api/request'
def fetch_citation_data(citation_number, county):
    """Look up a citation by number and county via the A2P API; return a result dict."""
    payload = {
        'num': citation_number,
        'county': county,
    }
    response = __do_request(CITATION_LOOKUP_URL, payload)
    return __format_response(response)
def fetch_case_data(first_name, last_name, dob, drivers_license, county):
    """Look up cases by personal details via the A2P API; return a result dict.

    ``dob`` is a date-like object; it is serialized as M/D/YYYY with no
    zero padding (matching what the API historically received).
    """
    payload = {
        'firstName': first_name,
        'lastName': last_name,
        'dateOfBirth': "%s/%s/%s" % (dob.month, dob.day, dob.year),
        'driversLicense': drivers_license,
        'county': county,
    }
    response = __do_request(CASE_LOOKUP_URL, payload)
    return __format_response(response)
def date_from_iso8601(date_string):
    """Parse an ISO-8601 timestamp string and return only its date component."""
    parsed = dateutil.parser.parse(date_string)
    return parsed.date()
def format_money(money_string):
    """Format a dollar amount as e.g. ``'$1,234.50'``.

    BUG FIX: despite the parameter name, ``'{:,.2f}'`` raises ValueError
    when given an actual string; coerce with float() so both numbers and
    numeric strings work (backward compatible with numeric callers).
    """
    return '${:,.2f}'.format(float(money_string))
def __format_response(response, request_body=None):
    """Normalize a requests.Response into a plain result dict.

    Keys: response_code, data, success, error -- plus request_body on
    success when one was supplied.
    """
    result = {'response_code': response.status_code}
    if response.ok:
        result['data'] = response.json()
        result['success'] = True
        result['error'] = None
        if request_body:
            result['request_body'] = request_body
    else:
        result['data'] = {}
        result['success'] = False
        result['error'] = response.text
    return result
def __do_request(url, params):
    """POST ``params`` as JSON to ``url`` with a fresh Azure AD bearer token.

    Fetches an OAuth2 client-credentials token on every call (no caching),
    then sends the request with that token.  Returns the requests.Response.
    """
    oauth_params = {
        'resource': '3b347c8c-3faa-4331-8273-a5f575997d4e',
        'grant_type': 'client_credentials',
        'client_id': __get_a2p_config()["client_id"],
        'client_secret': __get_a2p_config()["client_secret"],
        'scope': 'openid 3b347c8c-3faa-4331-8273-a5f575997d4e'
    }
    r = requests.post(AD_URL, oauth_params)
    data = r.json()
    access_token = data['access_token']
    headers = { 'Authorization': 'Bearer ' + access_token, 'Content-Type': 'application/json' }
    # data=None + json=params: the payload goes out as a JSON body.
    return requests.post(url, data=None, json=params, headers=headers)
def __get_a2p_config():
    """Return the 'a2p' section of the docassemble configuration (client creds, blob key)."""
    return get_config('a2p')
def __submit_image_from_url(url):
    """Download an image from ``url`` and upload it to the 'attachments' Azure blob container.

    The blob name is derived from a SHA-224 of the image bytes, so the same
    content always maps to the same blob.  Returns a metadata dict
    (fileName/blobName/size) for the A2P submission payload.
    """
    blob_service = BlockBlobService(account_name='a2pca', account_key=__get_a2p_config()['blob_account_key'])
    image_body = requests.get(url).content
    filename = 'a2p_daupload_' + hashlib.sha224(image_body).hexdigest()
    blob_service.create_blob_from_bytes('attachments', filename, image_body)
    return {
        "fileName": filename,
        "blobName": filename,
        "size": len(image_body)
    }
def build_submit_payload(data, attachment_urls):
    """Translate interview answers into the request body for the A2P submit endpoint.

    :param data: nested dict of interview answers (benefits, case_information, ...)
    :param attachment_urls: URLs of benefit-proof images; each is uploaded to
        blob storage first and referenced from the payload.
    :return: dict ready to be POSTed as JSON.
    """
    # Upload every attachment first and collect the blob metadata for the payload.
    benefit_files_data = []
    for url in attachment_urls:
        log("Uploading file: %s" % url)
        image_meta = __submit_image_from_url(url)
        benefit_files_data.append(image_meta)
    # Interview fields that may carry a '<field>_reason' explaining missing proof.
    # BUG FIX: 'tanf_no_proof' lacked a trailing comma, so implicit string
    # concatenation fused it with 'cal_works_no_proof' into one bogus key and
    # both reasons were silently dropped.
    no_proof_fields = [
        'calfresh_no_proof',
        'medi_cal_no_proof',
        'ssi_no_proof',
        'ssp_no_proof',
        'cr_ga_no_proof',
        'ihss_no_proof',
        'tanf_no_proof',
        'cal_works_no_proof',
        'capi_no_proof',
    ]
    no_docs_upload_comments = []
    for field in no_proof_fields:
        reason = data.get(field + "_reason")
        if reason:
            # BUG FIX: was `append("%s: %s" % field, reason)` -- two positional
            # args to append() and an under-filled format string (TypeError).
            no_docs_upload_comments.append("%s: %s" % (field, reason))
    # Robustness: tolerate a missing/None case_information block.
    case_information = data.get('case_information') or {}
    benefits = data.get('benefits', {}).get('elements', {})
    no_benefits = True
    for benefit_name in ['cal_fresh', 'ssi', 'ssp', 'medi_cal', 'cr_ga', 'ihss', 'cal_works', 'tanf', 'capi', 'other']:
        if benefits.get(benefit_name):
            no_benefits = False
    submitted_on = datetime.datetime.now().isoformat()
    on_other_benefits = benefits.get('other', False)
    other_benefits_desc = None
    if on_other_benefits:
        other_benefits_desc = data.get('other_benefits_name')
        no_benefits = False
    # Build one human-readable "Count N-<code>-<description>" line per charge.
    charges = case_information.get('charges', [])
    violDescriptions = []
    idx = 0
    for charge in charges:
        descr = []
        idx += 1
        descr.append("Count %s" % idx)
        if charge.get('chargeCode'):
            descr.append(charge.get('chargeCode'))
        # NOTE(review): assumes every charge has a violationDescription; a
        # missing one would make the join below raise -- confirm upstream.
        descr.append(charge.get('violationDescription'))
        violDescriptions.append("-".join(descr))
    additional_requests = data.get('additional_requests', {}).get('elements', {})
    difficultyToVisitCourtDueTo = data.get("difficult_open_text", "")
    for k, v in data.get('why_difficult', {}).get('elements', {}).items():
        if v:
            difficultyToVisitCourtDueTo += "/ " + k
    request_params = {
        "requestStatus": "Submitted",
        "petition": {
            "noBenefits": no_benefits,
            "onFoodStamps": benefits.get('cal_fresh', False),
            "onSuppSecIncome": benefits.get('ssi', False),
            "onSSP": benefits.get('ssp', False),
            "onMedical": benefits.get('medi_cal', False),
            "onCountyRelief": benefits.get('cr_ga', False),
            "onIHSS": benefits.get('ihss', False),
            "onCalWorks": benefits.get('cal_works', False),
            "onTANF": benefits.get('tanf', False),
            "onCAPI": benefits.get('capi', False),
            "benefitFiles": benefit_files_data,
            "rent": data.get('monthly_rent'),
            "mortgage": data.get('mortgage'),
            "phone": data.get('phone_bill'),
            "food": data.get('food'),
            "insurance": data.get('insurance'),
            "isBenefitsProof": len(attachment_urls) > 0,
            "isCivilAssessWaiver": False,
            "clothes": data.get('clothing'),
            "childSpousalSupp": data.get('child_spousal_support'),
            "carPayment": data.get('transportation'),
            "utilities": data.get('utilities'),
            "otherExpenses": [],
            "isMoreTimeToPay": additional_requests.get('extension', False),
            "isPaymentPlan": additional_requests.get('payment_plan', False),
            # NOTE(review): hard-coded True regardless of the interview answers
            # -- presumably intentional (every petition asks for a reduction).
            "isReductionOfPayment": True,
            "isCommunityService": additional_requests.get('community_service', False),
            "isOtherRequest": False,
            "otherRequestDesc": data.get('other_hardship'),
            "selectAllRights": True,
            "representByAttorneyRight": True,
            "speedyTrialRight": True,
            "presentEvidenceRight": True,
            "testifyUnderOathRight": True,
            "remainSilentRight": True,
            "isPleadGuilty": data.get('plea', '') == "agree_guilty",
            "isPleadNoContest": data.get('plea', '') == "agree_no_contest",
            "supportingFiles": [],
            "noDocsToUploadReason": "See comments",
            "noDocsToUploadComments": "\n".join(no_docs_upload_comments),
            "isDeclare": True,
            "onOtherBenefits": on_other_benefits,
            "onOtherBenefitsDesc": other_benefits_desc,
        },
        "caseInformation": {
            "caseNumber": case_information.get('caseNumber'),
            "citationDocumentId": case_information.get('documentid'),
            "citationNumber": case_information.get('citationNumber'),
            "civilAssessFee": case_information.get('civilAssessFee'),
            "county": data.get('county'),
            "fullName": case_information.get('firstName', '') + ' ' + case_information.get('lastName', ''),
            "totalDueAmt": case_information.get('totalDueAmt'),
            # BUG FIX: unconditionally indexing [0] raised IndexError when the
            # charge list was empty or absent.
            "violationDate": charges[0].get('violationDate') if charges else None,
            "violationDescription": "\n".join(violDescriptions),
        },
        "benefitsStatus": not no_benefits,
        "defendantInformation": {
            "incomeAmount": data.get('income'),
            "incomeFrequency": "Month",
            "totalFamilyMembers": data.get('residents'),
        },
        "survey": {
            "isAddressedTrafficMatter": data.get('tool_helpful', '') + ',' + data.get('tool_difficult', ''),
            "willYouVisitCourt": data.get('prefer'),
            "difficultyToVisitCourtDueTo": difficultyToVisitCourtDueTo,
        },
        "submittedById": "0",
        "judgment": "Submitted",
        "submittedByEmail": data.get('email'),
        "submittedOn": submitted_on,
        "needMoreInformation": [],
        "toolRecommendations": [],
        "judicialOrder": [],
        "auditInformation": [],
        "__v": 0
    }
    return request_params
def submit_interview(data, attachment_urls=[], debug=False):
    """Build the A2P payload from interview *data* and POST it to the submit endpoint.

    When *debug* is true, the formatted response also echoes the request payload.
    NOTE(review): the mutable default attachment_urls is never mutated here, so
    it is harmless, but callers should still pass their own list.
    """
    params = build_submit_payload(data, attachment_urls)
    log("Submitting Payload: %s" % params)
    res = __do_request(SUBMIT_URL, params)
    if debug:
        return __format_response(res, params)
    else:
        return __format_response(res)
#print(fetch_citation_data('CT98966', 'Tulare'))
# print(fetch_case_data('john', 'doe', '11/26/1985', '12345', 'Santa Clara'))
#print(submit_interview({ 'citationNumber': 1234 }))
| null | docassemble/jcc/abilitytopay/a2papi.py | a2papi.py | py | 9,169 | python | en | code | null | code-starcoder2 | 51 |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test prefetch.so plugin (simple overflow64 mode).
'''
server = Test.MakeOriginServer("server")
# Path suffixes far beyond 64-bit integer range: exercises the plugin's
# bignum overflow handling when computing the "next" path to prefetch.
vals = ["3842948374928374982374982374", "3842948374928374982374982375",
        "3842948374928374982374982376", "3842948374928374982374982377"]
for i in vals:
    request_header = {
        "headers":
            f"GET /texts/demo-{i}.txt HTTP/1.1\r\n"
            "Host: does.not.matter\r\n"  # But cannot be omitted.
            "\r\n",
        "timestamp": "1469733493.993",
        "body": ""
    }
    response_header = {
        "headers":
            "HTTP/1.1 200 OK\r\n"
            "Connection: close\r\n"
            "Cache-control: max-age=85000\r\n"
            "\r\n",
        "timestamp": "1469733493.993",
        "body": f"This is the body for demo-{i}.txt.\n"
    }
    server.addResponse("sessionlog.json", request_header, response_header)
dns = Test.MakeDNServer("dns")
# stderr is redirected to trace.log so the last test run can grep the
# debug-tagged prefetch requests out of it.
ts = Test.MakeATSProcess("ts", use_traffic_out=False, command="traffic_server 2> trace.log")
ts.Disk.records_config.update({
    'proxy.config.diags.debug.enabled': 1,
    'proxy.config.diags.debug.tags': 'http|dns|prefetch',
    'proxy.config.dns.nameservers': f"127.0.0.1:{dns.Variables.Port}",
    'proxy.config.dns.resolv_conf': "NULL",
})
ts.Disk.remap_config.AddLine(
    f"map http://domain.in http://127.0.0.1:{server.Variables.Port}" +
    # NOTE(review): the next line has no trailing '+'; it joins the following
    # literal via implicit string concatenation -- works, but looks unintended.
    " @plugin=cachekey.so @pparam=--remove-all-params=true"
    " @plugin=prefetch.so" +
    " @pparam=--front=true" +
    " @pparam=--fetch-policy=simple" +
    r" @pparam=--fetch-path-pattern=/(.*-)(\d+)(.*)/$1{$2+1}$3/" +
    " @pparam=--fetch-count=3" +
    " @pparam=--fetch-overflow=bignum"
)
# Run 1: just bring up origin, DNS and Traffic Server.
tr = Test.AddTestRun()
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(dns)
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.Command = 'echo start TS, HTTP server and DNS.'
tr.Processes.Default.ReturnCode = 0
# Run 2: request the first object; the plugin should prefetch the successors.
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
    f'curl --verbose --proxy 127.0.0.1:{ts.Variables.port} http://domain.in/texts/demo-3842948374928374982374982374.txt'
)
tr.Processes.Default.ReturnCode = 0
# Run 3: compare the prefetch GETs recorded in trace.log against the gold file.
tr = Test.AddTestRun()
tr.Processes.Default.Command = (
    "grep 'GET http://domain.in' trace.log"
)
tr.Streams.stdout = "prefetch_bignum.gold"
tr.Processes.Default.ReturnCode = 0
| null | tests/gold_tests/pluginTest/prefetch/prefetch_bignum.test.py | prefetch_bignum.test.py | py | 3,108 | python | en | code | null | code-starcoder2 | 51 |
from budgetkey_data_pipelines.pipelines.procurement.tenders.exemptions.exemptions_scraper import ExemptionsPublisherScraper, TooManyFailuresException
import os
import json
from requests.exceptions import HTTPError, ConnectionError
from itertools import islice
class MockExemptionsPublisherScraper(ExemptionsPublisherScraper):
    """
    opens files from local filesystem instead of from the source
    """
    def __init__(self, publisher_id, write_prefix=None, mock_http_failures=0, **kwargs):
        # write_prefix: when set, hit the real site and also dump each page next
        # to this file (used to regenerate the mock fixtures).
        self.write_prefix = write_prefix
        # Per-fixture request counter, used to simulate transient HTTP failures.
        self._num_requests = {}
        # How many times each distinct request fails before succeeding.
        self._mock_http_failures = mock_http_failures
        # Keep retry back-off near zero so the retry tests stay fast.
        kwargs.setdefault("wait_between_retries", 0.001)
        super(MockExemptionsPublisherScraper, self).__init__(publisher_id, **kwargs)
    def _get_page_text(self, form_data=None):
        """Serve fixture files instead of HTTP; optionally fail the first N calls per page."""
        if form_data:
            filename = "SearchExemptionMessages.aspx.publisher{}-page{}".format(self._publisher_id, self._cur_page_num)
        else:
            filename = "SearchExemptionMessages.aspx"
        if filename not in self._num_requests:
            self._num_requests[filename] = 0
        self._num_requests[filename] += 1
        if self._mock_http_failures < self._num_requests[filename]:
            if self.write_prefix:
                # Regeneration mode: fetch the real page and record it (plus the
                # form data that produced it) to disk.
                with open(os.path.join(os.path.dirname(__file__), "{}{}".format(self.write_prefix, filename)), "w") as f:
                    if form_data:
                        real_text = super(MockExemptionsPublisherScraper, self)._get_page_text(form_data)
                        f.write(json.dumps(form_data))
                        f.write("\n\n")
                        f.write(real_text)
                    else:
                        real_text = super(MockExemptionsPublisherScraper, self)._get_page_text()
                        f.write(real_text)
                return real_text
            else:
                with open(os.path.join(os.path.dirname(__file__), filename)) as f:
                    return f.read()
        elif self._num_requests[filename] == 1:
            # First simulated failure is a connection error, later ones HTTP errors.
            raise ConnectionError("fake connecion error")
        else:
            raise HTTPError("fake http error")
class MockExemptionsPublisherScraperUnlimitedPages(ExemptionsPublisherScraper):
    """Mock that always reports one more page than the current one, so pagination
    never ends on its own; used to exercise the max_pages limit."""
    def __init__(self, **kwargs):
        super(MockExemptionsPublisherScraperUnlimitedPages, self).__init__(10, **kwargs)
    def _get_page_text(self, form_data=None):
        # Every paginated request is answered with the same publisher-10 page-1 fixture.
        if form_data:
            filename = "SearchExemptionMessages.aspx.publisher10-page1"
        else:
            filename = "SearchExemptionMessages.aspx"
        with open(os.path.join(os.path.dirname(__file__), filename)) as f:
            return f.read()
    def _get_num_pages(self):
        # Pretend there is always one more page available.
        return self._cur_page_num + 1
def test():
    """Happy path: the mocked publisher-10 pages yield the expected exemption URLs."""
    # 10 = Ministry of Public Security - Israel Police
    actual_urls = list(MockExemptionsPublisherScraper(10).get_urls())
    assert_publisher_10_urls(actual_urls)
# this test is skipped because it does a real connection to the gov website
# it can be used locally to regenerate the mock files or to test functionality of the real website
def skip_test_no_mock():
    """Live-site variant of test(); collects the 13 URLs starting at the known first one."""
    # 10 = Ministry of Public Security - Israel Police
    actual_urls = []
    # gets all the exemptions, wait until the first one in the mock test, then get 12 urls and stop
    for url in MockExemptionsPublisherScraper(10, "dump_").get_urls():
        is_first_url = url == "/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596877"
        if is_first_url or (len(actual_urls) > 0 and len(actual_urls) <= 12):
            actual_urls.append(url)
        elif len(actual_urls) >= 12:
            break
    assert_publisher_10_urls(actual_urls)
def test_retries():
    """Transient failures below the retry limit are absorbed and results still match."""
    # mock_http_failures parameter causes every http request to fail 3 times and succeed on the fourth time
    scraper = MockExemptionsPublisherScraper(10, mock_http_failures=3)
    # it works due to retry mechanism
    assert_publisher_10_urls(list(scraper.get_urls()))
def test_retries_too_many_failures():
    """With more consecutive failures (11) than the default retry limit (10),
    the scraper must surface TooManyFailuresException."""
    scraper = MockExemptionsPublisherScraper(10, mock_http_failures=11)
    try:
        list(scraper.get_urls())
    except TooManyFailuresException:
        pass  # expected
    else:
        # try/except/else is clearer than the previous boolean-flag dance and
        # cannot mis-report if unrelated code after the call were to raise.
        raise AssertionError("expected TooManyFailuresException to be raised")
def test_max_pages():
    """max_pages caps pagination at 10 results per page; 0/negative means unlimited."""
    # 1 page = 10 results
    assert_max_pages(max_pages=1, num_expected_results=10)
    assert_max_pages(max_pages=2, num_expected_results=20)
    # the scraper yields unlimited results
    assert_max_pages(max_pages=6, num_expected_results=60)
    # 0 or negative number - will yield all pages (in the assertion we islice it to 300)
    assert_max_pages(max_pages=0, num_expected_results=300)
    assert_max_pages(max_pages=-1, num_expected_results=300)
def assert_publisher_10_urls(actual_urls):
    """Assert *actual_urls* equals the 13 exemption URLs in the publisher-10 fixtures."""
    assert actual_urls == [
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596877',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596879',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596880',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596739',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596740',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596741',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596751',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596752',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596753',
        "/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596755",
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596686',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596700',
        '/ExemptionMessage/Pages/ExemptionMessage.aspx?pID=596714'
    ]
def assert_max_pages(max_pages, num_expected_results):
    """Run the unlimited-pages mock with *max_pages* and count the URLs it yields."""
    scraper = MockExemptionsPublisherScraperUnlimitedPages(max_pages=max_pages)
    # the scraper will yield unlimited results, we stop at 300 results
    urls = islice(scraper.get_urls(), 300)
    assert sum(1 for _ in urls) == num_expected_results
| null | tests/procurement/tenders/exemptions/test_exemptions_scraper.py | test_exemptions_scraper.py | py | 6,113 | python | en | code | null | code-starcoder2 | 51 |
import sys, os
from optparse import OptionParser
from src.recorder import Recorder
from src.player import Player
def main(argv):
    """CLI entry point: drive Spotify playback (play a URI or URI file, pause,
    resume, skip, or query the current song).

    Exactly one command option must be supplied, otherwise the parser errors out
    (SystemExit).
    """
    project_dir = (os.path.sep).join(os.path.abspath(__file__).split(os.path.sep)[:-1])
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("", "--uri", dest="uri", default="", help="Spotify URI of album/playlist to play.")
    parser.add_option("", "--path", dest="path", default="", help="Path to file containing Spotify URI to play.")
    parser.add_option("", "--current", dest="current", action="store_true", help="Get the currently played song.")
    parser.add_option("", "--pause", dest="pause", action="store_true", help="Pause the stream.")
    # BUG FIX: options.resume was read below but --resume was never declared,
    # so e.g. `--prev` crashed with AttributeError before reaching its branch.
    parser.add_option("", "--resume", dest="resume", action="store_true", help="Resume the stream.")
    parser.add_option("", "--prev", dest="prev", action="store_true", help="Skip to previous song.")
    parser.add_option("", "--next", dest="next", action="store_true", help="Skip to next song.")
    # BUG FIX: parse the argv that was passed in instead of implicitly
    # re-reading sys.argv and ignoring the parameter.
    (options, args) = parser.parse_args(argv)
    # Count how many command options were actually supplied.
    nb_options = 0
    string_values = ["uri", "path"]
    for opt, value in options.__dict__.items():
        if (opt in string_values and value != "") or (opt not in string_values and value != None):
            nb_options += 1
    if (nb_options == 0):
        parser.error("Please specify at least one command.")
    if (nb_options > 1):
        parser.error("Please specify only one command.")
    config_file_path = f"{project_dir}{os.path.sep}resources{os.path.sep}config.json"
    if options.path != "":
        rec = Recorder(path=options.path)
        rec.read()
        if rec.content != "":
            pla = Player(config_file_path)
            # Check if we play a new album/playlist or resume
            playback = pla.get_current_playback()
            if playback == None or playback["context"]["uri"] != rec.content.strip():
                pla.play(uri=rec.content)
            else:
                pla.play()
    else:
        pla = Player(config_file_path)
        if options.uri != "":
            pla.play(uri=options.uri)
        elif options.pause != None:
            pla.pause()
        elif options.current != None:
            pla.get_current_playback()
        elif options.resume != None:
            pla.play()
        elif options.prev != None:
            pla.prev()
        elif options.next != None:
            pla.next()
if __name__ == "__main__":
    # Invoked as a script: pass along everything after the program name.
    main(sys.argv[1:])
#!/usr/bin/env python
from PIL import Image
import numpy as np
def dct(matrix):
    """Return the 2-D orthonormal (type-II) DCT of *matrix*.

    result[i, j] = c_i * c_j * sum_{k,l} M[k, l]
                   * cos((2k+1) i pi / (2m)) * cos((2l+1) j pi / (2n))
    with c_0 = sqrt(1/size) and c_i = sqrt(2/size) otherwise.

    Improvements over the previous version:
    * the four nested Python loops (O(m^2 * n^2) cosine evaluations) are
      replaced by two matrix products (the transform is separable), which is
      dramatically faster for the 32x32 inputs used by phash();
    * the old code read rows/cols as shape[1]/shape[0] but indexed the matrix
      the other way around, raising IndexError for any non-square input.
      Results for square inputs are unchanged.
    """
    matrix = np.asarray(matrix, dtype=float)
    m, n = matrix.shape  # m rows, n columns

    def _basis(size):
        # Orthonormal DCT-II basis: B[i, k] = c_i * cos((2k+1) i pi / (2*size)).
        idx = np.arange(size)
        scale = np.full(size, np.sqrt(2.0 / size))
        scale[0] = np.sqrt(1.0 / size)
        return scale[:, None] * np.cos(np.pi * (2 * idx[None, :] + 1) * idx[:, None] / (2 * size))

    return _basis(m) @ matrix @ _basis(n).T
class Hash():
    """Wraps a boolean bit array; subtracting two hashes gives their Hamming distance."""
    def __init__(self, binary_arr):
        self.hash = binary_arr

    def __sub__(self, other):
        # Number of bit positions where the two arrays disagree.
        mine = self.hash.flatten()
        theirs = other.hash.flatten()
        return np.count_nonzero(mine != theirs)

    def __str__(self):
        return bin_to_hex(self.hash.flatten())

    def __repr__(self):
        return self.__str__()
def bin_to_hex(arr):
    """Pack a flattened bit array into a zero-padded 16-character hex string."""
    bits = ''.join('1' if bit else '0' for bit in arr.flatten())
    # 64 bits -> 16 hex digits; shorter inputs are left-padded with zeros.
    return '{:0>{width}x}'.format(int(bits, 2), width=16)
def phash(image):
    """Compute the 64-bit perceptual hash of a PIL image.

    Pipeline: grayscale -> 32x32 downsample -> 2-D DCT -> keep the 8x8 lowest
    frequencies -> threshold each coefficient against their median.
    """
    # FIX: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # Image.LANCZOS is the same filter under its long-standing name.
    image = image.convert("L").resize((32, 32), Image.LANCZOS)
    pixels = np.array(image)
    DCT = dct(pixels)
    low_freq = DCT[:8, :8]
    median = np.median(low_freq)
    res = low_freq > median
    return Hash(res)
def progressBar(value, endvalue, bar_length=20):
    """Draw/refresh an in-place textual progress bar on stdout.

    value/endvalue is the completed fraction; bar_length is the bar width in
    characters.
    """
    # BUG FIX: `sys` was only imported inside the __main__ guard, so calling
    # this function from an importing module raised NameError; import locally.
    import sys
    percent = float(value) / endvalue
    arrow = '-' * int(round(percent * bar_length) - 1) + '>'
    spaces = ' ' * (bar_length - len(arrow))
    sys.stdout.write("\rProgress: [{0}] {1}%".format(arrow + spaces, int(round(percent * 100))))
    sys.stdout.flush()
def find_similar_images(userpath):
    """Hash every image directly inside *userpath* and print duplicate/modification/similar pairs."""
    print("Process started.")
    def is_image(filename):
        # Cheap extension check; only png/jpg/jpeg files are considered.
        f = filename.lower()
        return f.endswith(".png") or f.endswith(".jpg") or f.endswith(".jpeg")
    # NOTE(review): `os` (and `sys`, via progressBar) are imported only under
    # the __main__ guard, so importing this module and calling this function
    # raises NameError -- confirm and move the imports to module level.
    image_files = [os.path.join(userpath, path) for path in os.listdir(userpath) if is_image(path)]
    n = len(image_files)
    images = {}
    for i,img in zip(range(n),image_files):
        progressBar(i,n,30)
        hash = phash(Image.open(img))
        # NOTE(review): Hash defines no __eq__/__hash__, so every key below is
        # a distinct object and the .get() merge never actually fires; each
        # entry simply maps one Hash object to one relative filename.
        images[hash] = images.get(hash, '') + img[len(userpath)+1:]
    progressBar(n,n,30)
    print('\n')
    hashes = [*images.keys()]
    for i in range(n):
        for j in range(i+1,n):
            if hashes[i] - hashes[j] == 0:
                # Identical hash: compare raw pixels to tell exact duplicates
                # from same-hash modifications.
                pixels1 = np.asarray(Image.open(image_files[i]))
                pixels2 = np.asarray(Image.open(image_files[j]))
                if pixels1.shape == pixels2.shape:
                    if not np.any(pixels1-pixels2):
                        print(f'{images[hashes[i]]} {images[hashes[j]]} (duplicate)')
                        continue
                    print(f'{images[hashes[i]]} {images[hashes[j]]} (modification)')
            else:
                # Normalized similarity: fraction of matching bits out of 64.
                score = 1-(hashes[i]-hashes[j])/64
                if score >= 0.7:
                    print(f'{images[hashes[i]]} {images[hashes[j]]} (similar)')
def dir_path(path):
    """argparse `type=` validator: return *path* if it is an existing directory.

    Raises argparse.ArgumentTypeError otherwise, so argparse reports a clean
    usage error instead of a traceback.
    """
    # BUG FIX: os/argparse were imported only under the __main__ guard; import
    # them here so the validator also works when this module is imported.
    import os
    import argparse
    if os.path.isdir(path):
        return path
    else:
        raise argparse.ArgumentTypeError(f"readable_dir:{path} is not a valid path")
if __name__ == '__main__':
    # Script mode: parse --path (validated by dir_path) and run the scan.
    import sys, os, argparse
    parser = argparse.ArgumentParser(description='This script identifies similar images in the directory using pHash method.')
    parser.add_argument('--path',type=dir_path, help='path to images', required=True)
    args = parser.parse_args()
    find_similar_images(args.path)
| null | find_similar_images.py | find_similar_images.py | py | 3,708 | python | en | code | null | code-starcoder2 | 51 |
# How to implement loops in python
demoList = [1, 2, 3, 4, 5]
# FOR LOOP - runs until all items are exhausted
# Through a list
for item in demoList:
    print(item)
# Through a dictionary
students = {"Marry" : 9.2, "Jhon" : 10.2, "Face" : 0.1}
# You can cast the pair into a single variable
# the .items() will grab the whole pair
# the .values() will grab only the value
# the .keys() will grab only the keys
for pair in students.items():
    print("Name is %s and score is %s" % (pair[0], pair[1]))
# Can use more than one variable to assign to
for key, value in students.items():
    print("Name is %s and score is %s" % (key, value))
# WHILE LOOP - runs as long as the conditional is true
a = 0
while a == 0:
    print(a)
    a = a + 1
# Using break and continue statements
# break -> breaks out of the loop
# continue -> moves on to the top of the loop/goes to the next iteration item
a = True
while True: # will loop forever, since always True
    if a == False:
        break
    else:
        a = False
        continue
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.project_info.tasks.ide_gen import Project, SourceSet
from pants.source.source_root import SourceRootConfig
from pants_test.base_test import BaseTest
class IdeGenTest(BaseTest):
  """Tests for IDE project generation helpers: Project._collapse_by_source_root
  and SourceSet equality/de-duplication semantics."""
  def test_collapse_source_root(self):
    # Register three source roots, then verify SourceSets collapse onto them.
    self.context(for_subsystems=[SourceRootConfig], options={
      SourceRootConfig.options_scope: {
        'source_roots': {
          '/src/java': [],
          '/tests/java': [],
          '/some/other': []
        },
        'unmatched': 'fail'
      }
    })
    source_roots = SourceRootConfig.global_instance().get_source_roots()
    source_set_list = []
    self.assertEquals([], Project._collapse_by_source_root(source_roots, source_set_list))
    source_sets = [
      SourceSet('/repo-root', 'src/java', 'org/pantsbuild/app', False),
      SourceSet('/repo-root', 'tests/java', 'org/pantsbuild/app', True),
      SourceSet('/repo-root', 'some/other', 'path', False),
    ]
    results = Project._collapse_by_source_root(source_roots, source_sets)
    self.assertEquals(SourceSet('/repo-root', 'src/java', '', False), results[0])
    self.assertFalse(results[0].is_test)
    self.assertEquals(SourceSet('/repo-root', 'tests/java', '', True), results[1])
    self.assertTrue(results[1].is_test)
    # If there is no registered source root, the SourceSet should be returned unmodified
    self.assertEquals(source_sets[2], results[2])
    self.assertFalse(results[2].is_test)
  def test_source_set(self):
    source_set1 = SourceSet('repo-root', 'path/to/build', 'org/pantsbuild/project', False)
    # only the first 3 parameters are considered keys
    self.assertEquals(('repo-root', 'path/to/build', 'org/pantsbuild/project'),
                      source_set1._key_tuple)
    source_set2 = SourceSet('repo-root', 'path/to/build', 'org/pantsbuild/project', True)
    # Don't consider the test flag
    self.assertEquals(source_set1, source_set2)
  def assert_dedup(self, expected, actual):
    """Assert *actual* de-duplicated to exactly [expected], flags included."""
    self.assertEquals([expected], actual)
    # that test is not good enough, 'resources_only' and 'is_test' aren't considered keys for the set
    self.assertEquals(expected.resources_only, actual[0].resources_only)
    self.assertEquals(expected.is_test, actual[0].is_test)
  def test_dedup_sources_simple(self):
    self.assertEquals([
      SourceSet('foo', 'bar', ''),
      SourceSet('foo', 'bar', 'baz'),
      SourceSet('foo', 'bar', 'foobar')
    ],
    Project.dedup_sources([
      SourceSet('foo', 'bar', ''),
      SourceSet('foo', 'bar', 'foobar'),
      SourceSet('foo', 'bar', 'baz'),
      SourceSet('foo', 'bar', 'baz'),
      SourceSet('foo', 'bar', 'foobar'),
      SourceSet('foo', 'bar', 'foobar'),
      SourceSet('foo', 'bar', 'baz'),
    ]))
  def test_dedup_sources_resource_and_code(self):
    """Show that a non-resources-only source set turns off the resources_only flag"""
    deduped_sources = Project.dedup_sources([
      SourceSet('foo', 'bar', 'baz', resources_only=True),
      SourceSet('foo', 'bar', 'baz'),
      SourceSet('foo', 'bar', 'baz', resources_only=True),
    ])
    self.assert_dedup(SourceSet('foo', 'bar', 'baz'), deduped_sources)
  def test_dedup_test_sources(self):
    """Show that a is_test on a non resources_only source set turns on is_test"""
    deduped_sources = Project.dedup_sources([
      SourceSet('foo', 'bar', 'baz', is_test=True),
      SourceSet('foo', 'bar', 'baz'),
      SourceSet('foo', 'bar', 'baz', is_test=True),
    ])
    self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True), deduped_sources)
  def test_dedup_test_resources(self):
    """Show that competing is_test values on a resources-only source set turns off is_test"""
    deduped_sources = Project.dedup_sources([
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
      SourceSet('foo', 'bar', 'baz', is_test= False, resources_only=True),
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
    ])
    self.assert_dedup(SourceSet('foo', 'bar', 'baz', resources_only=True), deduped_sources)
  def test__only_test_resources(self):
    # All inputs agree on is_test=True + resources_only=True, so both survive.
    deduped_sources = Project.dedup_sources([
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
    ])
    self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
                      deduped_sources)
  def test_all_together(self):
    # Mixed flags: code beats resources-only, and is_test wins on code sets.
    deduped_sources = Project.dedup_sources([
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=False),
      SourceSet('foo', 'bar', 'baz', is_test=True, resources_only=True),
      SourceSet('foo', 'bar', 'baz', is_test=False, resources_only=True),
      SourceSet('foo', 'bar', 'baz', is_test=False, resources_only=False),
    ])
    self.assert_dedup(SourceSet('foo', 'bar', 'baz', is_test=True), deduped_sources)
| null | tests/python/pants_test/backend/project_info/tasks/test_ide_gen.py | test_ide_gen.py | py | 5,164 | python | en | code | null | code-starcoder2 | 51 |
import helpers, testly
from collections import OrderedDict
from pyppl import Proc
from pyppl.proctree import ProcTree, ProcNode
from pyppl.exception import ProcTreeProcExists, ProcTreeParseError
class TestProcNode(testly.TestCase):
	"""Unit tests for ProcNode: construction, sameIdTag identity and repr format."""
	def testInit(self):
		# A fresh node wraps the proc with empty prev/next lists and unset flags.
		proc = Proc()
		self.maxDiff = None
		pn = ProcNode(proc)
		self.assertIs(pn.proc, proc)
		self.assertListEqual(pn.prev, [])
		self.assertListEqual(pn.next, [])
		self.assertEqual(pn.ran, False)
		self.assertEqual(pn.start, False)
		self.assertIn('File ', pn.defs[0])
	def dataProvider_testSameIdTag(self):
		# Same proc object matches itself; a different Proc does not.
		proc1 = Proc()
		pn1 = ProcNode(proc1)
		yield pn1, proc1, True
		proc2 = Proc()
		yield pn1, proc2, False
	def testSameIdTag(self, pn, proc, out):
		self.assertEqual(pn.sameIdTag(proc), out)
	def testRepr(self):
		# repr embeds the wrapped proc's id/tag and both objects' addresses.
		proc = Proc()
		pn = ProcNode(proc)
		self.assertEqual(repr(pn), '<ProcNode(<Proc(id=%s,tag=%s) @ %s>) @ %s>' % (proc.id, proc.tag, hex(id(proc)), hex(id(pn))))
class TestProcTree(testly.TestCase):
	def setUp(self):
		"""Reset the global ProcTree registry before each (non-set) test."""
		# procs registered by Proc.__init__() are also removed!
		if self.isFirst() or not self.isOfSet():
			ProcTree.NODES = OrderedDict()
	def dataProvider_testRegister(self):
		"""Cases: registering the same proc twice keeps one entry; a new proc adds one."""
		proc_testRegister1 = Proc()
		yield proc_testRegister1, 1
		yield proc_testRegister1, 1
		proc_testRegister2 = Proc()
		yield proc_testRegister2, 2
	def testRegister(self, proc, l):
		"""register() stores a node for the proc and NODES grows to the expected size."""
		ProcTree.register(proc)
		self.assertIs(ProcTree.NODES[proc].proc, proc)
		self.assertEqual(len(ProcTree.NODES), l)
	def dataProvider_testCheck(self):
		"""Only the proc reusing an existing id ('proc_testCheck1') should fail the check."""
		proc_testCheck1 = Proc()
		proc_testCheck2 = Proc()
		proc_testCheck3 = Proc(id = 'proc_testCheck1')
		yield proc_testCheck1, False
		yield proc_testCheck2, False
		yield proc_testCheck3, True
	def testCheck(self, proc, r):
		"""check() raises ProcTreeProcExists for duplicate id/tag, else passes."""
		ProcTree.register(proc)
		if r:
			self.assertRaises(ProcTreeProcExists, ProcTree.check, proc)
		else:
			ProcTree.check(proc)
	def dataProvider_testGetPrevNextStr(self):
		"""Linear chain 1 -> 2 -> 3; expected prev/next strings for each node."""
		proc_testGetPrevNextStr1 = Proc()
		proc_testGetPrevNextStr2 = Proc()
		proc_testGetPrevNextStr3 = Proc()
		proc_testGetPrevNextStr2.depends = proc_testGetPrevNextStr1
		proc_testGetPrevNextStr3.depends = proc_testGetPrevNextStr2
		ps = [proc_testGetPrevNextStr1, proc_testGetPrevNextStr2, proc_testGetPrevNextStr3]
		yield ps, proc_testGetPrevNextStr1, 'prev', 'START'
		yield ps, proc_testGetPrevNextStr2, 'prev', '[proc_testGetPrevNextStr1]'
		yield ps, proc_testGetPrevNextStr3, 'prev', '[proc_testGetPrevNextStr2]'
		yield ps, proc_testGetPrevNextStr1, 'next', '[proc_testGetPrevNextStr2]'
		yield ps, proc_testGetPrevNextStr2, 'next', '[proc_testGetPrevNextStr3]'
		yield ps, proc_testGetPrevNextStr3, 'next', 'END'
	def testGetPrevNextStr(self, procs, proc, which, out):
		"""getPrevStr/getNextStr render START/END or the neighbour list."""
		for p in procs:
			ProcTree.register(p)
		ProcTree()
		if which == 'prev':
			self.assertEqual(ProcTree.getPrevStr(proc), out)
		else:
			self.assertEqual(ProcTree.getNextStr(proc), out)
	def dataProvider_testGetNext(self):
		"""Fan-out graph: 1 -> 2 -> {3, 4}; expected successor lists."""
		proc_testGetNext1 = Proc()
		proc_testGetNext2 = Proc()
		proc_testGetNext3 = Proc()
		proc_testGetNext4 = Proc()
		proc_testGetNext2.depends = proc_testGetNext1
		proc_testGetNext3.depends = proc_testGetNext2
		proc_testGetNext4.depends = proc_testGetNext2
		ps = [proc_testGetNext1, proc_testGetNext2, proc_testGetNext3, proc_testGetNext4]
		yield ps, proc_testGetNext1, [proc_testGetNext2]
		yield ps, proc_testGetNext2, [proc_testGetNext3, proc_testGetNext4]
		yield ps, proc_testGetNext3, []
		yield ps, proc_testGetNext4, []
	def testGetNext(self, procs, proc, outs):
		"""getNext() returns the registered successors of *proc* (order-insensitive)."""
		for p in procs:
			ProcTree.register(p)
		ProcTree()
		nexts = ProcTree.getNext(proc)
		self.assertCountEqual(nexts, outs)
	def dataProvider_testReset(self):
		"""A small dependency graph to be wiped by ProcTree.reset()."""
		proc_testReset1 = Proc()
		proc_testReset2 = Proc()
		proc_testReset3 = Proc()
		proc_testReset4 = Proc()
		proc_testReset2.depends = proc_testReset1
		proc_testReset3.depends = proc_testReset2
		proc_testReset4.depends = proc_testReset2
		yield [proc_testReset1, proc_testReset2, proc_testReset3, proc_testReset4],
	def testReset(self, procs):
		"""reset() clears every node's prev/next links and ran/start flags."""
		for p in procs:
			ProcTree.register(p)
		ProcTree()
		ProcTree.reset()
		for node in ProcTree.NODES.values():
			self.assertListEqual(node.prev, [])
			self.assertListEqual(node.next, [])
			self.assertFalse(node.ran)
			self.assertFalse(node.start)
	def dataProvider_testInit(self):
		"""Graph 1 -> 2 -> {3, 4}, registered in full or partially."""
		proc_testInit1 = Proc()
		proc_testInit2 = Proc()
		proc_testInit3 = Proc()
		proc_testInit4 = Proc()
		proc_testInit2.depends = proc_testInit1
		proc_testInit3.depends = proc_testInit2
		proc_testInit4.depends = proc_testInit2
		yield [proc_testInit1, proc_testInit2, proc_testInit3, proc_testInit4],
		yield [proc_testInit1, proc_testInit3],
	def testInit(self, procs):
		"""ProcTree() wires prev/next links to mirror each proc's depends."""
		for p in procs:
			ProcTree.register(p)
		pt = ProcTree()
		self.assertEqual(pt.starts, [])
		self.assertEqual(pt.ends, [])
		for proc in procs:
			depends = proc.depends
			for depend in depends:
				nproc = ProcTree.NODES[proc]
				ndepend = ProcTree.NODES[depend]
				self.assertIn(nproc, ndepend.next)
				self.assertIn(ndepend, nproc.prev)
	def dataProvider_testSetGetStarts(self):
		"""Graph 1 -> 2 -> {3, 4} with varying registered procs and start sets."""
		proc_testSetGetStarts1 = Proc()
		proc_testSetGetStarts2 = Proc()
		proc_testSetGetStarts3 = Proc()
		proc_testSetGetStarts4 = Proc()
		proc_testSetGetStarts2.depends = proc_testSetGetStarts1
		proc_testSetGetStarts3.depends = proc_testSetGetStarts2
		proc_testSetGetStarts4.depends = proc_testSetGetStarts2
		yield [proc_testSetGetStarts1, proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts1]
		yield [proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts2]
		yield [proc_testSetGetStarts1, proc_testSetGetStarts2, proc_testSetGetStarts3, proc_testSetGetStarts4], [proc_testSetGetStarts1, proc_testSetGetStarts2]
	def testSetGetStarts(self, procs, starts):
		"""setStarts() flags exactly the given procs; getStarts() returns them."""
		for p in procs:
			ProcTree.register(p)
		pt = ProcTree()
		pt.setStarts(starts)
		for proc in procs:
			if proc in starts:
				self.assertTrue(ProcTree.NODES[proc].start)
			else:
				self.assertFalse(ProcTree.NODES[proc].start)
		s = pt.getStarts()
		self.assertCountEqual(s, starts)
		self.assertCountEqual(pt.starts, starts)
	def dataProvider_testGetPaths(self):
		"""Diamond graph, two dependency cycles (must raise), and a legal diamond."""
		proc_testGetPaths1 = Proc()
		proc_testGetPaths2 = Proc()
		proc_testGetPaths3 = Proc()
		proc_testGetPaths4 = Proc()
		proc_testGetPaths5 = Proc()
		proc_testGetPaths2.depends = proc_testGetPaths1
		proc_testGetPaths3.depends = proc_testGetPaths2, proc_testGetPaths4
		proc_testGetPaths4.depends = proc_testGetPaths2
		proc_testGetPaths5.depends = proc_testGetPaths1
		"""
		proc1 -> proc2 -> proc3
		    \       \    /
		    proc5   proc4
		"""
		ps = [proc_testGetPaths1, proc_testGetPaths2, proc_testGetPaths3, proc_testGetPaths4, proc_testGetPaths5]
		yield ps, proc_testGetPaths1, []
		yield ps, proc_testGetPaths2, [[proc_testGetPaths1]]
		yield ps, proc_testGetPaths3, [[proc_testGetPaths2, proc_testGetPaths1], [proc_testGetPaths4, proc_testGetPaths2, proc_testGetPaths1]]
		yield ps, proc_testGetPaths4, [[proc_testGetPaths2, proc_testGetPaths1]]
		yield ps, proc_testGetPaths5, [[proc_testGetPaths1]]
		# Cycle 6 -> 7 -> 8 -> 6: getPaths must detect it and raise.
		proc_testGetPaths6 = Proc()
		proc_testGetPaths7 = Proc()
		proc_testGetPaths8 = Proc()
		proc_testGetPaths7.depends = proc_testGetPaths6
		proc_testGetPaths8.depends = proc_testGetPaths7
		proc_testGetPaths6.depends = proc_testGetPaths8
		ps2 = [proc_testGetPaths6, proc_testGetPaths7, proc_testGetPaths8]
		yield ps2, proc_testGetPaths6, [], True
		# Two-node cycle 10 <-> 11 upstream of 12: also raises.
		proc_testGetPaths10 = Proc()
		proc_testGetPaths11 = Proc()
		proc_testGetPaths12 = Proc()
		proc_testGetPaths11.depends = proc_testGetPaths10
		proc_testGetPaths12.depends = proc_testGetPaths11
		proc_testGetPaths10.depends = proc_testGetPaths11
		ps3 = [proc_testGetPaths10, proc_testGetPaths11, proc_testGetPaths12]
		yield ps3, proc_testGetPaths12, [], True
		# should be ok:
		# 13 -> 15
		# 14 -> 15
		# 13 -> 14
		proc_testGetPaths13 = Proc()
		proc_testGetPaths14 = Proc()
		proc_testGetPaths15 = Proc()
		proc_testGetPaths15.depends = proc_testGetPaths13, proc_testGetPaths14
		proc_testGetPaths14.depends = proc_testGetPaths13
		ps4 = [proc_testGetPaths13, proc_testGetPaths14, proc_testGetPaths15]
		yield ps4, proc_testGetPaths15, [[proc_testGetPaths13], [proc_testGetPaths14, proc_testGetPaths13]]
def testGetPaths(self, procs, proc, paths, exception = None):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
if exception:
self.assertRaises(ProcTreeParseError, pt.getPaths, proc)
else:
ps = pt.getPaths(proc)
self.assertListEqual(ps, paths)
def dataProvider_testGetPathsToStarts(self):
proc_testGetPathsToStarts1 = Proc()
proc_testGetPathsToStarts2 = Proc()
proc_testGetPathsToStarts3 = Proc()
proc_testGetPathsToStarts4 = Proc()
proc_testGetPathsToStarts5 = Proc()
proc_testGetPathsToStarts2.depends = proc_testGetPathsToStarts1
proc_testGetPathsToStarts3.depends = proc_testGetPathsToStarts2, proc_testGetPathsToStarts4
proc_testGetPathsToStarts4.depends = proc_testGetPathsToStarts2
proc_testGetPathsToStarts5.depends = proc_testGetPathsToStarts1
"""
proc1 -> proc2 -> proc3
\ \ /
proc5 proc4
"""
ps = [proc_testGetPathsToStarts1, proc_testGetPathsToStarts2, proc_testGetPathsToStarts3, proc_testGetPathsToStarts4, proc_testGetPathsToStarts5]
yield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts1, []
yield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts2, [[proc_testGetPathsToStarts1]]
yield ps, [proc_testGetPathsToStarts2], proc_testGetPathsToStarts2, []
yield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]
yield ps, [proc_testGetPathsToStarts1, proc_testGetPathsToStarts4], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]
yield ps, [proc_testGetPathsToStarts2], proc_testGetPathsToStarts3, [[proc_testGetPathsToStarts2], [proc_testGetPathsToStarts4, proc_testGetPathsToStarts2]]
yield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts4, [[proc_testGetPathsToStarts2, proc_testGetPathsToStarts1]]
yield ps, [proc_testGetPathsToStarts1], proc_testGetPathsToStarts5, [[proc_testGetPathsToStarts1]]
def testGetPathsToStarts(self, procs, starts, proc, paths):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
ps = pt.getPathsToStarts(proc)
self.assertListEqual(ps, paths)
def dataProvider_testCheckPath(self):
proc_testCheckPath0 = Proc()
proc_testCheckPath1 = Proc()
proc_testCheckPath2 = Proc()
proc_testCheckPath3 = Proc()
proc_testCheckPath4 = Proc()
proc_testCheckPath5 = Proc()
proc_testCheckPath2.depends = proc_testCheckPath0, proc_testCheckPath1
proc_testCheckPath3.depends = proc_testCheckPath2, proc_testCheckPath4
proc_testCheckPath4.depends = proc_testCheckPath2
proc_testCheckPath5.depends = proc_testCheckPath1
"""
proc0
\
proc1 -> proc2 -> proc3
\ \ /
proc5 proc4
"""
ps = [proc_testCheckPath0, proc_testCheckPath1, proc_testCheckPath2, proc_testCheckPath3, proc_testCheckPath4, proc_testCheckPath5]
yield ps, [proc_testCheckPath1], proc_testCheckPath1, True
yield ps, [proc_testCheckPath1], proc_testCheckPath2, [proc_testCheckPath0]
yield ps, [proc_testCheckPath0, proc_testCheckPath1], proc_testCheckPath2, True
yield ps, [proc_testCheckPath0, proc_testCheckPath1], proc_testCheckPath3, True
yield ps, [proc_testCheckPath0], proc_testCheckPath3, [proc_testCheckPath2, proc_testCheckPath1]
def testCheckPath(self, procs, starts, proc, passed):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
if isinstance(passed, bool):
self.assertEqual(pt.checkPath(proc), passed)
else:
self.assertListEqual(pt.checkPath(proc), passed)
def dataProvider_testGetEnds(self):
# check for loops
proc_testGetEnds_loop0 = Proc()
proc_testGetEnds_loop1 = Proc()
proc_testGetEnds_loop2 = Proc()
proc_testGetEnds_loop3 = Proc()
proc_testGetEnds_loop1.depends = proc_testGetEnds_loop0
proc_testGetEnds_loop2.depends = proc_testGetEnds_loop1
proc_testGetEnds_loop3.depends = proc_testGetEnds_loop2
proc_testGetEnds_loop0.depends = proc_testGetEnds_loop1
"""
0 -> 1 -> 2 -> 3
|____|
"""
yield [proc_testGetEnds_loop0, proc_testGetEnds_loop1, proc_testGetEnds_loop2, proc_testGetEnds_loop3], [proc_testGetEnds_loop3], [], ProcTreeParseError, 'Loop dependency'
proc_testGetEnds_loop4 = Proc()
proc_testGetEnds_loop5 = Proc()
proc_testGetEnds_loop6 = Proc()
proc_testGetEnds_loop7 = Proc()
proc_testGetEnds_loop5.depends = proc_testGetEnds_loop4
proc_testGetEnds_loop6.depends = proc_testGetEnds_loop5
proc_testGetEnds_loop7.depends = proc_testGetEnds_loop6
proc_testGetEnds_loop4.depends = proc_testGetEnds_loop7
"""
4 -> 5 -> 6 -> 7
|______________|
"""
yield [proc_testGetEnds_loop4, proc_testGetEnds_loop5, proc_testGetEnds_loop6, proc_testGetEnds_loop7], [proc_testGetEnds_loop7], [], ProcTreeParseError, 'Loop dependency'
proc_testGetEnds0 = Proc()
proc_testGetEnds1 = Proc()
proc_testGetEnds2 = Proc()
proc_testGetEnds3 = Proc()
proc_testGetEnds4 = Proc()
proc_testGetEnds5 = Proc()
proc_testGetEnds2.depends = proc_testGetEnds0, proc_testGetEnds1
proc_testGetEnds3.depends = proc_testGetEnds2, proc_testGetEnds4
proc_testGetEnds4.depends = proc_testGetEnds2
proc_testGetEnds5.depends = proc_testGetEnds1
"""
proc0
\
proc1 -> proc2 -> proc3
\ \ /
proc5 proc4
"""
ps = [proc_testGetEnds0, proc_testGetEnds1, proc_testGetEnds2, proc_testGetEnds3, proc_testGetEnds4, proc_testGetEnds5]
yield ps, [proc_testGetEnds5], [], ProcTreeParseError, 'one of the paths cannot go through'
yield ps, [proc_testGetEnds1], [proc_testGetEnds5]
yield ps, [proc_testGetEnds0, proc_testGetEnds1], [proc_testGetEnds3, proc_testGetEnds5]
yield ps, [proc_testGetEnds0], [], ProcTreeParseError, 'one of the paths cannot go through'
proc_testGetEnds6 = Proc()
yield [proc_testGetEnds6], [proc_testGetEnds6], [proc_testGetEnds6]
yield [proc_testGetEnds6], [], [], ProcTreeParseError, 'Failed to determine end processes by start processes'
def testGetEnds(self, procs, starts, ends, exception = None, msg = None):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
if exception:
self.assertRaisesRegex(ProcTreeParseError, msg, pt.getEnds)
else:
self.assertCountEqual(pt.getEnds(), ends)
def dataProvider_testGetAllPaths(self):
proc_testGetAllPaths0 = Proc()
proc_testGetAllPaths1 = Proc()
proc_testGetAllPaths2 = Proc()
proc_testGetAllPaths3 = Proc()
proc_testGetAllPaths4 = Proc()
proc_testGetAllPaths5 = Proc()
proc_testGetAllPaths2.depends = proc_testGetAllPaths0, proc_testGetAllPaths1
proc_testGetAllPaths3.depends = proc_testGetAllPaths2, proc_testGetAllPaths4
proc_testGetAllPaths4.depends = proc_testGetAllPaths2
proc_testGetAllPaths5.depends = proc_testGetAllPaths1
"""
proc0
\
proc1 -> proc2 -> proc3
\ \ /
proc5 proc4
"""
ps = [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5]
yield ps, [proc_testGetAllPaths2], [[proc_testGetAllPaths3, proc_testGetAllPaths2], [proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2]]
yield ps, [proc_testGetAllPaths1], [[proc_testGetAllPaths5, proc_testGetAllPaths1]]
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [
[proc_testGetAllPaths5, proc_testGetAllPaths1],
[proc_testGetAllPaths3, proc_testGetAllPaths2, proc_testGetAllPaths0],
[proc_testGetAllPaths3, proc_testGetAllPaths2, proc_testGetAllPaths1],
[proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2, proc_testGetAllPaths0],
[proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths2, proc_testGetAllPaths1],
]
# obsolete
proc_testGetAllPaths6 = Proc()
yield [proc_testGetAllPaths6], [proc_testGetAllPaths6], [[proc_testGetAllPaths6]]
def testGetAllPaths(self, procs, starts, paths):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
self.assertCountEqual(pt.getAllPaths(), paths)
def dataProvider_testGetNextToRun(self):
proc_testGetAllPaths0 = Proc()
proc_testGetAllPaths1 = Proc()
proc_testGetAllPaths2 = Proc()
proc_testGetAllPaths3 = Proc()
proc_testGetAllPaths4 = Proc()
proc_testGetAllPaths5 = Proc()
proc_testGetAllPaths2.depends = proc_testGetAllPaths0, proc_testGetAllPaths1
proc_testGetAllPaths3.depends = proc_testGetAllPaths2, proc_testGetAllPaths4
proc_testGetAllPaths4.depends = proc_testGetAllPaths2
proc_testGetAllPaths5.depends = proc_testGetAllPaths1
"""
proc0
\
proc1 -> proc2 -> proc3
\ \ /
proc5 proc4
"""
ps = [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5]
yield ps, [proc_testGetAllPaths0], [], proc_testGetAllPaths0
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [], proc_testGetAllPaths0
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1], proc_testGetAllPaths2
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths5], proc_testGetAllPaths4
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths4, proc_testGetAllPaths5], proc_testGetAllPaths3
yield ps, [proc_testGetAllPaths0, proc_testGetAllPaths1], [proc_testGetAllPaths0, proc_testGetAllPaths1, proc_testGetAllPaths2, proc_testGetAllPaths3, proc_testGetAllPaths4, proc_testGetAllPaths5], None
def testGetNextToRun(self, procs, starts, haveran, out):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
for hr in haveran:
ProcTree.NODES[hr].ran = True
self.assertIs(pt.getNextToRun(), out)
def dataProvider_testUnranProcs(self):
proc_testUnranProcs0 = Proc()
proc_testUnranProcs1 = Proc()
proc_testUnranProcs2 = Proc()
proc_testUnranProcs3 = Proc()
proc_testUnranProcs4 = Proc()
proc_testUnranProcs5 = Proc()
proc_testUnranProcs6 = Proc()
proc_testUnranProcs7 = Proc()
proc_testUnranProcs2.depends = proc_testUnranProcs0, proc_testUnranProcs1
proc_testUnranProcs3.depends = proc_testUnranProcs2, proc_testUnranProcs4
proc_testUnranProcs4.depends = proc_testUnranProcs2
proc_testUnranProcs5.depends = proc_testUnranProcs1
proc_testUnranProcs6.depends = proc_testUnranProcs0
"""
proc0 -> proc6
\
proc1 -> proc2 -> proc3 proc7
\ \ /
proc5 proc4
"""
ps = [proc_testUnranProcs0, proc_testUnranProcs1, proc_testUnranProcs2, proc_testUnranProcs3, proc_testUnranProcs4, proc_testUnranProcs5, proc_testUnranProcs6, proc_testUnranProcs7]
yield ps, [proc_testUnranProcs0], {
'proc_testUnranProcs3': ['proc_testUnranProcs2', 'proc_testUnranProcs1']
}
yield ps, [proc_testUnranProcs1], {
'proc_testUnranProcs3': ['proc_testUnranProcs2', 'proc_testUnranProcs0']
}
def testUnranProcs(self, procs, starts, outs):
for p in procs:
ProcTree.register(p)
pt = ProcTree()
pt.setStarts(starts)
# run the pipeline
p = pt.getNextToRun()
while p:
ProcTree.NODES[p].ran = True
p = pt.getNextToRun()
self.assertDictEqual(pt.unranProcs(), outs)
if __name__ == '__main__':
testly.main(verbosity=2, failfast = True) | null | tests/testProcTree.py | testProcTree.py | py | 19,805 | python | en | code | null | code-starcoder2 | 51 |
212593434 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Use this code snippet in your app.
# If you need more information about configurations or implementing the sample code, visit the AWS docs:
# https://aws.amazon.com/developers/getting-started/python/
import argparse
import logging
import tempfile
from time import sleep
import boto3
import cbor
import json
import requests
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
from AWSIoTDeviceDefenderAgentSDK import collector
import os
PRIVATE_KEY = "private_key"
CERTIFICATE = "certificate"
POLICY_NAME = "service_host_agent_policy"
# Variable to track publish metrics response
latest_accepted_report_id = 0
def get_mqtt_endpoint(session, cp_endpoint_url):
iot = session.client(service_name="iot", endpoint_url=cp_endpoint_url)
resp = iot.describe_endpoint(endpointType="iot:Data-ATS")
return resp["endpointAddress"]
def get_instance_metadata():
return requests.get(
"http://169.254.169.254/latest/dynamic/instance-identity/document"
).json()
def get_region():
return get_instance_metadata().get("region")
def get_instance_id():
return get_instance_metadata().get("instanceId")
def get_root_ca():
url = "https://www.amazontrust.com/repository/AmazonRootCA1.pem"
ca_text = requests.get(url).text
ca_temp_file = tempfile.NamedTemporaryFile("w")
ca_temp_file.write(ca_text)
ca_temp_file.file.flush()
return ca_temp_file
def get_client_id():
return get_instance_id()
def get_cp_endpoint_url(domain, region):
if domain == "prod":
return "https://iot." + region + ".amazonaws.com"
else:
return "https://" + domain + "." + region + ".iot.amazonaws.com"
def parse_args():
parser = argparse.ArgumentParser(fromfile_prefix_chars="@")
parser.add_argument(
"-r",
"--region",
action="store",
required=False,
dest="region",
help="AWS Region Code (ex: us-east-1), defaults to the region of the instance",
)
parser.add_argument(
"-d",
"--domain",
action="store",
required=False,
dest="domain",
help="application domain (ex: prod or gamma), defaults to gamma",
)
parser.add_argument(
"-n",
"--name",
action="store",
required=False,
dest="name",
help="Supply a thing name instead of using EC2 Instance Id",
)
parser.add_argument(
"-e",
"--cp-endpoint-url",
action="store",
required=False,
dest="cp_endpoint_url",
help="Supply the URL for the control plane APIs, defaults to"
" https://gamma.us-west-2.iot.amazonaws.com",
)
parser.add_argument(
"-m",
"--mqtt-endpoint",
action="store",
required=False,
dest="mqtt_endpoint",
help="Supply the MQTT endpoint to submit metrics to, defaults to"
" the endpoint retrieved by calling describe-endpoint",
)
return parser.parse_args()
def ack_callback(client, userdata, message):
response_payload = json.loads(message.payload.decode("ASCII"))
if "json" in message.topic:
logging.info(
"Received a new message: {} from topic: {}".format(
message.payload, message.topic
)
)
else:
response_payload = json.loads(cbor.loads(message.payload))
logging.info(
"Received a new message: {} from topic: {}".format(
cbor.loads(message.payload), message.topic
)
)
global latest_accepted_report_id
if "accepted" in message.topic:
report_id = response_payload.get("reportId")
latest_accepted_report_id = report_id
def start_metrics_collection(
region_name, cp_endpoint_url, client_id, iot_client, topic, sample_rate
):
# Collector samples metrics from the system, it can track the previous metric to generate deltas
coll = collector.Collector(False)
metric = None
first_sample = (
True # don't publish first sample, so we can accurately report delta metrics
)
while True:
logging.info("collecting metrics")
metric = coll.collect_metrics()
if first_sample:
first_sample = False
else:
session = boto3.session.Session(region_name=region_name)
# This is a cheap hack to ensure we reset the creds every so often,
# since the temporary creds expire. SDK doesn't seem to have a way
# to reset these creds other than periodically updating these creds
# by calling iot_client.configureIAMCredentials or subclassing the
# MQTT client for listening to the onOffline callback. Details in
# this SIM: https://t.corp.amazon.com/issues/SDK-15249/communication
credentials = session.get_credentials()
iot_client.configureIAMCredentials(
credentials.access_key, credentials.secret_key, credentials.token
)
report_id = metric._v1_metrics().get("header").get("report_id")
iot_client.publish(topic=topic, payload=metric.to_json_string(), QoS=0)
logging.info("Published report with report_id: {}".format(report_id))
max_iterations = 5
while max_iterations > 0:
# Sleep 10s to allow receiving a response for the latest publish.
sleep(10)
max_iterations = max_iterations - 1
if latest_accepted_report_id == report_id:
logging.info(
"Received successful ack for reportId: {}".format(
latest_accepted_report_id
)
)
break
logging.info(
"Republishing report with reportId: {}, last accepted reportId: {}".format(
report_id, latest_accepted_report_id
)
)
iot_client.publish(topic=topic, payload=metric.to_json_string(), QoS=0)
sleep(float(sample_rate))
def main():
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
args = parse_args()
if args.region:
region_name = args.region
else:
region_name = get_region()
if args.domain:
domain_name = args.domain
else:
domain_name = "prod"
if args.cp_endpoint_url:
cp_endpoint_url = args.cp_endpoint_url
else:
cp_endpoint_url = get_cp_endpoint_url(domain=domain_name, region=region_name)
session = boto3.session.Session(region_name=region_name)
if args.name:
client_id = args.name
else:
client_id = (
get_client_id()
) # This will set the client-id based on the ec2 instance id
if not client_id:
logging.info("Failed to determine client_id, quitting")
exit(1)
logging.info(
"Running agent with domain: {}, region: {}, clientId: {}, cp_endpoint_url: {}".format(
domain_name, region_name, client_id, cp_endpoint_url
)
)
ca_cert_file = get_root_ca()
if args.mqtt_endpoint:
mqtt_endpoint = args.mqtt_endpoint
else:
logging.info("Attempting to retrieve Mqtt endpoint")
mqtt_endpoint = get_mqtt_endpoint(session, cp_endpoint_url)
logging.info("Using Mqtt endpoint: {}".format(mqtt_endpoint))
iot_client = AWSIoTMQTTClient(client_id, useWebsocket=True)
iot_client.configureEndpoint(mqtt_endpoint, 443, region_name)
credentials = session.get_credentials()
iot_client.configureCredentials(ca_cert_file.name)
iot_client.configureIAMCredentials(
credentials.access_key, credentials.secret_key, credentials.token
)
# AWSIoTMQTTClient connection configuration
iot_client.configureAutoReconnectBackoffTime(1, 32, 20)
iot_client.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
iot_client.configureDrainingFrequency(2) # Draining: 2 Hz
iot_client.configureConnectDisconnectTimeout(30)
iot_client.configureMQTTOperationTimeout(20) # 5 sec
# Connect and subscribe to AWS IoT
iot_client.connect()
sleep(2)
topic = "$aws/things/{}/defender/metrics/{}".format(client_id, "json")
# Subscribe to the accepted/rejected topics to indicate status of published metrics reports
# topic=subscribe_to_topic, callback=callback, QoS=1,
iot_client.subscribe(
topic="{}/accepted".format(topic), callback=ack_callback, QoS=1
)
iot_client.subscribe(
topic="{}/rejected".format(topic), callback=ack_callback, QoS=1
)
start_metrics_collection(
region_name=region_name,
cp_endpoint_url=cp_endpoint_url,
client_id=client_id,
iot_client=iot_client,
topic=topic,
sample_rate=300,
)
ca_cert_file.close()
if __name__ == "__main__":
main()
| null | AWSIoTDeviceDefenderAgentSDK/agent.py | agent.py | py | 9,798 | python | en | code | null | code-starcoder2 | 51 |
29141393 | import random
def input_num(prompt='Please enter a number: ', mini=0, maxi=100):
"""Read a positive number with the given prompt."""
while True:
try:
num = int(input(prompt))
if (num < mini or
(maxi is not None and num > maxi)):
print('Number is not within range: {} to {}'.format(mini, maxi))
else:
break
except ValueError:
print('enter a number')
continue
return num
class RolledOneException(Exception):
pass
class Die:
"""A die to play with."""
def __init__(self):
self.value = random.randint(1, 6)
def roll(self):
"""Returns the rolled dice, or raises RolledOneException if 1."""
self.value = random.randint(1, 6)
if self.value == 1:
raise RolledOneException
return self.value
def __str__(self):
return "Rolled " + str(self.value) + "."
class Box:
"""Temporary score box holder class."""
def __init__(self):
self.value = 0
def reset(self):
self.value = 0
def add_dice_value(self, dice_value):
self.value += dice_value
class Player(object):
"""Base class for different player types."""
def __init__(self, name=None):
self.name = name
self.score = 0
def add_score(self, player_score):
"""Adds player_score to total score."""
self.score += player_score
def __str__(self):
"""Returns player name and current score."""
return str(self.name) + ": " + str(self.score)
class ComputerPlayer(Player):
cpu_names=['cpu1', 'cpu2', 'cpu3', 'cpu4']
def __init__(self, num):
"""Assigns a cpu name from cpu_names, or Cpu#."""
if num < len(self.cpu_names):
name = self.cpu_names[num]
else:
name = 'Cpu{}'.format(num)
super(ComputerPlayer, self).__init__(name)
def keep_rolling(self, box):
"""Randomly decides if the CPU player will keep rolling."""
while box.value < (10 + random.randint(1, 35)):
print(" Computer will roll again.")
return True
print(" Computer will hold.")
return False
class HumanPlayer(Player):
def __init__(self, name):
super(HumanPlayer, self).__init__(name)
def keep_rolling(self, box):
"""Asks the human player, if they want to keep rolling."""
human_decision = input_num(" 1 - Roll again, 0 - Hold? ", 0, 1)
if human_decision == 1:
return True
else:
return False
class GameManager:
def __init__(self, humans=1, computers=1):
"""Initialises the game, optionally asking for human player names."""
self.players = []
if humans == 1:
self.players.append(HumanPlayer('Human'))
else:
for i in range(humans):
player_name = input('Enter name of human player no. {}: '.format(i))
self.players.append(HumanPlayer(player_name))
for i in range(computers):
self.players.append(ComputerPlayer(i))
self.no_of_players = len(self.players)
self.die = Die()
self.box = Box()
@staticmethod
def welcome():
"""Prints a welcome message including rules."""
print("*" * 70)
print("Welcome to Pig Dice!" .center(70))
print("*" * 70)
print("The objective is to be the first to reach 100 points." .center(70))
print("On each turn, the player will roll a die." .center(70))
print("The die value will stored in a temporary score box." .center(70))
print("(If the die value is 1, the player earns no points," .center(70))
print("and the turn goes to the next player.)" .center(70))
print("A human player has an option to either roll again," .center(70))
print("or hold. If you hold, the score in the" .center(70))
print("temporary box will be added to your total score." .center(70))
print(" Good luck! " .center(70, "*"))
print(" Remember " .center(70, "*"))
print(" Fortune favors the brave... " .center(70, "*"))
print(" but chance favors the smart! " .center(70, "*"))
print()
print("I will now decide who starts" .center(70, " "))
print()
def decide_first_player(self):
"""Randomly chooses a player to begin, and prints who is starting."""
self.current_player = random.randint(1, self.no_of_players) % self.no_of_players
print('{} starts'.format(self.players[self.current_player].name))
def next_player(self):
"""Advanced self.current_player to next player."""
self.current_player = (self.current_player + 1) % self.no_of_players
def previous_player(self):
"""Changes self.current_player to previous player."""
self.current_player = (self.current_player - 1) % self.no_of_players
def get_all_scores(self):
"""Returns a join all players scores."""
return ', '.join(str(player) for player in self.players)
def play_game(self):
"""Plays an entire game."""
self.welcome()
self.decide_first_player()
while all(player.score < 100 for player in self.players):
print('\nCurrent score --> {}'.format(self.get_all_scores()))
print('\n*** {} to play ***'.format(self.players[self.current_player].name))
self.box.reset()
while self.keep_rolling():
pass
self.players[self.current_player].add_score(self.box.value)
self.next_player()
## The previous player has won...
self.previous_player()
print(' {} has won '.format(self.players[self.current_player].name).center(70, '*'))
def keep_rolling(self):
"""Adds rolled dice to box. Returns if human/cpu wants to continue.
If either player rolls a 1, the box value is reset, and turn ends.
"""
try:
dice_value = self.die.roll()
self.box.add_dice_value(dice_value)
print('Last roll: {}, new box value: {}'.format(dice_value, self.box.value))
# Check if human (by asking) or computer(calculating) will keep rolling
return self.players[self.current_player].keep_rolling(self.box)
except RolledOneException:
print(' Rolled one. Switching turns')
self.box.reset()
return False
def main():
human_players = input_num('How many human players? ')
computer_players = input_num('How many computer players? ')
game_manager = GameManager(human_players, computer_players)
game_manager.play_game()
if __name__ == '__main__':
main()
| null | dice_game_pig.py | dice_game_pig.py | py | 7,073 | python | en | code | null | code-starcoder2 | 51 |
495255608 | from sys import stdin
input = stdin.readline
N = int(input())
dp = [0] * 501
LIS = [0] * 501
for i in range(N):
a,b = map(int, input().split())
LIS[b] = a
dp[b] = 1
for j in range(501):
if LIS[j] == 0:
continue
for i in range(j, -1, -1):
if LIS[i] < LIS[j]:
dp[j] = max(dp[j], dp[i]+1)
print(N - max(dp)) | null | 210101/bj_2565.py | bj_2565.py | py | 355 | python | en | code | null | code-starcoder2 | 51 |
111280072 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.devtools.containeranalysis_v1.types import containeranalysis
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from .transports.base import ContainerAnalysisTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ContainerAnalysisGrpcAsyncIOTransport
from .client import ContainerAnalysisClient
class ContainerAnalysisAsyncClient:
"""Retrieves analysis results of Cloud components such as Docker
container images. The Container Analysis API is an implementation of
the `Grafeas <https://grafeas.io>`__ API.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis
instance on a resource. An occurrence refers to a ``Note``. A note
contains details describing the analysis and is generally stored in
a separate project, called a ``Provider``. Multiple occurrences can
refer to the same note.
For example, an SSL vulnerability could affect multiple images. In
this case, there would be one note for the vulnerability and an
occurrence for each image with the vulnerability referring to that
note.
"""
_client: ContainerAnalysisClient
DEFAULT_ENDPOINT = ContainerAnalysisClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ContainerAnalysisClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(ContainerAnalysisClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ContainerAnalysisClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ContainerAnalysisClient.common_folder_path)
parse_common_folder_path = staticmethod(ContainerAnalysisClient.parse_common_folder_path)
common_organization_path = staticmethod(ContainerAnalysisClient.common_organization_path)
parse_common_organization_path = staticmethod(ContainerAnalysisClient.parse_common_organization_path)
common_project_path = staticmethod(ContainerAnalysisClient.common_project_path)
parse_common_project_path = staticmethod(ContainerAnalysisClient.parse_common_project_path)
common_location_path = staticmethod(ContainerAnalysisClient.common_location_path)
parse_common_location_path = staticmethod(ContainerAnalysisClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ContainerAnalysisAsyncClient: The constructed client.
"""
return ContainerAnalysisClient.from_service_account_info.__func__(ContainerAnalysisAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ContainerAnalysisAsyncClient: The constructed client.
"""
return ContainerAnalysisClient.from_service_account_file.__func__(ContainerAnalysisAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ContainerAnalysisTransport:
"""Returns the transport used by the client instance.
Returns:
ContainerAnalysisTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ContainerAnalysisClient).get_transport_class, type(ContainerAnalysisClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ContainerAnalysisTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the container analysis client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ContainerAnalysisTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ContainerAnalysisClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def set_iam_policy(self,
        request: iam_policy_pb2.SetIamPolicyRequest = None,
        *,
        resource: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> policy_pb2.Policy:
    r"""Set the access control policy on the specified note or occurrence.

    Requires ``containeranalysis.notes.setIamPolicy`` or
    ``containeranalysis.occurrences.setIamPolicy`` permission if the
    resource is a note or an occurrence, respectively. The resource takes
    the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and
    ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for occurrences.

    Args:
        request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest):
            The request object for the ``SetIamPolicy`` method.
        resource (str): REQUIRED: the resource the policy is being
            specified for. Mutually exclusive with a populated ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any, to retry.
        timeout (float): Timeout for this request, in seconds.
        metadata (Sequence[Tuple[str, str]]): Extra strings sent with the
            request as metadata.

    Returns:
        google.iam.v1.policy_pb2.Policy: The resulting IAM policy.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([resource]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # ``iam_policy_pb2`` messages are plain protobufs (not proto-plus), so
    # they must be built via keyword expansion rather than coercion.
    if isinstance(request, dict):
        request = iam_policy_pb2.SetIamPolicyRequest(**request)
    elif not request:
        request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, )

    # Attach retry/timeout handling and friendly error mapping to the
    # transport-level RPC.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.set_iam_policy,
        default_timeout=30.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The resource name must also travel in the gRPC routing header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource", request.resource),)),
    )

    return await wrapped_rpc(request, retry=retry, timeout=timeout,
                             metadata=metadata)
async def get_iam_policy(self,
        request: iam_policy_pb2.GetIamPolicyRequest = None,
        *,
        resource: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> policy_pb2.Policy:
    r"""Get the access control policy for a note or an occurrence resource.

    Requires ``containeranalysis.notes.setIamPolicy`` or
    ``containeranalysis.occurrences.setIamPolicy`` permission if the
    resource is a note or occurrence, respectively. The resource takes the
    format ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and
    ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for occurrences.

    Args:
        request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest):
            The request object for the ``GetIamPolicy`` method.
        resource (str): REQUIRED: the resource the policy is being
            requested for. Mutually exclusive with a populated ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any, to retry.
        timeout (float): Timeout for this request, in seconds.
        metadata (Sequence[Tuple[str, str]]): Extra strings sent with the
            request as metadata.

    Returns:
        google.iam.v1.policy_pb2.Policy: The resource's IAM policy.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([resource]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # ``iam_policy_pb2`` messages are plain protobufs (not proto-plus), so
    # they must be built via keyword expansion rather than coercion.
    if isinstance(request, dict):
        request = iam_policy_pb2.GetIamPolicyRequest(**request)
    elif not request:
        request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, )

    # Attach retry/timeout handling and friendly error mapping to the
    # transport-level RPC.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.get_iam_policy,
        default_timeout=30.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The resource name must also travel in the gRPC routing header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource", request.resource),)),
    )

    return await wrapped_rpc(request, retry=retry, timeout=timeout,
                             metadata=metadata)
async def test_iam_permissions(self,
        request: iam_policy_pb2.TestIamPermissionsRequest = None,
        *,
        resource: str = None,
        permissions: Sequence[str] = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> iam_policy_pb2.TestIamPermissionsResponse:
    r"""Return the permissions the caller has on the given note/occurrence.

    Requires list permission on the project (for example,
    ``containeranalysis.notes.list``). The resource takes the format
    ``projects/[PROJECT_ID]/notes/[NOTE_ID]`` for notes and
    ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for occurrences.

    Args:
        request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest):
            The request object for the ``TestIamPermissions`` method.
        resource (str): REQUIRED: the resource the policy detail is being
            requested for. Mutually exclusive with a populated ``request``.
        permissions (Sequence[str]): The permissions to check for
            ``resource``. Wildcards (such as '*' or 'storage.*') are not
            allowed; see the IAM Overview for details.
        retry (google.api_core.retry.Retry): Which errors, if any, to retry.
        timeout (float): Timeout for this request, in seconds.
        metadata (Sequence[Tuple[str, str]]): Extra strings sent with the
            request as metadata.

    Returns:
        google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: The
        subset of requested permissions the caller actually holds.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([resource, permissions]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # ``iam_policy_pb2`` messages are plain protobufs (not proto-plus), so
    # they must be built via keyword expansion rather than coercion.
    if isinstance(request, dict):
        request = iam_policy_pb2.TestIamPermissionsRequest(**request)
    elif not request:
        request = iam_policy_pb2.TestIamPermissionsRequest(
            resource=resource, permissions=permissions, )

    # Attach retry/timeout handling and friendly error mapping to the
    # transport-level RPC.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.test_iam_permissions,
        default_timeout=30.0,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The resource name must also travel in the gRPC routing header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("resource", request.resource),)),
    )

    return await wrapped_rpc(request, retry=retry, timeout=timeout,
                             metadata=metadata)
async def get_vulnerability_occurrences_summary(self,
        request: containeranalysis.GetVulnerabilityOccurrencesSummaryRequest = None,
        *,
        parent: str = None,
        filter: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> containeranalysis.VulnerabilityOccurrencesSummary:
    r"""Get a summary of the number and severity of occurrences.

    Args:
        request (google.cloud.devtools.containeranalysis_v1.types.GetVulnerabilityOccurrencesSummaryRequest):
            The request object for a vulnerability summary over some set of
            occurrences.
        parent (str): Name of the project to summarize, in the form
            ``projects/[PROJECT_ID]``. Mutually exclusive with a populated
            ``request``.
        filter (str): The filter expression. Mutually exclusive with a
            populated ``request``.
        retry (google.api_core.retry.Retry): Which errors, if any, to retry.
        timeout (float): Timeout for this request, in seconds.
        metadata (Sequence[Tuple[str, str]]): Extra strings sent with the
            request as metadata.

    Returns:
        google.cloud.devtools.containeranalysis_v1.types.VulnerabilityOccurrencesSummary:
        Occurrence counts per resource and severity type.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # A full request object and flattened field arguments are mutually
    # exclusive.
    if request is not None and any([parent, filter]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # This request type is proto-plus, so dicts/messages are coerced
    # directly; flattened fields are then applied on top.
    request = containeranalysis.GetVulnerabilityOccurrencesSummaryRequest(request)
    if parent is not None:
        request.parent = parent
    if filter is not None:
        request.filter = filter

    # Attach retry/timeout handling and friendly error mapping to the
    # transport-level RPC.
    wrapped_rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.get_vulnerability_occurrences_summary,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The parent name must also travel in the gRPC routing header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("parent", request.parent),)),
    )

    return await wrapped_rpc(request, retry=retry, timeout=timeout,
                             metadata=metadata)
# Attach the installed package version to outgoing request headers; fall back
# to an anonymous ClientInfo when the distribution metadata is unavailable
# (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-devtools-containeranalysis",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public API of this module.
__all__ = (
    "ContainerAnalysisAsyncClient",
)
| null | google/devtools/containeranalysis/v1/devtools-containeranalysis-v1-py/google/cloud/devtools/containeranalysis_v1/services/container_analysis/async_client.py | async_client.py | py | 27,932 | python | en | code | null | code-starcoder2 | 51 |
619992993 | """
If is odd, print Weird
If is even and in the inclusive range of to , "print Not Weird"
If is even and in the inclusive range of to , "print Weird"
If is even and greater than , print "Not Weird"
"""
#
# N = int(input())
# if N%2==0:
# if N>=2 and N<=5:
# print('Not Weird')
# elif N>=6 and N<=20:
# print('Weird')
# else:
# print('Not Weird')
# else:
# print('Weird')
# class Person:
# def __init__(self, initialAge):
# # Add some more code to run some checks on initialAge
# self.age = initialAge
# if self.age < 0:
# self.age = 0
# print('Age is not valid, setting age to 0.')
#
# def amIOld(self):
# # Do some computations in here and print out the correct statement to the console
# if age < 13:
# print('You are young.')
# elif 13 <= age < 18:
# print('You are a teenager.')
# else:
# print('You are a old.')
#
# def yearPasses(self):
# # Increment the age of the person in here
# self.age=age+1
#
#
# t = int(input())
# for i in range(0, t):
# age = int(input())
# p = Person(age)
# p.amIOld()
# for j in range(0, 3):
# p.yearPasses()
# p.amIOld()
# print("")
x = int(input())
y = int(input())
z = int(input())
n = int(input())
lp=[]
ls=[]
for i in range(x+1):
for j in range(y+1):
for k in range(z+1):
if ((i+j+k))!=n:
lp=[i,j,k]
ls.append(lp)
print(ls) | null | pythonProject/proj/hacker test.py | hacker test.py | py | 1,547 | python | en | code | null | code-starcoder2 | 51 |
559871441 | import random
import networkx as nx
class FF():
    """Forest Fire graph sampler.

    Starting from a random seed node, repeatedly "burns" a random number of
    each visited node's neighbours, adding the traversed edges to the
    sample until it contains the requested share of the input graph's nodes.
    """

    def __init__(self, param_settings=None):
        # No tunable parameters are used; the argument is accepted for
        # interface compatibility with other samplers.
        pass

    def run_samping(self, G, rate):
        """Sample ``G`` by forest-fire burning.

        Args:
            G: networkx graph to sample from.
            rate: fraction (0-1] of ``G``'s nodes the sample should reach.

        Returns:
            networkx.Graph: the sampled subgraph.
        """
        target_size = round(len(G) * rate)
        all_nodes = list(G.nodes())
        visited = set()
        # Frontier of nodes whose neighbourhoods still need burning.
        frontier = set()
        frontier.add(random.choice(all_nodes))
        sample = nx.Graph()
        while (len(sample.nodes()) < target_size):
            if (len(frontier) > 0):
                node = frontier.pop()
                if (node not in visited):
                    visited.add(node)
                    neighbours = list(G.neighbors(node))
                    # Burn a random prefix of the neighbour list.
                    # NOTE: raises ValueError on isolated nodes (randint(1, 0)),
                    # matching the original behaviour.
                    burn_count = random.randint(1, len(neighbours))
                    for neighbour in neighbours[:burn_count]:
                        if (len(sample.nodes()) < target_size):
                            sample.add_edge(node, neighbour)
                            frontier.add(neighbour)
                        else:
                            break
                else:
                    continue
            else:
                # Fire died out: restart from an already-visited node.
                # BUG FIX: the original `set(all_nodes) and visited` used the
                # boolean `and` (which simply yields `visited`), and
                # random.sample(<set>, 1) raises TypeError on Python 3.11+;
                # choose from a list of the visited set instead.
                frontier.add(random.choice(list(visited)))
        return sample
| null | BackEnd/utils/sampling_algorithms/FF.py | FF.py | py | 1,196 | python | en | code | null | code-starcoder2 | 51 |
#!/usr/bin/env python3
from datetime import datetime
from phue import Bridge
import logging
logging.basicConfig() # allows output to monitor/log file
# Connect to the Hue bridge at its fixed LAN address and cache its state.
b = Bridge('192.168.0.202') #assign hue bridge
b.connect() #connect to bridge
b.get_api()
lights = b.lights
b.get_group()
def _report(message):
    """Print *message* with a timestamp and append the same line to the log."""
    stamped = "%s | %s\n" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
    print(stamped)
    with open('/home/pi/Desktop/py/porch/log.log', 'a') as f:
        f.write(stamped)

def poff():
    """Turn the porch light off if it is on, logging what was done."""
    if b.get_light('Porch', 'on') == True:
        # Turn the porch light off almost immediately (transitiontime is in
        # tenths of a second).
        b.set_light('Porch', 'on', False, transitiontime=1)
        _report("It is getting dark and the porch light is on. Turning it off now.")
    else:
        # BUG FIX: the logged variant of this message had a stray space
        # ("off .") that differed from the printed one; both now match.
        _report("It is getting dark and the porch light is already off. Doing nothing.")

poff()
| null | Python/Porch/porch_off.py | porch_off.py | py | 1,226 | python | en | code | null | code-starcoder2 | 51 |
#!/usr/bin/env python
# Public names exported by this config module.
__all__ = ["user","description","repos","title","org"]
# Alias the builtins module under the Python 2 name so later code can call
# __builtins__.open() on either interpreter version.
try:
    import __builtin__ as __builtins__ # python 2
except:
    import builtins as __builtins__ # python 3
import os
from os.path import *
from subprocess import *
from all import *
from dirnames import *
from filenames import *
from extension import *
from import_path import *
from cached import *
from userpath import *
tab="index"
home = expanduser("~")
searchpath = []
# git config user.name
# Read the configured git user name once at import time; stderr is captured
# so a missing config does not pollute the console.
args = ["git","config","user.name"]
process = Popen(args,stdout=PIPE,stderr=PIPE)
stdoutdata, stderrdata = process.communicate()
try:
    # python3. process.communicate returns bytes
    stdoutdata = str(stdoutdata,"utf-8") # stdoutdata
    stderrdata = str(stderrdata,"utf-8") # stderrdata
except:
    pass
# Trailing newline stripped from the `git config` output.
user = stdoutdata.rstrip()
cwd = os.getcwd()
class Repo(object):
    """A local git repository discovered on disk, identified by its path."""
    path = None
    basename = None

    def __init__(self, path, name=None):
        self.path = path
        self.name = basename(path)
        self.basename = basename(path)
        # Validate that an explicitly passed name round-trips as UTF-8; the
        # result is intentionally discarded (kept from the original).
        if hasattr(name, "encode"):
            name.encode("utf-8")
        if hasattr(name, "decode"):
            name.decode("utf-8")

    @property
    def ext(self):
        """Text after the last dot in the repo directory name."""
        return self.name.split(".")[-1:][0]

    @property
    @cached
    def description(self):
        """Contents of `<path>/description`, or None when the file is
        absent or not valid UTF-8 text."""
        file = join(self.path, "description")
        if not exists(file):
            # The original fell back to the non-existent self.join(), which
            # raised AttributeError; simply report no description instead.
            return None
        description = __builtins__.open(file).read()
        try:
            # BUG FIX: the original unconditionally called both .encode()
            # and .decode() on the value; on Python 3 str has no .decode,
            # so this property always returned None. Check only the codec
            # direction appropriate for the value's type.
            if isinstance(description, bytes):
                description.decode("utf-8")
            else:
                description.encode("utf-8")
            return description
        except (UnicodeDecodeError, UnicodeEncodeError):
            return None

    @property
    def org(self):
        """Organization name for repos under ~/git/Examples/, else None."""
        # `not path.find(prefix)` is true only when find() returns 0, i.e.
        # the path starts with the prefix; spell that out explicitly.
        if not self.path.startswith("~/git/Examples/"):
            return
        org_dirname = dirname(dirname(self.path))
        return basename(org_dirname)

    @property
    def ssh_url(self):
        """SSH clone URL, owned by the org when one applies, else the user."""
        # BUG FIX: the original omitted the `%` operator (so the string was
        # *called*, raising TypeError) and referenced an undefined `name`.
        owner = self.org if self.org else self.user
        return "git@github.com:%s/%s.git" % (owner, self.name)

    @property
    def user(self):
        # Module-level git user name read at import time.
        return user

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name
# Scan the parent directory (one level deep) for repo folders, excluding
# GitHub Pages checkouts and test directories.
path=dirname(cwd)
find = dirnames(path,maxdepth=1,exclude=["*.github.com","_test*","__test*",".test*"])
repos=map(Repo,find)
# Sort case-insensitively by repo name (consumes the map into a list).
repos=sorted(repos,key=lambda org:org.name.lower())
owner = basename
# Page title: the current directory name without extension, minus any
# "<user>-" prefix.
title=basename(cwd).split(".")[0]
title=title.replace("%s-" % user,"")
org = basename(cwd).split(".")[0]
# Optional description read from ~/.github/<org>/description when present.
description=""
path=join(os.environ["HOME"],".github",org,"description")
if exists(path):
    description = open(path,"r").read().rstrip()
if __name__=="__main__":
    # Smoke test: dump the derived module state.
    print("__all__ = %s" % __all__)
    print("git config user.name: %s" % user)
    print("repos: %s" % len(repos))
    print(repos)
    for r in repos:
        print(r.name)
| null | config.py | config.py | py | 3,001 | python | en | code | null | code-starcoder2 | 50 |
#master
# Simple calculator front-end: greet, read two integers, then show the menu.
print("Witaj w kalkulatorze\n")
number1 = int(input("Podaj pierwsza cyfre:"))
number2 = int(input("Podaj druga cyfre"))
decision = 0

def menu():
    """wybor opcji menu"""
    # BUG FIX: the original passed implicitly-concatenated strings to a
    # single print(), which ran every option together on one line; print
    # each menu entry on its own line instead.
    print("0 - Dodawanie")
    print("1 - Odejmowanie")
    print("2 - Mnożenie")
    print("3 - Dzielenie")
    global decision
    decision = int(input("Jakie dzialanie chcesz wykonac: "))

menu()
| null | master.py | master.py | py | 396 | python | en | code | null | code-starcoder2 | 50 |
12830167 | from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# Roll three six-sided dice many times and chart how often each sum occurs.
dice = [Die(), Die(), Die()]

results = []
frequencies = []
number_of_rolls = 5000
min_roll_number = len(dice)
max_roll_number = min_roll_number * dice[0].sides

# Simulate the rolls, summing one roll from each die per trial.
for _ in range(number_of_rolls):
    results.append(sum(die.Roll() for die in dice))

# Tally how many times each possible sum appeared.
for value in range(min_roll_number, max_roll_number + 1):
    frequencies.append(results.count(value))

# Build and style the histogram, then write it to an HTML file.
x_values = list(range(min_roll_number, max_roll_number + 1))
data = [Bar(x=x_values, y=frequencies)]
x_axis_config = {'title': 'Results', 'dtick': 1}
y_axis_config = {'title': 'Frequency of results'}
custom_layout = Layout(
    title=f'Results of rolling {min_roll_number} D6 dices {number_of_rolls} times',
    xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': custom_layout}, filename='three_d6_dices.html')
| null | Data visualization/Chapter 1/plotply/three_d6.py | three_d6.py | py | 1,059 | python | en | code | null | code-starcoder2 | 50 |
318184495 | '''
Developer: Adam M. Terwilliger
Version: April 2, 2018
Purpose: CSE 802 -- HW3 - Q2
Details: Pattern Recognition course at MSU
Explore MLEs and Bayesian estimators
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import uniform
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from scipy.stats import uniform
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
#from astroML.plotting import setup_text_plots
#setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define the distribution parameters to be plotted
#W_values = [1.0, 2.0, 3.0]
W_values = [3.3333]
linestyles = ['-', '--', ':']
mu = 0
x = np.linspace(0, 0.6, 1000)
#x = np.linspace(-2, 2, 1000)
#------------------------------------------------------------
# plot the distributions
fig, ax = plt.subplots(figsize=(5, 3.75))
for W, ls in zip(W_values, linestyles):
left = mu - 0.5 * W
dist = uniform(left, W)
plt.plot(x, dist.pdf(x), ls=ls, c='black',
label=r'$\mu=%i,\ W=%i$' % (mu, W))
plt.plot([0,0], [0, 0.3], c='black')
plt.plot([0.6,0.6], [0, 0.3], c='black')
plt.xlim(-0.5, 1.0)
plt.ylim(0, 0.5)
plt.xlabel(r'$\theta$')
plt.ylabel(r'$p(x|\theta)$')
plt.title('Uniform Distribution')
#plt.legend()
plt.savefig('hw3_q2.png')
plt.show()
| null | hw3/hw3_q2.py | hw3_q2.py | py | 1,963 | python | en | code | null | code-starcoder2 | 50 |
539996434 | #!/usr/bin/env python3
import os, os.path
import shutil
import time
import sys
class OrganizaFotos():
    """Organize every file under ``srcPath`` into ``destPath/<year>/<month>``
    folders, naming each copy after its original parent folder.
    """

    def __init__(self, srcPath, destPath):
        self.srcPath = srcPath
        self.destPath = destPath

    def CopiarArquivos(self):
        """Copy all files, then report how many were processed and how long
        the copy took."""
        start = time.time()
        # BUG FIX: the original "chdir to test existence" also changed the
        # process CWD as a side effect; create the tree directly instead.
        os.makedirs(self.destPath, exist_ok=True)
        self.numberFiles = 0
        # Walk every file under the source directory.
        for foldername, subfolders, filenames in os.walk(self.srcPath):
            for filename in filenames:
                self.numberFiles += 1
                src_file = os.path.join(foldername, filename)
                # Month and year of last modification (from ctime's
                # "Www Mmm dd hh:mm:ss yyyy" layout) pick the target folder.
                last_modified = time.ctime(os.path.getmtime(src_file)).split(' ')
                mes = last_modified[1]
                ano = last_modified[-1]
                newpath = os.path.join(self.destPath, ano, mes)
                os.makedirs(newpath, exist_ok=True)
                # Name the copy "<original folder>_<n>.<ext>" using the first
                # counter value that does not collide with an existing file.
                folder_name = foldername.split(os.path.sep)[-1]
                extension = filename.split('.')[-1]
                iterator = 1
                while True:
                    new_name = os.path.join(
                        newpath,
                        folder_name + '_' + str(iterator) + '.' + extension)
                    if os.path.exists(new_name):
                        iterator += 1
                    else:
                        break
                print('Copying: ' + str(src_file) + '\nTo: ' + str(new_name))
                # copy2 preserves metadata (timestamps) along with contents.
                shutil.copy2(str(src_file), str(new_name))
        print('\n' * 2)
        self._reportar(time.time() - start)

    def _reportar(self, elapsed):
        """Print the file count and elapsed time in hours/minutes/seconds."""
        if elapsed >= 3600:
            horas = elapsed // 3600
            resto = elapsed % 3600
            if resto >= 60:
                print('%i Arquivos processados em %i horas, %i minutos e %.2f segundos'
                      % (self.numberFiles, horas, resto // 60, resto % 60))
            else:
                # BUG FIX: the original printed self.segundos here, which is
                # never assigned on this path (AttributeError); use the
                # remaining seconds directly.
                print('%i Arquivos processados em %i horas e %.2f segundos'
                      % (self.numberFiles, horas, resto))
        elif elapsed >= 60:
            print('%i Arquivos processados em %i minutos e %.2f segundos'
                  % (self.numberFiles, elapsed // 60, elapsed % 60))
        else:
            print('%i Arquivos processados em %.2f segundos'
                  % (self.numberFiles, elapsed))
if __name__=='__main__':
    # BUG FIX: sys.argv is used below but `sys` was never imported anywhere
    # in this file; import it locally for the CLI entry point.
    import sys
    # Usage text shown for any invalid invocation (kept verbatim).
    error="""orgfotos.py: Organiza todas as fotos da pasta pictures em um novo conjunto
de pastas por ano e mês. Os arquivos têm novos nomes baseados em sua pasta
antiga.
Argumentos de linha de comando:
1) Pasta a organizar: -v para videos; -p para pictures;
2) Pasta de saída a ser criada em Downloads.
Ex.: ./orgafotos -v "Teste" --> Pega arquivos da pasta videos e organiza em
Downloads/Teste/videos
"""
    # Exactly two CLI arguments are required: a mode flag and a folder name.
    if not len(sys.argv)==3:
        print(error)
    elif not (sys.argv[1]=='-v' or sys.argv[1]=='-p'):
        print(error)
    else:
        if sys.argv[1]=='-v':
            srcPath=os.path.expanduser('~/Videos')
            destPath=os.path.join(os.path.expanduser('~/Downloads'), sys.argv[2], 'Videos')
        elif sys.argv[1]=='-p':
            srcPath=os.path.expanduser('~/Pictures')
            destPath=os.path.join(os.path.expanduser('~/Downloads'), sys.argv[2], 'Pictures')
        orgFotos=OrganizaFotos(srcPath, destPath)
        orgFotos.CopiarArquivos()
| null | orgfotos.py | orgfotos.py | py | 4,934 | python | en | code | null | code-starcoder2 | 50 |
from django.conf.urls import url
from . import views

# URL routes for the production app, all resolved relative to /production.
urlpatterns = [
    # /production
    url(r'^$', views.index, name='index'),
    # /production/time
    url(r'^time$', views.time, name='time'),
    # /production/time2
    url(r'^time2$', views.time2, name='time2'),
]
| null | production/urls.py | urls.py | py | 227 | python | en | code | null | code-starcoder2 | 50 |
202951566 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import sqlite3
import datetime
# Today's date in MM_DD_YYYY form, used to suffix the per-run table name.
current_date = datetime.datetime.now().strftime("%m_%d_%Y")

def create_database(db_path='results.db', date_tag=None):
    """Open the results database and ensure the dated table exists.

    Args:
        db_path: SQLite database file (defaults to the original
            'results.db'; use ':memory:' for tests).
        date_tag: suffix for the table name; defaults to today's date in
            MM_DD_YYYY form, matching the module-level ``current_date``.

    Returns:
        tuple: (cursor, connection) for the opened database.
    """
    if date_tag is None:
        date_tag = datetime.datetime.now().strftime("%m_%d_%Y")
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    try:
        cursor.execute(
            'CREATE TABLE Nacebel_code' + date_tag +
            ' ([generated_id] INTEGER PRIMARY KEY,'
            '[nacebel_code] VARCHAR, [nacebel_text] VARCHAR)')
    except sqlite3.Error as e:
        # Most likely "table already exists" on a repeat run the same day;
        # report and continue with the existing table.
        print(e)
    return cursor, conn
def start_chrom():
    # Launch Chrome via Selenium with the certificate-error infobar disabled.
    options = webdriver.ChromeOptions()
    options.add_experimental_option("excludeSwitches",["ignore-certificate-errors"])
    # NOTE(review): `chrome_options` is deprecated in newer Selenium releases
    # in favour of `options=`; works on the version this script targets —
    # confirm before upgrading Selenium.
    drvier = webdriver.Chrome(chrome_options=options)
    return drvier
def start():
    """Crawl the KBO NACEBEL activity tree and persist code/text pairs.

    Walks the ExtJS tree widget row by row, expanding branch nodes and
    inserting leaf nodes ("<code> - <description>") into the dated table
    created by create_database().
    """
    cursor, conn = create_database()
    driver = start_chrom()
    driver.get("https://kbopub.economie.fgov.be/kbopub/zoekactiviteitform.html")
    driver.find_element_by_xpath("//*[@id='zoekactiviteitCommand']/table[1]/tbody/tr/td[2]/a").click()
    time.sleep(20)
    # The activity tree is rendered inside an iframe.
    iframe = driver.find_element_by_tag_name('iframe')
    driver.switch_to.frame(iframe)
    time.sleep(7)
    try:
        element = WebDriverWait(driver, 30).until(EC.visibility_of_element_located((By.ID, "tab-1033")))
        element.click()
    except Exception:
        # The tab may already be active; continue with the current view.
        pass
    time.sleep(2)
    top_div = driver.find_element_by_xpath("//*[@id='treeview-1027']")
    data = []
    # `a` indexes into the virtualised list of currently-rendered tree rows;
    # `flag` records that we just scrolled and the row list may have shrunk.
    a = 0
    flag = 0
    for index in range(3000):
        if index < 53:
            a = index
        tables = top_div.find_elements_by_tag_name("table")
        print("------------------------------")
        print("++" + str(index) + "++")
        if a % 52 == 0 and index != 0:
            # Bottom of the rendered window: scroll to load more rows.
            print("Over follow!")
            driver.execute_script("arguments[0].scrollTop = (arguments[0].scrollHeight)", top_div)
            time.sleep(2)
            a = 0
            flag = 1
        if 53 - len(tables) > 0 and flag != 0:
            print("Increase count!")
            a = 0
            flag = 0
        if a - len(tables) > 0:
            print("Increase count!")
            a = 0
        print(str(a))
        print(str(len(tables)))
        try:
            # Branch node with an expander: open it and move on.
            tables[a].find_element_by_class_name("x-tree-expander").click()
            a = a + 1
        except Exception:
            try:
                # Retry once after reading the node text (presumably forces
                # the row to render — kept from the original flow).
                temp = tables[a].find_element_by_class_name("x-tree-node-text").text.split("-")[0].replace(".", "")
                time.sleep(1)
                tables[a].find_element_by_class_name("x-tree-expander").click()
                a = a + 1
            except Exception:
                try:
                    # Leaf node formatted "<code> - <description>".
                    node_text = tables[a].find_element_by_class_name("x-tree-node-text").text
                    temp_data = node_text.split("-")[0].replace(".", "").strip()
                    nacebell_text = node_text.split("-")[1].strip()
                    data.append(temp_data)
                    # BUG FIX: the original condition `!= 2 or != 1` was
                    # always true; skip the 1- and 2-character category
                    # codes as intended.
                    if len(temp_data) not in (1, 2):
                        # BUG FIX: use a parameterised query so quotes in
                        # the scraped text cannot break (or inject into)
                        # the SQL statement.
                        conn.execute(
                            "INSERT INTO Nacebel_code" + current_date +
                            " (nacebel_code, nacebel_text) VALUES (?, ?)",
                            (temp_data, nacebell_text))
                        print(type(temp_data))
                        print(temp_data)
                        conn.commit()
                    a = a + 1
                except Exception as e:
                    print(e)
                    time.sleep(5)
        time.sleep(2)


if __name__ == "__main__":
    start()
| null | Belgium(sara L)/get_nacebel_code.py | get_nacebel_code.py | py | 4,517 | python | en | code | null | code-starcoder2 | 50 |
391691244 | import config
import dataloader
import engine
import ImageTransformer
import transformers
import torch
import torch.nn as nn
import numpy as np
import torchvision
import albumentations as alb
def run():
    """Train a ViT classifier on CIFAR-10 and save the best checkpoint.

    Side effects: downloads CIFAR-10 to input/data, prints per-epoch metrics,
    and writes the best (by validation accuracy) state_dict to
    config.Model_Path.  All hyperparameters come from the project `config`
    module.
    """
    # torchvision supplies raw PIL images; the project `dataloader` wrapper
    # applies the albumentations pipelines below.
    train_dataset = torchvision.datasets.CIFAR10(root='input/data', train=True, download = True)
    val_dataset = torchvision.datasets.CIFAR10(root='input/data', train=False, download = True)
    # Training-time augmentation: resize+normalize always, light photometric
    # jitter with small probabilities.
    train_transform = alb.Compose([
        alb.Resize(config.image_height, config.image_width, always_apply=True),
        alb.Normalize(config.mean, config.std, always_apply=True),
        alb.HorizontalFlip(p=0.1),
        alb.RandomBrightness(p=0.2),
        alb.RandomContrast(p=0.1),
        alb.RGBShift(p=0.1),
        alb.GaussNoise(p=0.1),
    ])
    # Validation: deterministic resize + normalize only.
    val_transforms = alb.Compose([
        alb.Resize(config.image_height, config.image_width, always_apply=True),
        alb.Normalize(config.mean, config.std, always_apply=True)
    ])
    train_data = dataloader.dataloader(train_dataset, train_transform)
    val_data = dataloader.dataloader(val_dataset, val_transforms)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        num_workers=4,
        pin_memory=True,
        batch_size=config.Batch_Size
    )
    val_loader = torch.utils.data.DataLoader(
        val_data,
        num_workers=4,
        pin_memory=True,
        batch_size=config.Batch_Size
    )
    # 32x32 inputs split into 16x16 patches -> max_len = 4 patch tokens.
    model = ImageTransformer.ViT(
        patch_height = 16,
        patch_width = 16,
        embedding_dims = 768,
        dropout = 0.1,
        heads = 4,
        num_layers = 4,
        forward_expansion = 4,
        max_len = int((32*32)/(16*16)),
        layer_norm_eps = 1e-5,
        num_classes = 10,
    )
    if torch.cuda.is_available():
        accelarator = 'cuda'
    else:
        accelarator = 'cpu'
    device = torch.device(accelarator)
    torch.backends.cudnn.benchmark = True
    model = model.to(device)
    optimizer = transformers.AdamW(model.parameters(), lr=config.LR, weight_decay=config.weight_decay)
    # Linear warmup over the first 10% of all optimizer steps.
    # NOTE(review): uses len(train_dataset), not len(train_loader) * Epochs;
    # equivalent only when the loader does not drop the last partial batch.
    num_training_steps = int((config.Epochs*len(train_dataset))/config.Batch_Size)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps = int(0.1*num_training_steps),
        num_training_steps = num_training_steps
    )
    best_acc = 0
    best_model = 0  # placeholder; replaced by a state_dict on first improvement
    for epoch in range(config.Epochs):
        train_acc, train_loss = engine.train_fn(model, train_loader, optimizer, scheduler, device)
        val_acc, val_loss = engine.eval_fn(model, val_loader, device)
        print(f'\nEPOCH = {epoch+1} / {config.Epochs} | LR = {scheduler.get_last_lr()[0]}')
        print(f'TRAIN ACC = {train_acc*100}% | TRAIN LOSS = {train_loss}')
        print(f'VAL ACC = {val_acc*100}% | VAL LOSS = {val_loss}')
        # Checkpoint only on validation-accuracy improvement.
        if val_acc > best_acc:
            best_acc = val_acc
            best_model = model.state_dict()
            torch.save(best_model, config.Model_Path)
if __name__ == "__main__":
    run()  # entry point: kick off CIFAR-10 ViT training
| null | train.py | train.py | py | 3,016 | python | en | code | null | code-starcoder2 | 50 |
341788346 | import requests
from lxml import etree
from keras.preprocessing.text import text_to_word_sequence as sq2wsq
import json
from requests.adapters import HTTPAdapter
import time
word_size=150
header={"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134"}
file=[]
file.append("phaseA_4b_01.json")
for f_name in file:
print(f_name)
f=open(f_name,encoding="utf-8")
phase=json.load(f)
phase=phase["questions"]
for e1,e2,in enumerate(phase):
body=e2["body"]
body=sq2wsq(body)
mid=[]
for i in body:
if len(i)>2 and i not in stopword:
mid.append(i)
body=mid
xm='https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id='+UID+'&retmode=abstract&rettype=text'
try:
r=requests.get(xm,headers=header,timeout=15)
except:
r=requests.get(xm,headers=header,timeout=15) | null | RAE-Recursive-AutoEncoder-for-bioasq-taskB-phaseA-snippets-retrieve-/get-s.py | get-s.py | py | 1,021 | python | en | code | null | code-starcoder2 | 50 |
310121907 | from selenium import webdriver
import webbrowser
nameofbot = "likuz"
bot = True
print("*Likuz is alive*")
print("'Hello, my friend :)'")
"""""""""""""""""""""""""""""""""""""LOGIN"""""""""""""""""""""""""""""""""""""
def newlogin():
    """Prompt for an ID and password on stdin.

    Returns 1 when both match the hard-coded credentials, 0 otherwise.
    Prints a status message for each outcome.
    """
    expected_id = "a"
    expected_pw = "1"
    print("Type in your ID")
    if input() != expected_id:
        print("not successful id")
        return 0
    print("Give your PW")
    if input() != expected_pw:
        print("Not successful pw")
        return 0
    print("Logged in as asariox")
    return 1
""""""""""""""""""""""""""""""""""Functions if logged in"""""""""""""""""""""
def kick_member(name):
    """Announce that *name* was kicked (stub: only prints, no real action)."""
    print(f"{name} got kicked")
def inv_member(name):
    """Announce that *name* was invited (stub: only prints, no real action)."""
    print(f"{name} has been invited")
def open_website(url):
    """Open a website in the default browser.

    *url* is a shortcut key ("yt", "fb", "hotmail", "asar", "pr0") that is
    resolved to a full hostname.  Unknown shortcuts print an error and do
    nothing.

    Bug fix: the original if/elif chain only printed the failure message and
    then fell through to webbrowser.open() with the raw, unresolved shortcut
    (e.g. opening "https://yt" style junk for unknown keys).  Now we return
    early instead.
    """
    # Shortcut -> hostname table (was an if/elif chain over a parallel list).
    shortcuts = {
        "yt": "youtube.com",
        "fb": "fb.com",
        "hotmail": "hotmail.com",
        "asar": "asariox.de",
        "pr0": "pr0gramm.com",
    }
    if url not in shortcuts:
        print("Failed to continue the programm. Sorry.")
        return
    webbrowser.open('https://' + shortcuts[url])
def sayvar():
    """Prompt the user for a message, echo it, then report completion."""
    message = input("what do you want to say: ")
    print(message)
    print("successfully completed sayvar")
def add_member():
    """Ask for a name on stdin, append it to the member deque, and pickle
    the deque to 'filename.txt'.

    Bug fix: the original opened the pickle file with open() and never
    closed the handle; a context manager now guarantees the file is
    flushed and closed.
    """
    from collections import deque
    import pickle
    members = deque(["likuz", "Elswarrior"])
    name = input("names?")
    members.append(name)
    print(members)
    with open('filename.txt', 'wb') as filehandler:
        pickle.dump(members, filehandler)
#all
def delete_member():
    """Ask for a name on stdin, remove it from the member deque, and pickle
    the deque to 'filename.txt'.

    Bug fixes:
    - `alist.pop(a)` always raised TypeError: deque.pop() takes no
      argument.  The intent (delete by value) is deque.remove(name), which
      raises ValueError if the name is absent.
    - The pickle file handle was never closed; a context manager now
      guarantees cleanup.
    """
    from collections import deque
    import pickle
    members = deque(["likuz", "Elswarrior"])
    name = input("names?")
    members.remove(name)
    print(members)
    with open('filename.txt', 'wb') as filehandler:
        pickle.dump(members, filehandler)
def permissions():
    """Print the (hard-coded) permission entry for Elswarrior."""
    role = "Admin"
    print(" = ".join(("Elswarrior", role)))
def swap_vars():
    """TEST PURPOSE: demonstrate the tuple-unpacking swap idiom.

    Purely local; returns None and has no side effects.
    """
    first, second = 1, 2
    second, first = first, second
    (first, second)  # evaluated and discarded, mirroring the original demo
""""""""""""""""""""""""""""""""""Control (BOT)"""""""""""""""""""""""""""""""""""""""
# Module-level login gate: newlogin() runs (and blocks on stdin) at import
# time.  `a` is only bound on success, so whilea()'s loop condition raises
# NameError when the login failed -- TODO confirm that is intended.
if newlogin() == 1: # since I have the new newlogin return value
    a = "1" # I should initialize a with it to keep "while" active
def whilea():
    """Interactive command loop for the bot.

    Repeatedly prints the menu and dispatches on one-word commands read from
    stdin.  Loops while the module-level flags `bot`, `a`, and `nameofbot`
    hold their expected values (NOTE: `a` is only bound when newlogin()
    succeeded, so the loop condition raises NameError otherwise).
    Any unrecognized command prints a message and terminates via quit().
    """
    while bot and a == "1" and nameofbot == "likuz":
        # menu
        print("Bot Control Interface")
        print("""
        k = kicks member (LINE)
        a = adds member (LINE)
        i = invites member (LINE)
        o = opens a website (PERSONAL USE)
        say = let us the bot say something u typed in (BOTH)
        video = youtube search
        permissions() = shows list of users
        ... more functions will be added when I'm not lazy ...
        """)
        typein = input()
        if typein == "k":
            name = input()
            kick_member(name)
        elif typein == "i":
            name = input()
            inv_member(name)
        elif typein == "o":
            # Bug fix: `url` was never defined in this scope, so this branch
            # always crashed with NameError.  Ask the user for the shortcut.
            url = input()
            open_website(url)
        elif typein == "say":
            inputsay = input("command: ")
            print("successfully completed command")
            sayvar()
            #global (nameofbot) variables just work in definitions of functions
            if inputsay == nameofbot + " " + "say":
                print("successfully completed if command")
                # NOTE(review): `sayvar` here is the function object, which is
                # always truthy -- this branch always runs and prints its repr.
                if sayvar and sayvar is not None:
                    print("successfully completed sayvar")
                    print(sayvar)
                else:
                    print("sayvar failed")
            else:
                print("command failed")
        elif typein == "video":
            search_video = input()
            video_link = "https://www.youtube.com/results?search_query="
            xyz = video_link + search_video
            browser = webdriver.Firefox()
            browser.get(xyz)
            # (translated) Only works for a single-word search; needs fixing.
            z = browser.find_element_by_partial_link_text('/watch?v=').click()
            print(z)
            #webbrowser.get(video_link + search_video)
        else:
            print("No, oh shed")
            quit()
"""""""MAIN PROGRAMM"""""""""""""
if __name__ == "__main__":
whilea()
else:
print("FAILED!")
"""""""""""""""""""""""""""""""""END OF DOCUMENT"""""""""""""""""""""""""""""
print("Goodbye, my friend ::")
print("*Likuz is dead*")
| null | test.py | test.py | py | 5,611 | python | en | code | null | code-starcoder2 | 50 |
27926458 | # Neural Network Trainer
# nw.py
import numpy as np
import tensorflow as tf
import csv
import cv2
import os.path
import random
EPOCHS = 2
print(' Loading Data ')
#50 percent random
def fifty(percent=50):
    """Return True with roughly *percent*% probability (default: a coin flip)."""
    roll = random.randrange(100)
    return roll < percent
#load and apply some transformations to images
def loadAndProcess(img_path):
    """Load an image from disk, add a small random brightness offset, and
    return it as an RGB array.

    NOTE(review): `v` is the uint8 value channel, so `v += randrange(10)`
    can wrap around past 255 instead of saturating -- confirm whether that
    is intended.
    """
    img = cv2.imread(img_path)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
    h, s, v = cv2.split(hsv)
    v += random.randrange(10)  # brightness jitter in [0, 9]
    light_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(light_hsv, cv2.COLOR_HSV2RGB)
    return img
# Read the driving log CSV and build (image, steering angle) training arrays.
# Each row contributes three samples: center camera plus left/right cameras
# with a +/-0.35 steering correction.
with open('new_style/driving_log.csv') as csvfile:
    row_count = sum(1 for row in csvfile)  # first pass: count rows
    print('reading %d lines' % row_count)
    # row_count = 400
    csvfile.seek(0, 0)  # rewind for the actual parsing pass
    reader = csv.reader(csvfile)
    angles = np.zeros(shape=(row_count*3))
    trainimgs = np.zeros(shape=(row_count*3,80,80,3))
    idx = 0
    failed = 0
    dropped = 0
    for line in reader:
        center_img_path = line[0]
        left_img_path = line[1]
        right_img_path = line[2]
        center_angle = float(line[3])
        left_angle = float(center_angle+0.35)
        right_angle = float(center_angle-0.35)
        # Angles are written before the skip checks; since idx only advances
        # when images are stored, skipped rows are simply overwritten.
        angles[idx] = center_angle
        angles[idx+1] = left_angle
        angles[idx+2] = right_angle
        if (not os.path.exists(center_img_path)):
            print(center_img_path)
            failed+=1
            continue
        # Randomly drop ~50% of near-straight samples to balance the data.
        chance_to_keep = fifty()
        if ((center_angle < 0.2) and not chance_to_keep):
            dropped+=1
            continue
        imgRGB = loadAndProcess(center_img_path)
        limgRGB = loadAndProcess(left_img_path)
        rimgRGB = loadAndProcess(right_img_path)
        trainimgs[idx] = imgRGB
        trainimgs[idx+1] = limgRGB
        trainimgs[idx+2] = rimgRGB
        idx+=3
        # NOTE(review): idx advances by 3 per kept row while the arrays hold
        # row_count*3 entries, so this stops after ~row_count/3 kept rows and
        # leaves the tail of angles/trainimgs as zeros -- likely meant
        # `idx == row_count*3`.  Confirm before relying on the full dataset.
        if (idx == row_count):
            break
print('failed to load %d' % failed)
print('dropped for 0 angle %d' % dropped)
print(angles.shape)
print(trainimgs.shape)
print('print angles plot...')
import scipy.signal as signal  # NOTE(review): imported but never used here
# checking angles distribution
import matplotlib
matplotlib.use('agg')  # headless backend: must be set before pylab import
import pylab as plt
fig, ax = plt.subplots( nrows=1, ncols=1 )
ax.plot(angles)
fig.savefig('angles.png') # save the figure to file
print('Data loaded')
# NOTE(review): monkey-patch presumably works around an old Keras/TF version
# incompatibility -- confirm it is still needed for the pinned versions.
tf.python.control_flow_ops = tf
X = trainimgs
y = angles
# Initial Setup for Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
# Normalize pixel values from [0, 255] to [-1, 1] inside the graph.
model.add(Lambda(lambda x: x/127.5 - 1.,input_shape=(80, 80, 3)))
model.add(Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same", input_shape=(80,80, 3)))
model.add(Activation('elu'))
model.add(Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(Activation('elu'))
model.add(Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"))
model.add(Flatten())
model.add(Dropout(.2))
model.add(Activation('elu'))
model.add(Dense(512))
model.add(Dropout(.5))
model.add(Activation('elu'))
model.add(Dense(1))  # single regression output (steering angle)
print('Start training')
model.compile(loss='mse', optimizer='adam')
history = model.fit(X, y, nb_epoch=EPOCHS, validation_split=0.2, shuffle=True)
model.save('model.h5')
print('Saved model.h5')
# because some wierd exceptions happens sometimes
# https://stackoverflow.com/questions/40560795/tensorflow-attributeerror-nonetype-object-has-no-attribute-tf-deletestatus
import gc; gc.collect()
| null | nw.py | nw.py | py | 3,617 | python | en | code | null | code-starcoder2 | 50 |
333405446 | #!/usr/bin/env python3
# -*- coding: UTF-8 –*-
# This module creates MONITOR objects and inspects MONITOR state.
import f5.bigip
from f5.bigip import ManagementRoot
import class_f5conn
from class_f5conn import F5CONNClass
# Define the Monitor class, inheriting from the f5ltm parent class.
class F5MONITORClass(F5CONNClass):
    """Wrapper around F5 LTM health-monitor endpoints (TCP/HTTP/HTTPS)."""

    # Initializer: establish the connection via the parent class and cache
    # shortcuts to the three monitor endpoint collections.
    def __init__(self,deviceip,username,password):
        F5CONNClass.__init__(self,deviceip,username,password)
        self.http = self.mgmt.tm.ltm.monitor.https.http
        self.https = self.mgmt.tm.ltm.monitor.https_s.https
        self.tcp = self.mgmt.tm.ltm.monitor.tcps.tcp
        self.monitor = self.mgmt.tm.ltm.monitor

    # Create a monitor of one of three supported types: TCP, HTTP, HTTPS
    # (other types see little use, so they are not implemented).
    # Parameters: monitortype is the monitor kind (tcp/http/https),
    # monitorname is the new monitor's name, send is the probe message,
    # recv is the expected success response, and recvDisable (via
    # **recvmsg) is the failure response -- rarely needed.
    # NOTE(review): the class attribute below is shadowed by the **recvmsg
    # parameter inside createMonitor and appears unused -- confirm intent.
    recvmsg = {'recvDisable':None}
    def createMonitor(self,monitortype,monitorname,send,recv,**recvmsg):
        http = self.http
        https = self.https
        tcp = self.tcp
        # Check the requested monitor type and create the matching monitor;
        # skip (with a message) if one of that name already exists.
        if monitortype == 'tcp':
            if tcp.exists(name = monitorname) == True:
                print(">>>TcpMonito" + monitorname + "已经存在,请检查配置文件!!!")
            else:
                tcpmonitor = tcp.create(name = monitorname,send = send , recv = recv , **recvmsg)
                print(">>>TcpMonito" + monitorname + "创建成功!!!")
        elif monitortype == 'http':
            if http.exists(name = monitorname) == True:
                print(">>>HttpMonito" + monitorname + "已经存在,请检查配置文件!!!")
            else:
                httpmonitor = http.create(name = monitorname,send = send , recv = recv , **recvmsg)
                print(">>>HttpMonito" + monitorname + "创建成功!!!")
        elif monitortype == 'https':
            if https.exists(name = monitorname) == True:
                print(">>>HttpsMonito" + monitorname + "已经存在,请检查配置文件!!!")
            else:
                httpsmonitor = https.create(name = monitorname,send = send , recv = recv , **recvmsg)
                print(">>>HttpsMonito" + monitorname + "创建成功!!!")
        else:
            print(">>>您输入的Monitor类型不正确,支持的类型有TCP,HTTP,HTTPS,请输入前3种类型的Monitor类型!!!")

    # Fetch the names of all monitors (TCP, HTTP, and HTTPS), cache them on
    # the instance, and return the three lists as a tuple.
    def getAllMonitor(self):
        monitorhttplist = []
        monitorhttpslist = []
        monitortcplist = []
        monitor_http_collection = self.monitor.https.get_collection()
        monitor_https_collection = self.monitor.https_s.get_collection()
        monitor_tcp_collection = self.monitor.tcps.get_collection()
        [monitorhttplist.append(monitorhttp.name) for monitorhttp in monitor_http_collection]
        [monitorhttpslist.append(monitorhttps.name) for monitorhttps in monitor_https_collection]
        [monitortcplist.append(monitortcp.name) for monitortcp in monitor_tcp_collection]
        self.monitorhttplist = monitorhttplist
        self.monitorhttpslist = monitorhttpslist
        self.monitortcplist = monitortcplist
        return self.monitorhttplist,self.monitorhttpslist,self.monitortcplist

    # Return the cached monitor-name list for one type (tcp/http/https).
    # NOTE(review): requires a prior getAllMonitor() call to populate the
    # cached lists; returns None (after printing) for unknown types.
    def getMonitor(self,monitortype):
        if monitortype == 'tcp':
            return self.monitortcplist
        elif monitortype == 'http':
            return self.monitorhttplist
        elif monitortype == 'https':
            return self.monitorhttpslist
        else:
            print(">>>您输入的Monitor类型不正确,类型有TCP,HTTP,HTTPS,请输入正确的Monitor类型!!!")
103368480 | #!/usr/bin/python
__author__ = "Pieter du Toit"
import pyfuzz
import socket
msg = b"GET "+ pyfuzz.generator.random_ascii() + b" HTTP/1.1\nHOST: 10.90.88.26\r\n"
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
addr = ('10.90.88.26', 80)
s.connect(addr)
s.sendall(msg)
resp = s.recv(4096)
print(resp)
except Exception as e:
print(e)
finally:
s.close() | null | python-network-security/Fuzzing.py | Fuzzing.py | py | 395 | python | en | code | null | code-starcoder2 | 51 |
261913526 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import json
from datetime import datetime
from random import choice, randint, shuffle
from django.contrib.auth.models import User
from contenido.models import *
Desafio.objects.all().delete()
Jugador_equipo.objects.all().delete()
Equipo.objects.all().delete()
Localidad.objects.all().delete()
Deporte.objects.all().delete()
Institucion.objects.all().delete()
equipos_por_deporte = 15
num_desafios_por_deporte = 10
admin = [{'nombre': ['admin', 'admin'], 'fechanac': '1998-03-21 00:00:00', }]
f = open('jugadores.json', 'r').read()
users = json.loads(f)
for u in admin+users:
aux_username = u['nombre'][0][0].lower()+u['nombre'][1].lower()
if User.objects.filter(username=aux_username).count() == 0:
us = User()
us.username = aux_username
us.password = 'pbkdf2_sha256$24000$WuVONA6Xzl7V$N7a1GHNhLV9cKNeHkK4JBBXeTeOL99jkmElfN4AkEPg=' # 'adidas10'
us.save()
ju = Jugador()
ju.Nombre = u['nombre'][0]
ju.Apellido = u['nombre'][1]
ju.FechaNac = u['fechanac']
ju.idUser = us
ju.save()
print('[*] Cargados %d usuarios' % len(users))
deportes = [('Futbol', 5, 0), ('Tenis', 1, 0), ('Basquet', 5, 0), ('Paddle', 1, 0), ('Ajedrez', 1, 2), ('Counter Strike 1.6', 5, 1), ('Dota 2', 5, 1), ('League of Legends', 5, 1),]
for d in deportes:
aux = Deporte()
aux.Deporte = d[0]
aux.MinJugadores = d[1]
aux.Tipo = d[2]
aux.save()
print('[*] Cargados deportes')
locs = ['Online', 'Rafaela', 'Sunchales', 'Hawai', 'Bahamas', 'Santa Fe', 'CABA',
'San Juan', 'Cordoba', 'San Luis', 'San Justo', 'Salta', 'La Plata',
'Parque Patricios', 'Nuñez', 'Angelica', 'Reconquista', 'El Trebol',
'Puerto Madrin', 'Bahia Blanca', 'San Martin', 'Mendoza', 'Salto',
'Yapeyu', 'Totoras', 'Lincoln', 'Belgrano',
]
for l in locs:
aux = Localidad()
aux.Nombre = l
aux.save()
print('[*] Cargadas localidades')
nombres_instituciones = [
'La redo', 'Playfutbol', 'Soccer', 'Paddle friends', 'La redondita',
'Boca jrs', 'C.A.R.P', 'Jockey Club', 'Los nogales', 'La rural',
'Divertite', 'Pinocho', 'La mula', 'Club Platense', 'Los 3 hermanos',
'La abadia', 'Picachu', 'Dreamers', 'DeportesYa', 'Tu partido',
]
localidades = list(Localidad.objects.all())
for i in nombres_instituciones:
iux = Institucion()
iux.Nombre = i
iux.Localidad = choice(localidades)
iux.save()
iux = Institucion()
iux.Nombre = "Server 1"
iux.Localidad = Localidad.objects.filter(Nombre='Online')[0]
iux.save()
print('[*] Cargadas instituciones')
palabras = [
['capos', 'mejores', 'guerreros', 'luchadores', 'gnomos', 'pasteles', 'chicos', 'amigos', 'samurai', 'monjes', 'monos', 'chicos'],
['del pais', 'de la ciudad', 'de la pradera', 'del colegio', 'del mundo', 'del juego', 'de la victoria', 'del triunfo', 'del python', 'de oriente',
'de la montaña', 'del universo', 'de la galaxia', 'del deporte']
]
nombre_equipo = list(itertools.product(palabras[0], palabras[1]))
for d in Deporte.objects.all():
for _ in range(equipos_por_deporte):
r = choice(nombre_equipo)
e = Equipo()
e.Nombre = " ".join(r)
if d.Tipo == 1:
e.Localidad = Localidad.objects.filter(Nombre='Online')[0]
else:
e.Localidad = choice(Localidad.objects.all())
e.Deporte = d
e.save()
jugadores = list(Jugador.objects.all())
shuffle(jugadores)
for _ in range(d.MinJugadores):
je = Jugador_equipo()
je.idEquipo = e
je.idJugador = jugadores.pop()
je.Encargado = choice([True, False])
je.Aceptado = True
je.save()
print('[*] Cargados equipos y jugadores a los equipos')
instituciones = list(Institucion.objects.all())
for d in Deporte.objects.all():
for _ in range(num_desafios_por_deporte):
equipos = list(Equipo.objects.filter(Deporte=d))
if len(equipos) > 1:
shuffle(equipos)
desafio = Desafio()
desafio.Deporte = d
desafio.ParticipanteA = equipos.pop()
desafio.ParticipanteB = equipos.pop()
desafio.Lugar = choice(instituciones)
desafio.AceptadoA = True
desafio.AceptadoB = True
desafio.FechaHora = datetime(randint(1990, 2016), randint(1, 12), randint(1, 28), randint(1, 23), randint(1, 59))
desafio.save()
print('[*] Cargados desafios')
# Extras
for c in ['Fair play', 'Puntualidad', 'Buena onda', 'Respeto', 'Habilidad', 'Respeto', 'Habilidad']:
cux = Criterio()
cux.Criterio = c
cux.save()
print('[*] Cargados criterios de calificacion')
| null | jugatealgo/data.py | data.py | py | 4,816 | python | en | code | null | code-starcoder2 | 51 |
204380708 | import json
from flask import request, Response
from . import app
from .services import user_profile_service
from .clients import USER_PROFILE_CLIENT_MAP
def error_response(exception, status, message=None):
    """Build a JSON error Response.

    Uses *message* when given, otherwise the stringified *exception*;
    the payload shape is {"error_message": <text>}.
    """
    body = {'error_message': str(exception) if message is None else message}
    return Response(
        json.dumps(body, default=str),
        status=status,
        mimetype='application/json'
    )
@app.route('/user-profile', methods=['GET'])
def generate_user_profile():
    """
    Processes a GET request with third_party client usernames
    as query params.

    Each query-param key is looked up in USER_PROFILE_CLIENT_MAP; matched
    clients are registered with the user-profile service, then the merged
    profile is returned as JSON.  Any bad username aborts with a 400.
    """
    log_msg = "Received request for User Profile Data:\n"
    args = request.args
    user_names = ""
    for client_key, username in args.items():
        user_names += "{}: {}\n".format(client_key, username)
        # Unknown client keys are silently skipped.
        client = USER_PROFILE_CLIENT_MAP.get(client_key)
        if client:
            # NOTE(review): reaches into a private member of the service --
            # consider exposing a public add_client API.
            success = user_profile_service._add_client(client, username)
            if not success:
                error_msg = (
                    "Received Bad Username in Request for User Profile "
                    "Data - {}: {}".format(client_key, username)
                )
                app.logger.debug(error_msg)
                # Decide whether continue if still have 1 valid user in the bunch or error
                # out altogether. Keeping it at all must be valid
                # NOTE(review): unlike error_response(), this 400 omits
                # mimetype='application/json' and uses the key "error" --
                # confirm clients tolerate the inconsistency.
                return Response(json.dumps({"error": error_msg}), status=400)
    app.logger.info(log_msg + user_names)
    user_profile = user_profile_service.get_user_profile()
    return Response(
        json.dumps(user_profile, default=str),
        status=200,
        mimetype='application/json'
    )
| null | app/routes.py | routes.py | py | 1,702 | python | en | code | null | code-starcoder2 | 51 |
594710079 | from flask import render_template, request
from flask.json import jsonify
from models.models import Feed
def handle_feed(feed_id):
    """Dispatch a feed request: JSON payload when ?format=json, else the
    HTML shell (index.html)."""
    if request.args.get('format') == 'json':
        return handle_json_response(feed_id)
    return render_template('index.html')
def handle_json_response(feed_id):
    """Return the feed identified by *feed_id* as a JSON payload.

    Bug fix: Feed.query...first() returns None for an unknown id, and the
    original then crashed with AttributeError on feed.serialize() (an HTTP
    500).  An unknown feed now yields an explicit 404 with an error body.
    """
    feed = Feed.query.filter_by(id=feed_id).first()
    if feed is None:
        return jsonify({'error_message': 'feed not found'}), 404
    serialized_feed = feed.serialize()
    json_response = {
        'feed': serialized_feed
    }
    return jsonify(json_response)
| null | controllers/feed.py | feed.py | py | 535 | python | en | code | null | code-starcoder2 | 51 |
347533480 | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: A Tree
@return: Inorder in ArrayList which contains node values.
"""
def inorderTraversal(self, root):
results = []
self.inorder(root, results)
return results
def inorder(self, root, results):
if root is None:
return
self.inorder(root.left, results)
results.append(root.val)
self.inorder(root.right, results)
| null | US Giants/Binary Tree/67. Binary Tree Inorder Traversal.py | 67. Binary Tree Inorder Traversal.py | py | 582 | python | en | code | null | code-starcoder2 | 51 |
247220873 | # Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_federated as tff
from compressed_communication.aggregators.comparison_methods import three_lc
_test_value_type_integer_tensor = (tf.int32, (3,))
_test_value_type_float_tensor = (tf.float32, (3,))
_test_value_type_list_integer_tensors = [(tf.int32, (2,)),
(tf.int32, (3,))]
class ThreeLCComputationTest(tf.test.TestCase, parameterized.TestCase):
  """Type-signature tests for the 3LC aggregation factory."""

  @parameterized.named_parameters(
      ('float_tensor', _test_value_type_float_tensor))
  def test_three_lc_properties(self, value_type):
    """The created process exposes the expected TFF type signatures."""
    factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)
    value_type = tff.to_type(value_type)
    process = factory.create(value_type)
    self.assertIsInstance(process, tff.templates.AggregationProcess)
    # initialize() takes no argument and yields empty server-placed state.
    server_state_type = tff.type_at_server(())
    expected_initialize_type = tff.FunctionType(
        parameter=None, result=server_state_type)
    tff.test.assert_types_equivalent(process.initialize.type_signature,
                                     expected_initialize_type)
    # next() reports avg_bitrate / avg_distortion as server measurements.
    expected_measurements_type = tff.StructType([
        ('avg_bitrate', tf.float32),
        ('avg_distortion', tf.float32)
    ])
    expected_measurements_type = tff.type_at_server(expected_measurements_type)
    expected_next_type = tff.FunctionType(
        parameter=collections.OrderedDict(
            state=server_state_type, value=tff.type_at_clients(value_type)),
        result=tff.templates.MeasuredProcessOutput(
            state=server_state_type,
            result=tff.type_at_server(value_type),
            measurements=expected_measurements_type))
    tff.test.assert_types_equivalent(process.next.type_signature,
                                     expected_next_type)

  @parameterized.named_parameters(
      ('integer_tensor', _test_value_type_integer_tensor),
      ('list_integer_tensors', _test_value_type_list_integer_tensors))
  def test_three_lc_create_raises(self, value_type):
    """Non-float value types are rejected at create() time."""
    factory = three_lc.ThreeLCFactory()
    value_type = tff.to_type(value_type)
    self.assertRaises(ValueError, factory.create, value_type)
class ThreeLCExecutionTest(tf.test.TestCase, parameterized.TestCase):
  """End-to-end execution tests checking aggregated results and the
  avg_bitrate / avg_distortion measurements."""

  @parameterized.named_parameters(
      ('float_tensor', _test_value_type_float_tensor))
  def test_correctness_one_client(self, value_type):
    """A single all-ones client is reconstructed exactly (zero distortion)."""
    factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)
    value_type = tff.to_type(value_type)
    process = factory.create(value_type)
    state = process.initialize()
    client_values = [tf.ones(value_type.shape)]
    expected_result = tf.ones(value_type.shape)
    # 40 bits over a 3-element tensor -> 40/3 bits per element.
    expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,
                                                    avg_distortion=0.)
    measurements = process.next(state, client_values).measurements
    self.assertAllClose(measurements, expected_measurements)
    result = process.next(state, client_values).result
    self.assertAllClose(result, expected_result)

  @parameterized.named_parameters(
      ('float_tensor', _test_value_type_float_tensor))
  def test_correctness_one_client_high_sparsity(self, value_type):
    """Extreme sparsity zeroes everything out, giving distortion 1."""
    factory = three_lc.ThreeLCFactory(sparsity_factor=10000.0)
    value_type = tff.to_type(value_type)
    process = factory.create(value_type)
    state = process.initialize()
    client_values = [tf.ones(value_type.shape)]
    expected_result = tf.zeros(value_type.shape)
    expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,
                                                    avg_distortion=1.)
    measurements = process.next(state, client_values).measurements
    self.assertAllClose(measurements, expected_measurements)
    result = process.next(state, client_values).result
    self.assertAllClose(result, expected_result)

  @parameterized.named_parameters(
      ('float_tensor', _test_value_type_float_tensor))
  def test_correctness_identical_clients(self, value_type):
    """Two identical clients: small values are compressed away, large kept."""
    factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)
    value_type = tff.to_type(value_type)
    process = factory.create(value_type)
    state = process.initialize()
    client_values = [[-1.0, 0.0, 100000.0] for _ in range(2)]
    expected_result = [0.0, 0.0, 200000.0]
    expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,
                                                    avg_distortion=1./3.)
    measurements = process.next(state, client_values).measurements
    self.assertAllClose(measurements, expected_measurements)
    result = process.next(state, client_values).result
    self.assertAllClose(result, expected_result)

  @parameterized.named_parameters(
      ('float_tensor', _test_value_type_float_tensor))
  def test_correctness_different_clients(self, value_type):
    """Two different clients sum exactly with zero distortion."""
    factory = three_lc.ThreeLCFactory(sparsity_factor=1.0)
    value_type = tff.to_type(value_type)
    process = factory.create(value_type)
    state = process.initialize()
    client_values = [[0.0, 0.0, 2.0], [1.0, 1.0, 1.0]]
    expected_result = [1.0, 1.0, 3.0]
    expected_measurements = collections.OrderedDict(avg_bitrate=40./3.,
                                                    avg_distortion=0.)
    measurements = process.next(state, client_values).measurements
    self.assertAllClose(measurements, expected_measurements)
    result = process.next(state, client_values).result
    self.assertAllClose(result, expected_result)
if __name__ == '__main__':
  tf.test.main()  # delegate to the TF test runner (handles flags/sharding)
| null | compressed_communication/aggregators/comparison_methods/three_lc_test.py | three_lc_test.py | py | 6,083 | python | en | code | null | code-starcoder2 | 51 |
39736511 | import mock
import unittest
from pcp_pidstat import ProcessMemoryUtil
class TestProcessMemoryUtil(unittest.TestCase):
    """Unit tests for pcp_pidstat.ProcessMemoryUtil against a mocked
    metric repository.

    Fix: all `assertEquals` calls were replaced with `assertEqual`; the
    deprecated aliases were removed in Python 3.12 and raise
    AttributeError there.
    """

    def setUp(self):
        # The repository mock answers current/previous metric queries from
        # the canned side-effect tables below.
        self.__metric_repository = mock.Mock()
        self.__metric_repository.current_value = mock.Mock(side_effect=self.metric_repo_current_value_side_effect)
        self.__metric_repository.previous_value = mock.Mock(side_effect=self.metric_repo_previous_value_side_effect)

    def metric_repo_current_value_side_effect(self, metric_name,instance):
        # Canned "current" samples for PID 1; unmatched queries fall
        # through and return None.
        if metric_name == 'proc.psinfo.vsize' and instance == 1:
            return 120084
        if metric_name == 'proc.psinfo.rss' and instance == 1:
            return 6272
        if metric_name == 'proc.psinfo.cmin_flt' and instance == 1:
            return 573935
        if metric_name == 'proc.psinfo.minflt' and instance == 1:
            return 14509
        if metric_name == 'proc.psinfo.cmaj_flt' and instance == 1:
            return 647
        if metric_name == 'proc.psinfo.maj_flt' and instance == 1:
            return 54
        if metric_name == 'mem.physmem':
            return 3794764
        if metric_name == 'proc.psinfo.cmd' and instance == 1:
            return "test"
        if metric_name == 'proc.psinfo.processor' and instance == 1:
            return 0
        if metric_name == 'proc.id.uid' and instance == 1:
            return 1
        if metric_name == 'proc.psinfo.pid' and instance == 1:
            return 1

    def metric_repo_previous_value_side_effect(self, metric_name,instance):
        # Canned "previous" samples used for the per-interval fault rates.
        if metric_name == 'proc.psinfo.cmin_flt' and instance == 1:
            return 573930
        if metric_name == 'proc.psinfo.minflt' and instance == 1:
            return 14500
        if metric_name == 'proc.psinfo.cmaj_flt' and instance == 1:
            return 645
        if metric_name == 'proc.psinfo.maj_flt' and instance == 1:
            return 50

    def test_vsize(self):
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        vsize = process_memory_usage.vsize()
        self.assertEqual(vsize, 120084)

    def test_rss(self):
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        rss = process_memory_usage.rss()
        self.assertEqual(rss, 6272)

    def test_mem(self):
        # %mem = 100 * rss / physical memory, rounded to 2 decimals.
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        test_mem = float("%.2f"%(100*float(6272)/3794764))
        mem = process_memory_usage.mem()
        self.assertEqual(mem, test_mem)

    def test_min_flt(self):
        # Minor-fault rate: delta of (child + own) faults over the interval.
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        test_min_flt = float("%.2f"%(((573935 + 14509) - (573930 + 14500))/1.34))
        min_flt = process_memory_usage.minflt()
        self.assertEqual(min_flt, test_min_flt)

    def test_maj_flt(self):
        # Major-fault rate: delta of (child + own) faults over the interval.
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        test_maj_flt = float("%.2f"%(((647 + 54) - (645 + 50))/1.34))
        maj_flt = process_memory_usage.majflt()
        self.assertEqual(maj_flt, test_maj_flt)

    def test_pid(self):
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        pid = process_memory_usage.pid()
        self.assertEqual(pid,1)

    def test_process_name(self):
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        name = process_memory_usage.process_name()
        self.assertEqual(name,'test')

    def test_user_id(self):
        process_memory_usage = ProcessMemoryUtil(1,1.34,self.__metric_repository)
        user_id = process_memory_usage.user_id()
        self.assertEqual(user_id,1)
if __name__ == '__main__':
    unittest.main()  # run the test case via the unittest CLI runner
| null | test/process_memoryutil_test.py | process_memoryutil_test.py | py | 3,734 | python | en | code | null | code-starcoder2 | 51 |
519935644 | #-*- coding: utf-8 -*-
# Uses BeautifulSoup to collect the source URLs of image files from a page.
import urllib
from bs4 import BeautifulSoup
# Get all img address at html
# Crawl board posts 33025140..33025149 and print every <img> src found.
for i in range(140, 150, 1):
    stri = str(i)
    # NOTE(review): urllib.urlopen exists only on Python 2; on Python 3 this
    # needs urllib.request.urlopen -- confirm the intended interpreter.
    html = urllib.urlopen('http://bbs.ruliweb.com/community/board/300143/read/33025'+ stri)
    soup = BeautifulSoup(html, "lxml")
    for link in soup.find_all('img'):
        print(link.get('src'))
| null | crawler1.py | crawler1.py | py | 425 | python | en | code | null | code-starcoder2 | 51 |
226888031 | import os
import textwrap
import json
import io
import yaml
import py
import pytest
import pkg_resources
import jinja2
import bs4
from sphinx.application import Sphinx
here = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture(scope='function')
def run_sphinx(tmpdir):
    """Yield a callable that renders a conf.py and runs a full Sphinx HTML
    build in *tmpdir* (src/ -> out/), with the github.yml spec staged under
    src/_specs."""
    src = tmpdir.mkdir('src')
    out = tmpdir.mkdir('out')
    spec = py.path.local(here).join('..', 'docs', '_specs', 'github.yml')
    spec.copy(src.mkdir('_specs').join('github.yml'))

    def run(redoc_overwrite=None, redoc_uri=None):
        """Build once; *redoc_overwrite* patches the redoc entry, *redoc_uri*
        overrides the bundle URI."""
        conf = {'name': 'Github API (v3)',
                'page': 'api/github/index',
                'spec': '_specs/github.yml'}
        conf.update(redoc_overwrite or {})
        # Render the project's conf.py from a template so each test can
        # inject its own `redoc` / `redoc_uri` settings.
        confpy = jinja2.Template(textwrap.dedent('''
            import os
            project = 'sphinxcontrib-redoc'
            copyright = '2017, Ihor Kalnytskyi'
            extensions = ['sphinxcontrib.redoc']
            source_suffix = '.rst'
            master_doc = 'index'
            redoc = {{ redoc }}
            redoc_uri = {{ redoc_uri }}
        ''')).render(redoc=[conf], redoc_uri=repr(redoc_uri))
        src.join('conf.py').write_text(confpy, encoding='utf-8')
        src.join('index.rst').ensure()
        Sphinx(
            srcdir=src.strpath,
            confdir=src.strpath,
            outdir=out.strpath,
            doctreedir=out.join('.doctrees').strpath,
            buildername='html'
        ).build()

    yield run
def test_redocjs_lib_is_copied(run_sphinx, tmpdir):
    """The bundled redoc.js is copied verbatim into the build's _static dir."""
    outdir = tmpdir.join('out')
    extdir = py.path.local(
        pkg_resources.get_provider('sphinxcontrib.redoc').module_path)
    run_sphinx()
    assert outdir.join('_static', 'redoc.js').check()
    # Hash equality proves the file came from the installed extension.
    assert outdir.join('_static', 'redoc.js').computehash() \
        == extdir.join('redoc.js').computehash()
def test_redocjs_lib_is_downloaded(run_sphinx, tmpdir):
    """When ``redoc_uri`` is set, redoc.js comes from the CDN, not the package."""
    package_dir = py.path.local(
        pkg_resources.get_provider('sphinxcontrib.redoc').module_path)
    downloaded = tmpdir.join('out', '_static', 'redoc.js')

    run_sphinx(redoc_uri=(
        'https://cdn.jsdelivr.net/npm/redoc@2.0.0-alpha.32/bundles'
        '/redoc.standalone.js'))

    assert downloaded.check()
    # Not the copy shipped with the package ...
    assert downloaded.computehash() \
        != package_dir.join('redoc.js').computehash()
    # ... but exactly the pinned CDN build.
    assert downloaded.computehash() == '6978103258cab653263b5b75c008b474'
def test_openapi_spec_is_copied(run_sphinx, tmpdir):
    """The OpenAPI spec is copied unchanged into the build directory."""
    run_sphinx()

    source_spec = tmpdir.join('src', '_specs', 'github.yml')
    built_spec = tmpdir.join('out', '_specs', 'github.yml')

    assert built_spec.check()
    assert built_spec.computehash() == source_spec.computehash()
# Each case maps the ``opts`` passed to the extension to the HTML attributes
# expected on the generated <redoc> element (flags render as empty-string
# attributes; list options render comma-joined).
@pytest.mark.parametrize('options, attributes', [
    (None,
     {}),

    ({},
     {}),

    ({'lazy-rendering': False,
      'suppress-warnings': False,
      'hide-hostname': False,
      'required-props-first': False,
      'no-auto-auth': False,
      'path-in-middle-panel': False,
      'hide-loading': False,
      'native-scrollbars': False,
      'untrusted-spec': False,
      'expand-responses': []},
     {}),

    ({'lazy-rendering': True},
     {'lazy-rendering': ''}),

    ({'suppress-warnings': True},
     {'suppress-warnings': ''}),

    ({'hide-hostname': True},
     {'hide-hostname': ''}),

    ({'required-props-first': True},
     {'required-props-first': ''}),

    ({'no-auto-auth': True},
     {'no-auto-auth': ''}),

    ({'path-in-middle-panel': True},
     {'path-in-middle-panel': ''}),

    ({'hide-loading': True},
     {'hide-loading': ''}),

    ({'native-scrollbars': True},
     {'native-scrollbars': ''}),

    ({'untrusted-spec': True},
     {'untrusted-spec': ''}),

    ({'expand-responses': ['200', '404']},
     {'expand-responses': '200,404'}),
])
def test_redocjs_page_is_generated(run_sphinx, tmpdir, options, attributes):
    """The generated page wires up title, <redoc> attributes, and scripts."""
    run_sphinx(redoc_overwrite={'opts': options})

    html = tmpdir.join('out').join('api', 'github', 'index.html').read()
    soup = bs4.BeautifulSoup(html, 'html.parser')

    assert soup.title.string == 'Github API (v3)'
    assert soup.redoc.attrs == attributes
    assert soup.script.attrs['src'] == os.path.join(
        '..', '..', '_static', 'redoc.js')

    assert os.path.join('..', '..', '_specs', 'github.yml') \
        in soup.find_all('script')[-1].text
def test_embedded_spec(run_sphinx, tmpdir):
    """With ``embed: True`` the spec is inlined into the generated page."""
    run_sphinx(redoc_overwrite={'embed': True})

    page = tmpdir.join('out').join('api', 'github', 'index.html').read()
    spec_path = tmpdir.join('src', '_specs', 'github.yml').strpath

    with io.open(spec_path, encoding='utf-8') as stream:
        expected = yaml.safe_load(stream)

    document = bs4.BeautifulSoup(page, 'html.parser')
    embedded = document.find(id='spec').get_text()
    assert json.loads(embedded) == expected
| null | tests/test_integration.py | test_integration.py | py | 4,833 | python | en | code | null | code-starcoder2 | 51 |
596994432 | import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import f1_score
from keras.models import Sequential
from keras.models import Model
from keras import layers
from keras import backend as K
import tensorflow_hub as hub
# nltk.download("tokenize")
from os import listdir
from keras.layers import Flatten,Activation,GlobalMaxPooling1D
from keras.layers.merge import add
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda,Conv1D,MaxPooling1D
import tensorflow
import keras
# Pin TF threading and the graph seed for (more) reproducible runs.
session_conf = tensorflow.ConfigProto(intra_op_parallelism_threads=4, inter_op_parallelism_threads=4)
tensorflow.set_random_seed(1)
sess = tensorflow.Session(graph=tensorflow.get_default_graph(), config=session_conf)
keras.backend.set_session(sess)
import tensorflow as tf
# NOTE(review): this second Session replaces the seeded/thread-limited one
# registered just above, so those settings are effectively discarded —
# confirm which session is actually intended.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
K.set_session(sess)
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
df = pd.read_pickle("frame_no_stem.pkl")
images = set(np.load('asin.npy')) # valid products
print("Finished reading images")
# Keep only products whose asin also appears in the image set; collect the
# text description and the (multi-label) category list for each.
x_desc = []
y_category = []
i = 0
for asin in df.index.values:
    if asin in images:
        item = df.loc[asin]
        x_desc.append(item.description)
        cate = item.categories
        y_category.append(cate)
    if i % 1000 == 0:
        print(i)
    i += 1
print("Finished reading dataframe")
# Binarise the category lists into a multi-hot label matrix.
mlb = MultiLabelBinarizer()
y_total = mlb.fit_transform(y_category)
x_desc = np.array(x_desc)
# Shuffle descriptions and labels with the same permutation (re-using the
# saved RNG state keeps the two shuffles aligned).
np.random.seed(0)
state = np.random.get_state()
np.random.shuffle(x_desc)
np.random.set_state(state)
np.random.shuffle(y_total)
# First 90k samples for training, the remainder for testing.
X_train = x_desc[:90000]
y_train = y_total[:90000]
y_test = y_total[90000:]
x_test = x_desc[90000:]
# Keep the 10k most frequent words; the tokenizer is fit on training text only.
tokenizer = Tokenizer(num_words=10000)
tokenizer.fit_on_texts(X_train)
training_dataX = tokenizer.texts_to_sequences(X_train)
test_dataX = tokenizer.texts_to_sequences(x_test)
vocab_size = len(tokenizer.word_index) + 1
training_dataX = np.array(training_dataX)
test_dataX = np.array(test_dataX)
y_train = np.array(y_train)
y_test = np.array(y_test)
# padding
maxlen = 300
training_dataX = pad_sequences(training_dataX, padding='post', maxlen=maxlen)
test_dataX = pad_sequences(test_dataX, padding='post', maxlen=maxlen)
# create embedding matrix
def create_embedding_matrix(filepath, word_index, embedding_dim):
    """Build an embedding matrix from a GloVe-style text file.

    :param filepath: path to a text file of ``word v1 v2 ...`` lines
    :param word_index: dict mapping word -> row index (1-based; 0 reserved)
    :param embedding_dim: number of vector components to keep per word
    :return: numpy array of shape (len(word_index) + 1, embedding_dim);
        rows for words not found in the file remain all zeros
    """
    vocab_size = len(word_index) + 1  # adding 1 because of reserved 0 index
    embedding_matrix = np.zeros((vocab_size, embedding_dim))

    with open(filepath, encoding="utf-8") as f:
        for line in f:
            try:
                word, *vector = line.split()
                if word in word_index:
                    idx = word_index[word]
                    embedding_matrix[idx] = np.array(
                        vector, dtype=np.float32)[:embedding_dim]
            except ValueError:
                # Malformed line (non-numeric token, or a vector shorter
                # than embedding_dim).  FIX: the original used a bare
                # ``except`` which also hid unrelated errors.
                continue
    return embedding_matrix
# ---------------------------------------------------------------------------
# Model: embedding -> Conv1D -> global max pooling -> dense -> sigmoid
# ---------------------------------------------------------------------------
embedding_dim = 120
embedding_matrix = create_embedding_matrix("glove.840B.300d.txt", tokenizer.word_index, embedding_dim)
# Fraction of the vocabulary covered by the pre-trained GloVe vectors.
nonzero_elements = np.count_nonzero(np.count_nonzero(embedding_matrix, axis=1))
print(nonzero_elements / vocab_size)
# Embedding layer initialised with the GloVe weights and fine-tuned.
embedding_layer = Embedding(vocab_size, embedding_dim, weights=[embedding_matrix], input_length=maxlen, trainable=True)
sequence_input = keras.layers.Input(shape=(maxlen,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
layer = Conv1D(200, 5, activation='relu')(embedded_sequences)
# FIX: pool the Conv1D output.  The original applied GlobalMaxPooling1D to
# ``embedded_sequences`` again, which silently dropped the Conv1D layer
# from the computation graph.
layer = GlobalMaxPooling1D()(layer)
layer = Dense(170, activation='relu')(layer)
layer = Dense(122, name='out_layer', activation="sigmoid")(layer)
model = Model(sequence_input, layer)
# FIX: this is a multi-label problem (sigmoid outputs, MultiLabelBinarizer
# targets), so binary cross-entropy is the appropriate loss;
# categorical_crossentropy assumes mutually exclusive classes.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['acc'])
model.summary()
model.fit(np.array(training_dataX), y_train, epochs=30, batch_size=256, verbose=1)
# Binarise the sigmoid outputs at 0.5 and report micro-averaged F1 (in %)
# on the training set.
outcome = model.predict(np.array(training_dataX))
outcome[outcome >= 0.5] = 1
outcome[outcome < 0.5] = 0
outcome = outcome.astype(int)
print(f1_score(y_train, np.array(outcome), average='micro') * 100)
| null | description_classifier.py | description_classifier.py | py | 4,418 | python | en | code | null | code-starcoder2 | 51 |
642119808 | import c4d
import sys
sys.path.append("C:/MDK/source")
import gtcWorkerC4d
import gtcCommon as gCom
def PluginMessage(id, data):
    """Cinema 4D plugin entry point.

    When C4D delivers the command-line-args message, start the MDK worker.
    Returns True if the message was handled, False otherwise.
    """
    if id == c4d.C4DPL_COMMANDLINEARGS:
        try:
            mdkRunning = gtcWorkerC4d.GTCWorkerC4d()
            mdkRunning.main("c4dworker.process1")
        except Exception:
            # FIX: narrowed from a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt); log the traceback so failures
            # are visible instead of crashing C4D silently.
            errorStr = gCom.getTracebackStr()
            gCom.errorLog(errorStr, "c4dworker.process1")
        return True
    return False
return False | null | gtcWorkerC4dAgent.py | gtcWorkerC4dAgent.py | py | 443 | python | en | code | null | code-starcoder2 | 51 |
485337053 | import data_algebra
import data_algebra.data_ops
import data_algebra.flow_text
class Arrow:
    """Abstract arrow in the sense of category theory.

    See Steve Awody, "Category Theory, 2nd Edition", Oxford Univ. Press,
    2010, pg. 4.  Concrete subclasses supply a domain, a co-domain, a
    composition rule (``apply_to``) and an action on data (``act_on``).
    """

    def __init__(self):
        pass

    def dom(self):
        """Domain: the object at the base of the arrow."""
        raise NotImplementedError("base class called")

    def cod(self):
        """Co-domain: the object at the head of the arrow."""
        raise NotImplementedError("base class called")

    def apply_to(self, b):
        """Compose with arrow ``b`` (composition reads right to left)."""
        raise NotImplementedError("base class called")

    # noinspection PyPep8Naming
    def act_on(self, X):
        """Act on ``X``; the action must associate with composition."""
        raise NotImplementedError("base class called")

    # noinspection PyPep8Naming
    def transform(self, X):
        """Transform ``X``; may or may not associate with composition."""
        return self.act_on(X)

    def __rshift__(self, other):  # self >> other
        return other.apply_to(self)

    def __rrshift__(self, other):  # other >> self
        if not isinstance(other, Arrow):
            return self.act_on(other)
        return self.apply_to(other)

    # sklearn-style step interface -------------------------------------

    # noinspection PyPep8Naming, PyUnusedLocal
    def fit(self, X, y=None):
        pass

    # noinspection PyPep8Naming, PyUnusedLocal
    def fit_transform(self, X, y=None):
        self.fit(X, y=y)
        return self.transform(X)

    # noinspection PyUnusedLocal
    def get_feature_names(self, input_features=None):
        raise NotImplementedError("base class called")

    # noinspection PyUnusedLocal,PyMethodMayBeStatic
    def get_params(self, deep=False):
        return {}

    def set_params(self, **params):
        pass

    # noinspection PyPep8Naming
    def inverse_transform(self, X):
        raise TypeError("data_algebra does not support inverse_transform")
class DataOpArrow(Arrow):
    """
    Represent a dag of operators as a categorical arrow.
    """

    def __init__(
        self, pipeline, *, free_table_key=None, strict=False, forbidden_to_produce=None
    ):
        # pipeline: data_algebra.data_ops.ViewRepresentation being wrapped.
        # free_table_key: key of the table that acts as this arrow's input;
        #   may be omitted only when the pipeline uses exactly one table.
        # strict: if True, extra incoming columns are an error in apply_to().
        # forbidden_to_produce: column names this arrow promises not to emit.
        if not isinstance(pipeline, data_algebra.data_ops.ViewRepresentation):
            raise TypeError("expected pipeline to be data_algebra.data_ops")
        self.pipeline = pipeline
        self.strict = strict
        t_used = pipeline.get_tables()
        if forbidden_to_produce is None:
            forbidden_to_produce = []
        if isinstance(forbidden_to_produce, str):
            forbidden_to_produce = [forbidden_to_produce]
        self.forbidden_to_produce = forbidden_to_produce
        if free_table_key is None:
            if len(t_used) != 1:
                raise ValueError(
                    "pipeline must use exactly one table if free_table_key is not specified"
                )
            free_table_key = [k for k in t_used.keys()][0]
        else:
            if free_table_key not in t_used.keys():
                raise ValueError(
                    "free_table_key must be a table key used in the pipeline"
                )
        self.free_table_key = free_table_key
        self.incoming_columns = t_used[free_table_key].column_names.copy()
        self.disallowed_columns = pipeline.forbidden_columns()[free_table_key]
        self.incoming_types = None
        if t_used[free_table_key].column_types is not None:
            self.incoming_types = t_used[free_table_key].column_types.copy()
        # outgoing column names are kept sorted
        self.outgoing_columns = pipeline.column_names.copy()
        self.outgoing_columns.sort()
        self.outgoing_types = None
        if (
            isinstance(pipeline, data_algebra.data_ops.TableDescription)
            and self.incoming_types is not None
        ):
            self.outgoing_types = self.incoming_types.copy()
        Arrow.__init__(self)

    def get_feature_names(self, input_features=None):
        """sklearn-style: outgoing column names (plus, when not strict, any
        ``input_features`` this arrow does not already produce)."""
        cp = self.outgoing_columns.copy()
        if (not self.strict) and (input_features is not None):
            cp = cp + [f for f in input_features if f not in cp]
        return cp

    def apply_to(self, b):
        """replace self input table with b"""
        if isinstance(b, data_algebra.data_ops.ViewRepresentation):
            b = DataOpArrow(b)
        if not isinstance(b, DataOpArrow):
            raise TypeError("unexpected type: " + str(type(b)))
        # check categorical arrow composition conditions
        missing = set(self.incoming_columns) - set(b.outgoing_columns)
        if len(missing) > 0:
            raise ValueError("missing required columns: " + str(missing))
        problem_production = set(self.forbidden_columns()) - set(b.forbidden_to_produce)
        if len(problem_production) > 0:
            raise ValueError(
                "did not document non-produciton of columns: " + str(problem_production)
            )
        excess = set(b.outgoing_columns) - set(self.incoming_columns)
        if len(excess) > 0:
            problem_excess = excess.intersection(self.forbidden_columns())
            if len(problem_excess) > 0:
                raise ValueError("forbidden incoming columns: " + str(excess))
            if self.strict:
                raise ValueError("extra incoming columns: " + str(excess))
        # if both ends carry recorded types, they must agree column-by-column
        if (self.incoming_types is not None) and (b.outgoing_types is not None):
            for c in self.incoming_columns:
                st = self.incoming_types[c]
                xt = b.outgoing_types[c]
                if st != xt:
                    raise ValueError(
                        "column "
                        + c
                        + " self incoming type is "
                        + str(st)
                        + ", while b outgoing type is "
                        + str(xt)
                    )
        new_pipeline = self.pipeline.apply_to(
            b.pipeline, target_table_key=self.free_table_key
        )
        new_pipeline.get_tables()  # check tables are compatible
        res = DataOpArrow(
            pipeline=new_pipeline,
            free_table_key=b.free_table_key,
            forbidden_to_produce=self.forbidden_to_produce,
        )
        res.incoming_types = b.incoming_types
        res.outgoing_types = self.outgoing_types
        return res

    # noinspection PyPep8Naming
    def act_on(self, X):
        """Apply the wrapped pipeline to a data frame ``X``."""
        # assume a pandas.DataFrame compatible object
        # noinspection PyUnresolvedReferences
        cols = set(X.columns)
        missing = set(self.incoming_columns) - cols
        if len(missing) > 0:
            raise ValueError("missing required columns: " + str(missing))
        # extra columns are tolerated here: they are simply projected away
        excess = cols - set(self.incoming_columns)
        if len(excess) > 0:
            X = X[self.incoming_columns]
        return self.pipeline.act_on(X)

    def learn_types(self, data_in, data_out):
        """Record incoming/outgoing column types from example frames
        (types are sampled from row 0)."""
        if (data_in is not None) and (data_in.shape[0] > 0):
            types_in = {k: type(data_in.loc[0, k]) for k in self.incoming_columns}
            self.incoming_types = types_in
        if (data_out is not None) and (data_out.shape[0] > 0):
            types_out = {k: type(data_out.loc[0, k]) for k in self.outgoing_columns}
            self.outgoing_types = types_out

    # noinspection PyPep8Naming
    def fit(self, X, y=None):
        """Learn input and output types from example, and return self"""
        # assume a pandas.DataFrame compatible object
        out = self.act_on(X)
        self.learn_types(X, out)
        return self

    # noinspection PyPep8Naming
    def fit_transform(self, X, y=None):
        """Learn input and output types from example, and return transform."""
        out = self.transform(X)
        self.learn_types(X, out)
        # NOTE(review): the pipeline is executed a second time here; the
        # first result ``out`` could likely be returned directly.
        return self.transform(X)

    def dom(self):
        """Domain as an arrow over a table description of the required input."""
        return DataOpArrow(
            data_algebra.data_ops.TableDescription(
                table_name=None,
                column_names=self.incoming_columns,
                column_types=self.incoming_types,
            )
        )

    def dom_as_table(self):
        """Domain as a plain TableDescription."""
        return data_algebra.data_ops.TableDescription(
            table_name=None,
            column_names=self.incoming_columns,
            column_types=self.incoming_types,
        )

    def cod(self):
        """Co-domain as an arrow over a table description of the output."""
        return DataOpArrow(
            data_algebra.data_ops.TableDescription(
                table_name=None,
                column_names=self.outgoing_columns,
                column_types=self.outgoing_types,
            )
        )

    def cod_as_table(self):
        """Co-domain as a plain TableDescription."""
        return data_algebra.data_ops.TableDescription(
            table_name=None,
            column_names=self.outgoing_columns,
            column_types=self.outgoing_types,
        )

    def __repr__(self):
        return (
            "DataOpArrow(\n "
            + self.pipeline.__repr__()
            + ",\n free_table_key="
            + self.free_table_key.__repr__()
            + ")"
        )

    def required_columns(self):
        """Columns the free (input) table must provide."""
        return self.incoming_columns.copy()

    def forbidden_columns(self):
        """Columns the free (input) table must not provide."""
        return self.disallowed_columns.copy()

    # noinspection PyMethodMayBeStatic
    def format_end_description(
        self, *, required_cols, col_types, forbidden_cols, align_right=70, sep_width=2
    ):
        """Render one end of the arrow (column names, optional types, and
        forbidden columns) as flowed text for use by __str__."""
        if col_types is not None:
            in_rep = [str(c) + ": " + str(col_types[c]) for c in required_cols]
        else:
            in_rep = [str(c) for c in required_cols]
        in_rep = data_algebra.flow_text.flow_text(
            in_rep, align_right=align_right, sep_width=sep_width
        )
        col_rep = [", ".join(line) for line in in_rep]
        col_rep = " at least [ " + ",\n    ".join(col_rep) + " ]"
        if (forbidden_cols is not None) and (len(forbidden_cols) > 0):
            f_rep = [str(c) for c in forbidden_cols]
            f_rep = data_algebra.flow_text.flow_text(
                f_rep, align_right=align_right, sep_width=sep_width
            )
            f_rep = [", ".join(line) for line in f_rep]
            col_rep = col_rep + " , and none of [ " + ",\n    ".join(f_rep) + " ]"
        return col_rep

    def __str__(self):
        in_rep = self.format_end_description(
            required_cols=self.incoming_columns,
            col_types=self.incoming_types,
            forbidden_cols=self.disallowed_columns,
        )
        out_rep = self.format_end_description(
            required_cols=self.outgoing_columns,
            col_types=self.outgoing_types,
            forbidden_cols=self.forbidden_to_produce,
        )
        return (
            "[\n "
            + self.free_table_key.__repr__()
            + ":\n "
            + in_rep
            + "\n   ->\n "
            + out_rep
            + "\n]\n"
        )

    def __eq__(self, other):
        if not isinstance(other, DataOpArrow):
            return False
        if self.free_table_key != other.free_table_key:
            return False
        if self.incoming_columns != other.incoming_columns:
            return False
        if self.incoming_types != other.incoming_types:
            return False
        if self.outgoing_columns != other.outgoing_columns:
            return False
        if self.outgoing_types != other.outgoing_types:
            return False
        return self.pipeline == other.pipeline

    def __ne__(self, other):
        return not self.__eq__(other)
def fmt_as_arrow(ops):
    """Format a data_algebra pipeline by presenting it as a DataOpArrow."""
    arrow = DataOpArrow(ops)
    return str(arrow)
| null | build/lib/data_algebra/arrow.py | arrow.py | py | 11,437 | python | en | code | null | code-starcoder2 | 51 |
267536174 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
@author: zhaogao
@license: (C) Copyright 2013-2018.
@contact: 449628536@qq.com
@software: learn-py
@file: len179_4_1.py
@time: 19/04/2018 10:20 PM
'''
# FIX: explicit names instead of a wildcard import, and the socket is now
# closed via a context manager instead of being leaked at interpreter exit.
from socket import socket, AF_INET, SOCK_DGRAM

# Send the integers 0..39 as ASCII datagrams to a local UDP server on
# port 16000 and print each reply payload.
with socket(AF_INET, SOCK_DGRAM) as sock:
    for x in range(40):
        sock.sendto(str(x).encode('ascii'), ('127.0.0.1', 16000))
        data, _addr = sock.recvfrom(8192)
        print(data)
| null | cook/len179_4_1.py | len179_4_1.py | py | 392 | python | en | code | null | code-starcoder2 | 51 |
252539776 | import uuid,constants,webbrowser
from yapily import ApiClient
from yapily import Configuration
from yapily import AccountAuthorisationRequest
from yapily import ApplicationUsersApi
from yapily import ApplicationUser
from yapily import AccountsApi
from yapily import ConsentsApi
from yapily import InstitutionsApi
from yapily import TransactionsApi
from yapily import TransfersApi
from yapily import TransferRequest
def main():
    """End-to-end Yapily demo.

    Ensures the application user exists, launches the account-authorisation
    (consent) flow in a browser, lists the authorised accounts, and — when
    the institution supports it and at least two accounts are available —
    executes a GBP 15.00 transfer from the first account to the second.
    """
    configuration = Configuration()
    configuration.username = constants.APPLICATION_ID
    configuration.password = constants.APPLICATION_SECRET
    apiClient = ApiClient(configuration)

    # Create the application user on first run, otherwise reuse it.
    user_api = ApplicationUsersApi(apiClient)
    users_exists = user_api.get_users_using_get(filter_application_user_id=[constants.APPLICATION_USER_ID])
    if not users_exists:
        application_user = ApplicationUser()
        application_user._application_user_id = constants.APPLICATION_USER_ID
        sdk_user = user_api.add_user_using_post_with_http_info(application_user)[0]
        print("Created new sdk user:", sdk_user.application_user_id)
    else:
        sdk_user = users_exists[0]
        print("Using existing sdk user:", constants.APPLICATION_USER_ID)

    # Start the account-authorisation flow and let the user complete it
    # in the browser before continuing.
    accounts_api = AccountsApi(apiClient)
    account_authorisation_request = AccountAuthorisationRequest(
        application_user_id=constants.APPLICATION_USER_ID,
        institution_id=constants.INSTITUTION_ID,
        callback='',
        one_time_token=''
    )
    response = accounts_api.initiate_account_request_using_post(account_auth_request=account_authorisation_request)
    redirect_url = response.data.authorisation_url
    webbrowser.open(redirect_url)
    input("\nPress enter to continue")

    def is_authorised(consent):
        # Keep only consents the user has actually authorised.
        return consent.status == "AUTHORIZED"

    consents = ConsentsApi(apiClient).get_consents_using_get(
        filter_application_user_id=[constants.APPLICATION_USER_ID],
        filter_institution=[constants.INSTITUTION_ID]
    ).data
    authorised_consents = list(filter(is_authorised, consents))
    consent = authorised_consents[0]
    consent_token = consent.consent_token
    print("Consent: " + consent_token)

    # FIX: the original fetched the account list twice in a row and threw
    # the first result away; one request is enough.
    print("\nGetting accounts: ")
    accounts = accounts_api.get_accounts_using_get(consent_token)
    print("**************ACCOUNTS******************")
    print(accounts)
    print("****************************************")

    if len(accounts.data) > 1:
        institutions_api = InstitutionsApi(apiClient)
        features = institutions_api.get_institution_using_get(constants.INSTITUTION_ID).features
        if "TRANSFER" in features:
            account_id_1 = accounts.data[0].id
            account_id_2 = accounts.data[1].id
            # FIX: corrected "accout" typo in the user-facing message.
            print("\nExecuting a transfer from account 1 [" + account_id_1 + "] to account 2 [" + account_id_2 + "]:")
            transfers_api = TransfersApi(apiClient)
            transfer = transfers_api.transfer_using_put(
                consent=consent_token,
                account_id=account_id_1,
                transfer_request=TransferRequest(
                    account_id=account_id_2,
                    amount=15.00,
                    currency='GBP',
                    reference='Monthly savings',
                    transfer_reference_id='123456'
                )
            )
            print("**************TRANSFERS**************")
            print(transfer)
            print("****************************************")
        else:
            print("\nCan not execute transfer for institution '" + constants.INSTITUTION_ID + "' as it does not have the required feature: 'TRANSFER'")
    else:
        print("\nYou need to have authorisation to 2 accounts but this Consent only has authorisation for 1. Not executing transfer.")
print("\nYou need to have authorisation to 2 accounts but this Consent only has authorisation for 1. Not executing transfer.")
def create_callback_with_user_uuid(user_uuid):
    """Return the OAuth callback URL with the user uuid as a query parameter."""
    query = "?user_uuid=" + user_uuid
    return constants.CALLBACK_URL + query


if __name__ == '__main__':
    main()
| null | examples/example_transfers.py | example_transfers.py | py | 4,105 | python | en | code | null | code-starcoder2 | 51 |
from django.conf.urls import patterns,url
from . import views

# URL routes for the warehouse management app.
urlpatterns = [
    url(r'^home/$', views.home, name="home"),
    url(r'^product/$', views.product.as_view(), name='product'),
    # FIX: the named group was missing its parentheses — the original
    # pattern ``^deleteproduct/?P<id>[0-9]+/$`` treated ``P<id>`` as
    # literal text and never captured an id.
    url(r'^deleteproduct/(?P<id>[0-9]+)/$', views.delete, name='deleteproduct'),
    url(r'^addproduct/$', views.addproduct.as_view(), name="addproduct")]
178742228 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration: drops the UserProfile model and
    adds/normalises defaults on Beer, Brewery and Trial fields."""

    dependencies = [
        ('beers', '0004_auto_20150131_2234'),
    ]

    operations = [
        # UserProfile is removed entirely (FK field first, then the model).
        migrations.RemoveField(
            model_name='userprofile',
            name='user',
        ),
        migrations.DeleteModel(
            name='UserProfile',
        ),
        migrations.AddField(
            model_name='beer',
            name='alcohol',
            field=models.FloatField(default=0.0),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='brewery',
            name='origin',
            field=models.CharField(default=b'Unknown', max_length=128),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='brewery',
            name='owner',
            field=models.CharField(default=b'Unknown', max_length=128),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='trial',
            name='rating',
            field=models.PositiveIntegerField(default=0),
            preserve_default=True,
        ),
    ]
| null | beers/migrations/0005_auto_20150202_2326.py | 0005_auto_20150202_2326.py | py | 1,225 | python | en | code | null | code-starcoder2 | 51 |
234746856 | # -*- coding: utf-8 -*-
import os
import warnings
import shutil
import pathlib
import sphinx_rtd_theme
PROJECT_ROOT=pathlib.Path(__file__).parent.parent
pandoc_installed = False if os.system("pandoc --help > /dev/null 2>&1") else True
if not pandoc_installed:
warnings.warn("pandoc not installed - install brew then brew install pandoc")
def setup(app):
    """Forces the auto generation of the documentation at build time."""
    # Regenerate the API docs and copy the notebooks into the doc tree on
    # every Sphinx build.
    # NOTE(review): the sphinx-apidoc exit status is ignored — a failure
    # here goes unnoticed; consider checking os.system's return value.
    os.system("sphinx-apidoc -f -T -o docs/autogen src/testspace_colab")
    shutil.copytree(src=PROJECT_ROOT / 'notebooks', dst='docs/autogen/notebook', dirs_exist_ok=True)
# ------------------------------------------------------------------------------
# General information about the project.
# ------------------------------------------------------------------------------
project = u"testspace-colab"
copyright = u"2021, S2 Technologies, Inc"
author = "Laurent Brack"
# ------------------------------------------------------------------------------
# General Configuration
# ------------------------------------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.8"
# Add any Sphinx extension module names here, as strings. They can
# be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# See http://www.sphinx-doc.org/en/stable/extensions.html
extensions = [
"sphinx_autorun",
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.extlinks",
"sphinx.ext.napoleon",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.graphviz",
]
# ----------------------------------------------------------------------------
# To do extension configuration
# https://www.sphinx-doc.org/en/master/usage/extensions/todo.html
# ----------------------------------------------------------------------------
todo_include_todos=True
todo_link_only=True
if pandoc_installed:
extensions.append("nbsphinx")
# -----------------------------------------------------------------------------
# sphinx.ext.intersphinx
# -----------------------------------------------------------------------------
intersphinx_mapping = {
"python" : (" https://doc.python.org/3/", None),
'docker' : ("https://docker-py.readthedocs.io/en/stable/", None),
'elastic' : ("https://elasticsearch-py.readthedocs.io/en/latest/", None),
}
# -----------------------------------------------------------------------------
# sphinx.ext.autodoc
# http://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_default_options
# -----------------------------------------------------------------------------
autodoc_member_order = "alphabetical"
autodoc_default_options = {"members": None, "show-inheritance": None}
autoclass_content = "class"
autodoc_warningiserror = True
# -----------------------------------------------------------------------------
# 'sphinx.ext.inheritance_diagram',
# -----------------------------------------------------------------------------
inheritance_graph_attrs = dict(rankdir="LR", size='""', fontsize=12, ratio="compress")
inheritance_node_attrs = dict(fontsize=12, style="filled")
# -----------------------------------------------------------------------------
# General information about the project.
# -----------------------------------------------------------------------------
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today_fmt = "%Y-%m-%dT%H:%M %Z"
source_suffix = ".rst"
# The extlinks extension simplifies referencing multiple links to a given URL,
# for example links to bug trackers, version control web interfaces, etc.
# For example, to link to a JIRA issue in the doc, use :issue:`123`, which
# would create a link to ISSUE-123
# See http://www.sphinx-doc.org/en/stable/ext/extlinks.html
extlinks = {
"issue": (
"https://github.com/lbrack/testspace-colab/issues/%s",
"ISSUE-"
)
}
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = [project + "."]
# ----------------------------------------------------------------------------
# Custom Theme Options
# ----------------------------------------------------------------------------
# The frontpage document.
index_doc = "index"
# The master toctree document.
master_doc = "index"
# Manages todo section
todo_include_todos = True
include_todos = True
# warning will be inserted in the final documentation
keep_warnings = True
# -- Options for HTML output --------------------------------------------------
html_theme_options = {
"canonical_url": "",
"logo_only": True,
"display_version": True,
"prev_next_buttons_location": "bottom",
"style_external_links": False,
"style_nav_header_background": "#2980B9",
# Toc options
"collapse_navigation": True,
"sticky_navigation": True,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
# sphinxcontrib.napoleon extension configuration
# see https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
# for details
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_favicon = os.path.join("_static", "icon.ico")
html_logo = os.path.join("_static", "logo.png")
html_title = project
html_last_updated_fmt = today_fmt
html_show_sphinx = False
html_show_copyright = True
html_last_updated_fmt = today_fmt
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = project + "-doc"
| null | docs/conf.py | conf.py | py | 6,113 | python | en | code | null | code-starcoder2 | 51 |
48534530 | # encoding: utf-8
from pptx.dml.color import RGBColor
from pptx.util import Pt
# Powerpoint output directory
OUTPUT_DIRECTORY = "./output"
# Powerpoint Background
BACKGROUND_COLOR = RGBColor(0x00, 0x00, 0x00) # black
# Powerpoint Bottom Banner
BANNER_COLOR = RGBColor(0xff, 0xff, 0xff) # white
BANNER_FONT_SIZE = Pt(20)
BANNER_FONT_TYPE = 'Calibri'
BANNER_HEIGHT = Pt(40)
# Church metadata
CHURCH_LOGO_PATH = './images/logo.png'
CHURCH_LOGO_HEIGHT = BANNER_HEIGHT
CHURCH_LOGO_WIDTH = BANNER_HEIGHT
CHURCH_NAME = u'高贵林国语教会'
# Lyrics
SONG_TITLE_PADDING = Pt(20) # padding-left
LYRICS_FONT_TYPE = "Calibri"
LYRICS_FONT_COLOR = RGBColor(0xff, 0xff, 0xff) # white
LYRICS_FONT_SIZE = Pt(30)
LYRICS_EMPTY_LINE_FONT_SIZE = Pt(20)
LYRICS_FONT_ISBOLD = True
| null | settings.py | settings.py | py | 770 | python | en | code | null | code-starcoder2 | 51 |
293335948 |
import threading
import redpitaya_scpi as scpi
import matplotlib.pyplot as plot
import csv
from peaks import calculate_peak
import numpy as np
rp_s = scpi.scpi('192.168.128.1')
def getData():
    """Trigger a sine burst on the Red Pitaya, acquire one buffer, append
    it to ``ml/car.csv`` and plot it.  Re-schedules itself every 3 seconds.
    """
    try:
        # Re-arm this function to run again in 3 seconds.
        threading.Timer(3, getData).start()

        # Configure the generator: 10 kHz, 2 V sine, 2-cycle burst,
        # externally triggered.
        wave_form = 'sine'
        freq = 10000
        ampl = 2
        rp_s.tx_txt('GEN:RST')
        rp_s.tx_txt('SOUR1:FUNC ' + str(wave_form).upper())
        rp_s.tx_txt('SOUR1:FREQ:FIX ' + str(freq))
        rp_s.tx_txt('SOUR1:VOLT ' + str(ampl))
        rp_s.tx_txt('SOUR1:BURS:NCYC 2')
        rp_s.tx_txt('OUTPUT1:STATE ON')
        rp_s.tx_txt('SOUR1:BURS:STAT ON')
        rp_s.tx_txt('SOUR1:TRIG:SOUR EXT_PE')

        # Arm the acquisition (decimation 64, external positive-edge trigger).
        rp_s.tx_txt('ACQ:DEC 64')
        rp_s.tx_txt('ACQ:TRIG:LEVEL 100')
        rp_s.tx_txt('ACQ:START')
        rp_s.tx_txt('ACQ:TRIG EXT_PE')
        rp_s.tx_txt('ACQ:TRIG:DLY 9000')

        # Busy-wait until the acquisition has triggered ('TD' = triggered).
        while 1:
            rp_s.tx_txt('ACQ:TRIG:STAT?')
            if rp_s.rx_txt() == 'TD':
                break

        # Fetch the sample buffer and parse it into floats.
        rp_s.tx_txt('ACQ:SOUR1:DATA?')
        buff_string = rp_s.rx_txt()
        buff_string = buff_string.strip('{}\n\r').replace(" ", "").split(',')
        buff = list(map(float, buff_string))

        # FIX: close the CSV file after writing — the original leaked the
        # file handle on every 3-second cycle.
        with open("ml/car.csv", 'a') as csv_file:
            csv.writer(csv_file).writerow(buff)

        plot.plot(buff)
        plot.ylabel('Voltage')
        plot.show()
    except Exception as exc:
        # FIX: narrowed from a bare ``except:`` and report the actual error
        # instead of a generic message that hid the cause.
        print("An exception occurred:", exc)

getData()
140433673 | #!/usr/bin/python3
#-*- coding: UTF8 -*-
##Programa para calcular Un circuito oscilador RF conectado a una antena.
##Copyright (c) 2010 Rafael Ortiz Johao Cuervo .
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
##THE SOFTWARE.
##Introduccion de parametros o variables
import sys, os
import math
import decimal #no se usa
import pygtk
import gtk
# This function will be called whenever you click on your button:
def click_handler(widget) :
    """Button-click callback: ends the application by quitting the GTK main loop."""
    # quit the application:
    gtk.main_quit()
# Create the main window:
win = gtk.Window()
# Organize widgets in a vertical box:
vbox = gtk.VBox()
win.add(vbox)
# Create an area to draw in:
drawing_area = gtk.DrawingArea()
drawing_area.set_size_request(600, 400)
vbox.pack_start(drawing_area)
drawing_area.show()
# Make a pushbutton:
button = gtk.Button("Calcular")
# When it's clicked, call our handler:
button.connect("clicked", click_handler)
# Add it to the window:
vbox.pack_start(button)
button.show()
# Obey the window manager quit signal:
win.connect("destroy", gtk.main_quit)
vbox.show()
win.show()
gtk.main()
#inicia calculo
# Read the antenna's four S-parameters (magnitude and angle) from stdin.
# NOTE(review): raw_input() exists only in Python 2, but the shebang says
# python3 (where these lines raise NameError). The pygtk/gtk imports above
# are also Python 2 era -- confirm which interpreter this script targets
# before converting raw_input to input().
print ('Introduzca los parametros de la antena')
vars1 = float(raw_input("Introduzca S11: "))
vars5 = float(raw_input("Introduzca Angulo S11: "))
vars2 = float(raw_input("Introduzca S12: "))
vars6 = float(raw_input("Introduzca Angulo S12: "))
vars3 = float(raw_input("Introduca S21: "))
vars7 = float(raw_input("Introduca Angulo S21: "))
vars4 = float(raw_input("Introduca S22: "))
vars8 = float(raw_input("Introduca Angulo S22: "))
# Echo the entered values back to the user.
print ("Usted escogio |S11| ", vars1)
print ("Usted escogio |S12| ", vars2)
print ("Usted escogio |S21| ", vars3)
print ("Usted escogio |S22| ", vars4)
print ("Usted escogio Angulo S11 ", vars5)
print ("Usted escogio Angulo S12 ", vars6)
print ("Usted escogio Angulo S21 ", vars7)
print ("Usted escogio Angulo S22 ", vars8)
# Angle conversions.
# NOTE(review): the original comment said "conversion a radianes", but
# multiplying by 57.295779 (= 180/pi) converts *radians to degrees*.
# Degrees-to-radians would multiply by pi/180 (~0.0174533). Confirm the
# intended units of the entered angles before changing the math.
vars5rad = vars5*57.295779
vars6rad = vars6*57.295779
vars7rad = vars7*57.295779
vars8rad = vars8*57.295779
##Calculo de estabilidad
#DSc = S11*S22 − S12*S21 calculo DS complejo
DSCM1 = vars1*vars4 #calculo Magnitud DSC
DSCM2 = vars2*vars3 #calculo Magnitud DSC
DSCA1 = vars5rad + vars8rad #calculo Angulo
DSCA2 = vars6rad + vars7rad #calculo Angulo
#Conversion a rectangular para resta.
PR1= DSCM1*math.cos(DSCA1) #parte real1 raiZ!!!! x
PP1= DSCM1*math.sin(DSCA1) #parte polar1 y
PR2 = DSCM2*math.cos(DSCA2) #parte real2 x1
PP2 = DSCM2*math.sin(DSCA2) #parte polar2 y1
#calculo parametro DS complejo.
DSs1 = PR1 - PR2 #Resta Reales
DSs2 = PP1 - PP2 #Resta angulos
#Calculo de polar a rectangular
R1 = math.sqrt((DSs1*DSs1) + (DSs2*DSs2)) #parte real de la resta que equivale a DS
#k= 1 + (DS)2 − |S11 |2 − |S22 |2 / 2 * |S21 | * |S12 | Ecuacion de K.
K = ((1.0 + (R1*R1) - (vars1*vars1) - (vars4*vars4))/(2.0*vars3*vars2))
print ('Para estos valores la estabilidad K es igual a: %f'%K)
#MAG= 10LOG|VARS|+10LOG
MAG = (10*math.log10(vars3/vars5))+(10*math.log10(K-math.sqrt((K*K)-1)))
print ('Para estos valores la MAG es igual a: %f'%MAG)
if K > 1: #evaluacion de estabilidad o inestabilidad.
# print 'ciclo'
# C2 = S22 − (DS * S11 ) Evaluacion de factor C2
C2R = R1
# B2 = 1 + |S22 |2 − |S11 |2 − |DS |2 Evaluacion de factor B2
B2= (1.0 + (vars4*vars4) - (vars1*vars1) - (R1*R1))
print ("El coeficiente B2 es: %f"%B2)
#Magnitud coeficiente de reflexion hacia la carga |TL| = (B2 ± sqrt(B2 − 4|C2 |2))/(2*|C2|)
# TL1 = (B2 + math.sqrt((B2*B2) - 4*(R1*R1)))/(2*R1)
TL1 = (B2 + math.sqrt((B2*B2) - 4*(R1*R1)))/(2*R1)
#TS =
#Salida al Usuario
# print "Puede Utilizar alguno de estos dos valores para TL"
print ("El coeficiente de reflexion hacia la carga (1) es: %f"%TL1)
# print "El coeficiente de reflexion hacia la carga (2) es: %f"%TL2
#print "El coeficiente de reflexion hacia la fuente es: ", TS
#Input del arco
vars9 = float(raw_input("Introduzca ARCO AB: "))
vars10 = float(raw_input("Introduzca ARCO BC: "))
frec = float(raw_input("Introduzca Frecuencia de trabajo en MHZ: "))
#salida
C = vars9 / (2*math.pi*frec*50000000) #C para ocilacion. 50 es el coef de normalizacion
L = vars10*50 / (2*3.1415*frec*1000000) #L para la ocilacion
#Salida al usuario
print ("La capacitancia para su circuito es: %f pf"%C)
print ("La inductancia para su circuito es: %f nH"%L)
else: print ("Su configuracion no es estable pruebe con otros valores.")
| null | rf-antenna-design.py | rf-antenna-design.py | py | 5,763 | python | en | code | null | code-starcoder2 | 51 |
608104916 | import os
from telethon import TelegramClient, events
api_id = int(os.environ.get('api_id', 5000))
api_hash = str(os.environ.get('api_hash', 5000))
client = TelegramClient('anon', api_id, api_hash)
@client.on(events.NewMessage)
async def my_event_handler(event):
    """Forward every incoming message containing 'привет' (Russian for
    'hello') to a hard-coded phone number."""
    # raw_text is the plain message body without formatting entities.
    if 'привет' in event.raw_text:
        # NOTE(review): destination number is hard-coded -- consider
        # moving it to configuration alongside api_id/api_hash.
        await client.send_message('+79992007908', event.raw_text)
client.start()
client.run_until_disconnected()
| null | app.py | app.py | py | 423 | python | en | code | null | code-starcoder2 | 51 |
466238882 | '''
13.09.18
'''
# Print a Celsius-to-Fahrenheit conversion table for 0..20 degrees C.
# (The old ``c = 0`` initialisation and ``c += 1`` inside the loop were
# dead code: the ``for`` statement already controls ``c``.)
print('Celsius\tFahrenheit')
for c in range(21):
    # Standard conversion: F = (9/5) * C + 32
    f = 9 / 5 * c + 32
    print(format(c, '6.0f'), '\t', format(f, '6.0f'))
| null | 4_Repetition_structures/PE6_Celcius_to_Fahrenheit.py | PE6_Celcius_to_Fahrenheit.py | py | 163 | python | en | code | null | code-starcoder2 | 50 |
271220805 | from PIL import Image, ImageEnhance
import cv2
import os
import random as r
import numpy as np
def read_files(data_dir, file_name=None):
    """Load an image and its trimap from *data_dir*.

    Parameters:
        data_dir: root directory containing ``image/`` and ``trimap/``
            subfolders.
        file_name: mapping with keys ``'image'`` and ``'trimap'`` giving
            the file names inside the respective subfolders.

    Returns:
        Tuple ``(image, trimap)`` as loaded by ``cv2.imread`` (BGR
        arrays; cv2 returns None for unreadable paths instead of
        raising).
    """
    # Avoid the mutable-default-argument pitfall of the old ``file_name={}``.
    # Behaviour is unchanged: omitting the argument still raises KeyError
    # at the lookup below.
    file_name = file_name if file_name is not None else {}
    image_name = os.path.join(data_dir, 'image', file_name['image'])
    trimap_name = os.path.join(data_dir, 'trimap', file_name['trimap'])
    image = cv2.imread(image_name)
    trimap = cv2.imread(trimap_name)
    return image, trimap
def random_scale_and_creat_patch(image):
    """Randomly colour-jitter a BGR image with 50% probability.

    When triggered, the image is converted to a PIL RGB image, passed
    through four ImageEnhance stages (saturation, brightness, contrast,
    sharpness) with random factors, and converted back to a BGR numpy
    array.  Otherwise the input is returned unchanged.

    Parameters:
        image: BGR image array as produced by cv2.imread.

    Returns:
        BGR image array, jittered or unchanged.
    """
    # color random
    if r.random() < 0.5:
        # cv2 works in BGR, PIL in RGB -- convert before enhancing.
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        random_factor = np.random.randint(0, 31) / 10.  # random factor in [0, 3]
        color_image = ImageEnhance.Color(
            image).enhance(random_factor)  # adjust saturation
        random_factor = np.random.randint(10, 21) / 10.  # random factor in [1, 2]
        brightness_image = ImageEnhance.Brightness(
            color_image).enhance(random_factor)  # adjust brightness
        random_factor = np.random.randint(10, 21) / 10.  # random factor in [1, 2]
        contrast_image = ImageEnhance.Contrast(
            brightness_image).enhance(random_factor)  # adjust contrast
        random_factor = np.random.randint(0, 31) / 10.  # random factor in [0, 3]
        image = ImageEnhance.Sharpness(contrast_image).enhance(random_factor)
        # Back to a BGR numpy array for the cv2 pipeline.
        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    return image
def rotate_bound(image, trimap):
    """Rotate *image* and *trimap* together by a random angle in
    [-8, 8) degrees, enlarging the canvas so no content is clipped.

    Both arrays receive the identical affine transform so the image and
    its annotation stay pixel-aligned.

    Returns:
        Tuple ``(rotated_image, rotated_trimap)``.
    """
    angle = np.random.randint(-8, 8)
    # Image size and rotation centre.
    (h, w) = image.shape[:2]
    (cx, cy) = (w/2, h/2)
    # Rotation matrix about the image centre.
    M = cv2.getRotationMatrix2D((cx, cy), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # New bounding box of the rotated image.  The correct extent is the
    # *sum* of the projected side lengths; the previous
    # ``abs(h*sin - w*cos)`` produced an undersized canvas that cropped
    # the rotated content.
    nW = int((h*sin)+(w*cos))
    nH = int((h*cos)+(w*sin))
    # Shift the transform so the rotated content is centred in the new canvas.
    M[0, 2] += (nW/2) - cx
    M[1, 2] += (nH/2) - cy
    image = cv2.warpAffine(image, M, (nW, nH))
    trimap = cv2.warpAffine(trimap, M, (nW, nH))
    return image, trimap
def main():
    """Generate 20 augmented variants for each of the 8 source images.

    For every source image/alpha pair ``h (1..8)``, applies random
    colour jitter and a random rotation, resizes the result to a fixed
    width of 600 px (height scaled to keep aspect ratio), and writes
    the augmented image and its matching alpha map to the output
    folders.
    """
    num = 1
    while num < 9:
        # Hard-coded Windows paths to the source image and its alpha matte.
        imagepath = "D:/m/data/new-data-single/image/h ("+str(num)+").jpg"
        trimapath = "D:/m/data/new-data-single/alpha/h ("+str(num)+").png"
        image = cv2.imread(imagepath)
        trimap = cv2.imread(trimapath)
        i = 0
        while i < 20:
            # Jitter colour first (image only), then rotate image+alpha together.
            image_c = random_scale_and_creat_patch(image)
            image_q, trimap_q = rotate_bound(image_c, trimap)
            (h, w) = image_q.shape[:2]
            # cv2.resize takes (width, height): fixed 600 wide, scaled height.
            image_r = cv2.resize(image_q, (600, int(h*600/w)),
                                 interpolation=cv2.INTER_CUBIC)
            (nh, nw) = image_r.shape[:2]
            # Resize the alpha to exactly match the resized image.
            alpha = cv2.resize(
                trimap_q, (nw, nh), interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(
                "D:/m/data/new-data-single/image1/h ("+str(num)+")_"+str(i)+".jpg", image_r)
            cv2.imwrite(
                "D:/m/data/new-data-single/alpha1/h ("+str(num)+")_"+str(i)+".png", alpha)
            i += 1
        num += 1
if __name__ == "__main__":
main()
| null | ImageEnhance.py | ImageEnhance.py | py | 3,306 | python | en | code | null | code-starcoder2 | 50 |
563489826 | # 计算多天的检测率 表示检测的完备度
# 需要进行计算的日期 单独列为一个程序
import datetime
from detectStatistics import detect_result
def days(startdate, enddate):
    """Return every date from *startdate* through *enddate* (inclusive)
    as 'YYYY/MM/DD' strings.

    If *enddate* is before *startdate*, only *startdate* is returned
    (matching the historical behaviour of this helper).
    """
    start = datetime.datetime.strptime(startdate, '%Y/%m/%d')
    end = datetime.datetime.strptime(enddate, '%Y/%m/%d')
    span = max((end - start).days, 0)
    return [(start + datetime.timedelta(days=offset)).strftime('%Y/%m/%d')
            for offset in range(span + 1)]
# Compute the cumulative detection rate ("completeness") for each day
# in the requested range.
startdate = '2020/12/21'
enddate = '2020/12/27'
# Denominator for the rate.
# NOTE(review): the original comment says the total is 180970, but the
# value below is 18097 (one digit short, i.e. exactly 10%) -- confirm
# which figure is intended.
num = 18097
day = days(startdate,enddate)
# One completeness figure per day, keyed by date string, e.g.
# {'2020/12/21': 0.00215..., '2020/12/22': 0.00222..., ...}
detect_completion = {}
for i in range(len(day)):
    # Day i's rate accumulates detections from day 0 through day i.
    dates = days(startdate,day[i])
    result = detect_result(dates)
    # NOTE(review): assumes detect_result returns one entry per detected
    # item -- confirm against detectStatistics.detect_result.
    deteceted_rate = len(result)/num
    detect_completion[day[i]] = deteceted_rate
# for key,value in detect_completion.items():
#     print(type(key),value)
print(detect_completion)
# # 绘制完备度分布图
# # 横坐标为日期 纵坐标为检测率(百分比
# date = [key for key,value in detect_completion.items()]
# rate = [value for key,value in detect_completion.items()]
#
# import matplotlib.pyplot as plt
# from matplotlib.ticker import FuncFormatter
#
# plt.xlabel('date')
# plt.ylabel('completion')
# # 设置纵坐标为百分比
# # def to_percent(temp, position):
# # return '%1.0f'%(10*temp) + '%'
# # plt.gca().yaxis.set_major_formatter(FuncFormatter(to_percent))
#
# plt.plot(date,rate)
# plt.show() | null | detectedCompletion.py | detectedCompletion.py | py | 1,751 | python | en | code | null | code-starcoder2 | 50 |
191558967 | import random
import uuid
from django.db import models
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from .statements.ifs import STATEMENTS as IF_STATEMENTS
from .statements.thens import STATEMENTS as THEN_STATEMENTS
from .constants import GameState
from .exceptions import ValidationError
class Character(models.Model):
    """
    Model that holds information to represent a character.

    A Character is the persistent per-user record; a Character can join
    a game and become a (per-game, throwaway) Player.
    """
    guid = models.UUIDField(
        default=uuid.uuid4, editable=False, help_text="GUID of this character"
    )
    user = models.ForeignKey(
        User, on_delete=models.CASCADE, help_text="The user this character belongs to"
    )
    exp = models.IntegerField(default=1, help_text="Total experience of this player")
    level = models.IntegerField(default=1, help_text="Level of this player")
    hp = models.IntegerField(default=20, help_text="Total health points of this player")
    talent_points = models.IntegerField(
        default=20, help_text="Total available talent points of this player"
    )

    def consume_exp(self, exp):
        """
        Consume the given additional experience and level up if
        conditions are met.  Persists the new exp immediately.
        """
        self.exp = self.exp + exp
        self.save()
        # The exp/level curve is not designed yet, so the level-up
        # branch is intentionally dead until one is chosen.
        if False: # figure out exp/level model
            self.level_up()

    def level_up(self):
        """
        Update the stats of this player according to current level:
        level +1 for every level
        hp +1 for every level
        talent_points +1 for every 2 levels
        """
        self.level = self.level + 1
        self.hp = self.hp + 1
        if self.level % 2 == 0:
            self.talent_points = self.talent_points + 1
        self.save()
class PlayerManager(models.Manager):
    """
    Manager that provides helper methods for model Player.
    """

    def create_player(self):
        """
        Create and persist a new Player with randomly distributed stats.

        A fixed budget of 30 points is split between attack, defense
        and agility in that order (earlier stats draw from the larger
        remaining pool), and hp is fixed at 30.
        """
        total_points = 30 # Set to 30 for now
        attack = random.randint(0, total_points)
        total_points = total_points - attack
        defense = random.randint(0, total_points)
        total_points = total_points - defense
        # Whatever remains of the budget goes to agility.
        agility = total_points
        hp = 30 # Set to 30 for now
        return self.get_queryset().create(
            attack=attack, defense=defense, agility=agility, hp=hp
        )
class Player(models.Model):
    """
    Model that holds information to represent a player.

    A Player only lasts through one game; its stats are frozen at
    creation time (see ``save``).
    """
    guid = models.UUIDField(
        default=uuid.uuid4, editable=False, help_text="GUID of this player"
    )
    attack = models.IntegerField(
        blank=False, null=False, help_text="Attack point of this player"
    )
    defense = models.IntegerField(
        blank=False, null=False, help_text="Defense point of this player"
    )
    agility = models.IntegerField(
        blank=False, null=False, help_text="Agility point of this player"
    )
    hp = models.IntegerField(
        blank=False, null=False, help_text="Current health point of this player"
    )
    objects = PlayerManager()

    def __str__(self):
        """
        String representation of this player.
        """
        return (
            f"HP: {self.hp} | Attack: {self.attack} | "
            f"Defense: {self.defense} | Agility: {self.agility}"
        )

    def save(self, force=False, *args, **kwargs):
        """
        Set this model to be immutable - we never want the stats to be
        accidentally modified.

        Raises ValidationError on any re-save of an existing row unless
        ``force=True`` is passed explicitly.
        """
        if self.pk and not force:
            raise ValidationError("Son, just don't")
        # ``force`` is deliberately consumed here, not forwarded to Django.
        super().save(*args, **kwargs)

    @property
    def is_dead(self):
        """
        Property that returns whether this player is dead (hp <= 0).
        """
        return self.hp <= 0
class Game(models.Model):
    """
    Model that holds information of a game.

    Lifecycle (see ``state``): two users join, per-game Players are set
    up, then the game loops between generating an empty Move and
    collecting one if-statement and one then-statement per Move, until
    replaying the history kills a player (or both).
    """
    guid = models.UUIDField(
        default=uuid.uuid4, editable=False, help_text="GUID of this game"
    )
    player1 = models.ForeignKey(
        Player,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="Player 1 of this game",
    )
    player1_user = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="The user player1 represents",
    )
    player2 = models.ForeignKey(
        Player,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="Player 2 of this game",
    )
    player2_user = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="The user player2 represents",
    )
    winner = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="won_games",
        help_text="Winner of the game",
    )
    loser = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="lost_games",
        help_text="Loser of the game",
    )
    # True when both players die on the same move.
    is_draw = models.BooleanField(default=False)

    def __str__(self):
        """
        String representation of this game.
        """
        if self.is_draw:
            return "Game ended. Draw game"
        if self.winner:
            return f"Game ended. Winner: {self.winner} | Loser: {self.loser}"
        return "Game is still going"

    @property
    def state(self):
        """
        Get the current state of this game as a GameState value.

        The state is derived entirely from which fields are filled in,
        checked in lifecycle order: users joined -> players set up ->
        game finished -> waiting on the latest move.
        """
        if self.player1_user is None and self.player2_user is None:
            return GameState.EMPTY_GAME.value
        if self.player1_user is None or self.player2_user is None:
            return GameState.HALF_JOINED_GAME.value
        # Now we know all players are joined
        if self.player1 is None and self.player2 is None:
            return GameState.SETUP_GAME.value
        if self.player1 is None or self.player2 is None:
            return GameState.HALF_SETUP_GAME.value
        if self.winner:
            return GameState.END_GAME.value
        if self.is_draw:
            return GameState.DRAW_GAME.value
        last_move = self.get_last_move()
        if last_move is None or last_move.is_complete:
            return GameState.GENERATE_MOVE.value
        if last_move.if_statement == "" and last_move.then_statement == "":
            return GameState.WAITING_FOR_TWO_MOVES.value
        # NOTE(review): if none of the branches above match, this
        # property implicitly returns None; unreachable by construction
        # but worth an explicit final return.
        if last_move.if_statement == "" or last_move.then_statement == "":
            return GameState.WAITING_FOR_ONE_MOVE.value

    def get_last_move(self):
        """
        Get the latest move of this game, or None if no move exists.
        """
        return self.move_set.order_by("id").last()

    def join(self, user):
        """
        Join the given user to this game if that's possible.

        Raises ValidationError when the game is not joinable, the user
        is anonymous, has already joined, or the game is full.  When the
        second user joins, ``setup`` is triggered immediately.
        """
        game_state = self.state
        if not (
            game_state == GameState.EMPTY_GAME.value
            or game_state == GameState.HALF_JOINED_GAME.value
        ):
            raise ValidationError(message="The game cannot be joined right now")
        if not user.is_authenticated:
            raise ValidationError(message="You must sign in in order to join the game")
        if self.player1_user == user or self.player2_user == user:
            raise ValidationError(message="You have already joined this game")
        if self.player1_user is None:
            self.player1_user = user
            return self.save()
        if self.player1_user is not None and self.player2_user is None:
            self.player2_user = user
            self.save()
            # Second join completes the roster: create the Players now.
            return self.setup(user)
        raise ValidationError(message="You cannot join since the game is full")

    def setup(self, user):
        """
        Allow user to distribute stats on their own.

        FOR NOW, SKIPPING THAT AND PRESETTING RANDOM STATS FOR BOTH
        PLAYERS (``user`` is currently unused).
        """
        self.player1 = Player.objects.create_player()
        self.player2 = Player.objects.create_player()
        self.save()
        # Kick off the first round immediately.
        self.generate_empty_move()

    def generate_empty_move(self):
        """
        Create the next empty Move: player1's user picks the "if",
        player2's user picks the "then", each from 3 random options.
        """
        if self.state != GameState.GENERATE_MOVE.value:
            raise ValidationError("A move cannot be generated right now")
        # Options are stored as a comma-joined string of statement ids.
        self.move_set.create(
            if_user=self.player1_user,
            if_statement_options=",".join(random.sample(list(IF_STATEMENTS.keys()), 3)),
            then_user=self.player2_user,
            then_statement_options=",".join(
                random.sample(list(THEN_STATEMENTS.keys()), 3)
            ),
        )

    def make_move(self, user, statement_id):
        """
        Record *user*'s chosen statement on the latest move; once both
        halves are chosen, replay the game via ``play``.

        NOTE(review): ``statement_id in last_move.if_statement_options``
        is a *substring* test on the comma-joined options string, so an
        id that is a substring of another offered id would wrongly pass.
        Consider splitting on "," before the membership test.
        """
        game_state = self.state
        if not (
            game_state == GameState.WAITING_FOR_ONE_MOVE.value
            or game_state == GameState.WAITING_FOR_TWO_MOVES.value
        ):
            raise ValidationError("A move cannot be made right now")
        last_move = self.get_last_move()
        if last_move.if_user == user:
            if last_move.if_statement == "":
                if statement_id in last_move.if_statement_options:
                    last_move.if_statement = statement_id
                    last_move.save()
                else:
                    raise ValidationError(
                        "You can only choose a move from available moves"
                    )
            else:
                raise ValidationError("You cannot retract your previous choice")
        elif last_move.then_user == user:
            if last_move.then_statement == "":
                if statement_id in last_move.then_statement_options:
                    last_move.then_statement = statement_id
                    last_move.save()
                else:
                    raise ValidationError(
                        "You can only choose a move from available moves"
                    )
            else:
                raise ValidationError("you cannot retract your previous choice")
        last_move.refresh_from_db()
        if last_move.is_complete:
            self.play()

    def play_single_move(self, move):
        """
        Evaluate a single move on the current game.

        Move is passed in as a parameter so you can potentially apply
        any arbitrary move to a game just to see the outcome.
        """
        if move.is_complete:
            move.evaluate()
        else:
            raise ValidationError("You are playing a move that's not completed")

    def play(self):
        """
        Replay the full move history against freshly loaded players and
        see what happens.

        self.player1 and self.player2 are mutated in memory to render
        the result; their rows are not written back (Player is
        immutable).  Replaying from scratch on every call makes the
        overall cost quadratic in the number of moves -- accepted by
        design here.
        """
        if self.winner or self.loser or self.is_draw:
            raise ValidationError("This game is already over")
        # Reload player1/player2 so each replay starts from base stats.
        self.refresh_from_db()
        for move in self.move_set.order_by("id"):
            try:
                self.play_single_move(move)
            except ValidationError:
                # First incomplete move ends the replay.
                break
        if self.player1.is_dead or self.player2.is_dead:
            if self.player1.is_dead and self.player2.is_dead:
                self.is_draw = True
            if self.player1.is_dead:
                self.winner = self.player2_user
                self.loser = self.player1_user
            if self.player2.is_dead:
                self.winner = self.player1_user
                self.loser = self.player2_user
            return self.save()
        # Nobody died: open the next round.
        self.generate_empty_move()
class Move(models.Model):
    """
    Model that holds information of a move.

    A move is complete once both the "if" statement (chosen by
    ``if_user``) and the "then" statement (chosen by ``then_user``)
    have been filled in; ``evaluate`` then applies the rule to both
    players symmetrically.
    """
    guid = models.UUIDField(
        default=uuid.uuid4, editable=False, help_text="GUID of this move"
    )
    game = models.ForeignKey(
        Game, on_delete=models.CASCADE, help_text="The game that this move happens in"
    )
    if_user = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="The user who chooses the if statement",
    )
    if_statement_options = models.CharField(
        max_length=255, help_text="Available if statement ids"
    )
    if_statement = models.CharField(max_length=255, help_text="Executable if statement")
    then_user = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
        help_text="The user who chooses the then statement",
    )
    then_statement_options = models.CharField(
        max_length=255, help_text="Available then statement ids"
    )
    then_statement = models.CharField(
        max_length=255, help_text="Executable then statement"
    )

    def __str__(self):
        """
        String representation of this move.
        """
        return f"If {self.if_statement}, then {self.then_statement}"

    @property
    def is_complete(self):
        """
        Return True if both if_statement and then_statement are filled in.
        """
        return self.if_statement != "" and self.then_statement != ""

    def evaluate(self):
        """
        Evaluate the condition and event statements and update the stats
        of each player (in memory; Player rows are immutable).

        if_statement examples:
            "operating_player.attack > opponent_player.attack" (Player who has higher attack)
            "operating_player.hp > 30" (Player whose hp is higher than 30)
        then_statement examples:
            "operating_player.attack = operating_player.attack + 5" (Will increase attack by 5)
            "operating_player.hp = operating_player.hp - opponent_player.attack"
            (Will lose hp by the attack of the other player)
        """
        player1 = self.game.player1
        player2 = self.game.player2
        if IF_STATEMENTS[self.if_statement].evaluate(
            operating_player=player1, opponent_player=player2
        ):
            # if (self.game.player1.attack > self.game.player2.attack)
            THEN_STATEMENTS[self.then_statement].execute(
                operating_player=player1, opponent_player=player2
            )
            # self.game.player1.hp = self.game.player1.hp - self.game.player2.attack
        # Switch the order and execute again to make sure we update the right player
        if IF_STATEMENTS[self.if_statement].evaluate(
            operating_player=player2, opponent_player=player1
        ):
            THEN_STATEMENTS[self.then_statement].execute(
                operating_player=player2, opponent_player=player1
            )
| null | ifthen/models.py | models.py | py | 14,677 | python | en | code | null | code-starcoder2 | 50 |
547059420 | #Initilize variables for the PCEC model
#as of 30Nov20 most of these variables are hard coded and need to be found
#The solution vector is initialized here
#Everything here is saved into a pointer class
#\/\/^\/^\/\/^\ Imports /\/\/^\/^\/\/^\#
import numpy as np
import math
#\/\/^\/^\/\/^\ Parameters /\/\/^\/^\/\/^\#
"Importnat Variables For the SV to work"
i_ext = 40 #A
C_dl_neg = 6e5 # F/m2 this makes it so my function does not go to negative infinity
C_dl_pos = 2e2 # F/m2 (need to look up)
t_final = 1000 #seconds
"n_values:"
n_neg_p = -1
n_neg_o = -2
n_pos_p = 2
n_pos_o = 2
"Potentials (I will place in more accurate numbers later) The anode is the reference"
phi_neg_0 = 0 #this will by my reference electrode
phi_elyte_0 = 0.5 # I will calculate more accurately later
phi_pos_0 = 1.05 # I will calculate more accurately later
dphi_int_neg_0 = phi_elyte_0-phi_neg_0 #Sets the applied potential on the cell\
dphi_int_pos_0 = phi_pos_0-phi_elyte_0
"beta values (need to also look up)"
beta_o = 0.5
beta_p = 0.5
"Physical Constants:"
F = 96485 #C/mol e-
R = 8.314 #J/mol*K
"Equation values"
T = 823 #K
"Chemical parameters: (For these I just used yours, not sure where/how to find them) (I also kept hte positrode and negatrode values the same for now)"
#Negatrode ORR
k_fwd_star_neg_o = 4.16307062e+1 # Chemical forward rate constant, m^4/mol^2/s
k_rev_star_neg_o = 4.0650045e-1 #Chemical reverse rate constant, m^4/mol^2/s
#Negatrode HER also neeed to look these up, but im assuming they are much faster than the oxide ones
k_fwd_star_neg_p = 4.16307062e+3 # Chemical forward rate constant, m^4/mol^2/s
k_rev_star_neg_p = 4.0650045e+1 #Chemical reverse rate constant, m^4/mol^2/s
#Positrode OER
k_fwd_star_pos_o = 4.16307062e+1 # Chemical forward rate constant, m^4/mol^2/s
k_rev_star_pos_o = 4.0650045e-1 #Chemical reverse rate constant, m^4/mol^2/s
#Positrode HRR also neeed to look these up, but im assuming they are much faster than the oxide ones
k_fwd_star_pos_p = 4.16307062e+3 # Chemical forward rate constant, m^4/mol^2/s
k_rev_star_pos_p = 4.0650045e+1 #Chemical reverse rate constant, m^4/mol^2/s
"Material Parameters"
#BCZYYb4411 parameters:
ele_cond = 0.001 #1/(ohm*m) Need to look up this value so I just used yours
C_elyte = 46050 # Total (reference) elyte concentration, mol/m2 (I will calculate this at a later point)
D_k = np.array([7.46*10**-11,1.28*10**-12,0]) #(m^2/s) [Proton,Oxygen,Vacancy] Again I need to look these up so I used yours
#Nickle parameters:
C_Ni_s = 2.6e-05 #Surface site Concentrations mol/m^2 (again this is just from hw4)
#BCFZY parameters:
C_BCFZY = 46000 #mol/m^2 surface site concentration, I will look this up (If it is not known I will estimate it) likely it is similar to the elyte
"Concentrations/activities: I need to look these up so I used yours from HW4."
#-----Negatrode:
#Mol fractions (no units)
X_H_Ni = 0.6 #HW4
X_H2O_Ni = 0.2 #HW4
X_vac_Ni = 0.2 #HW4
X_Ox_elyte = 0.8 #I know this is 0.8
X_Hx_elyte = 0.1 #I am unsure of the split between Hx and oxygen vacancies
X_vac_elyte = 0.1
#-----Positrode:
#Mol fractions (no units) #I made these up, all I know is that 80% of the lattice sites:
X_Hx_BF = 0.05
X_H2O_BF = 0.05
X_vac_BF = 0.05
X_O_BF = 0.05
X_Ox_BF = 0.8
"geometric parameters"
n_brugg = -0.5 #bruggman factor assuming alpha is -1.5
#anode
eps_Ni = 0.159 #see calculations
eps_elyte_neg = 0.191 #See Calculations
eps_gas_neg = 1-eps_Ni-eps_elyte_neg
d_Ni_neg = 1*10**-5 #(m)rough estimate from SEM images (average diameter of Ni in negatrode)
d_elyte_neg = 5*10**-6 #(m) rough estimate from SEM images (average diameter of BCZYYb in negatrode)
d_part_avg = (d_Ni_neg+d_elyte_neg)/2 #just taking a linear average of the two particle sizes
r_int = 2*10**-6 #(m) rough estimate from SEM images, interface region between particles, on SEM images it looks almost like the radius
#Cathode
d_BCFZY = 500*10**-9 #(m) rough estimate from SEM images
eps_BCFZY = 0.5 #just assuming 50% porosity need to look up this value could measure more accurately
eps_gas_pos = 1-eps_BCFZY
"Thermodynamic values (first 5 taken from homework 4, last one I had to make up)"
g_H_Ni_o = -7.109209e+07 # standard-state gibbs energy for H adsorbed on Ni surface (J/kmol)
g_H2O_Ni_o = -3.97403035e+08 # standard-state gibbs energy for H2O adsorbed on Ni surface (J/kmol)
g_Vac_Ni_o = 0.0 # standard-state gibbs energy for Ni surface vacancy (J/kmol)
g_Vac_elyte_o = 0.0 # standard-state gibbs energy for electrolyte oxide vacancy (J/kmol)
g_Ox_elyte_o = -2.1392135e+08 # standard-state gibbs energy for electrolyte oxide O2- (J/kmol)
g_Hx_elyte_o = -2.1392135e+07 # standard-state gibbs energy for electrolyte protons H+ (J/kmol)
"Stoichiometric values:"
#negatrode proton reaction:
nu_H_Ni_neg_p = -1
nu_vac_ely_neg_p = -1
nu_Hx_ely_neg_p = 1
nu_vac_Ni_neg_p = 1
#negatrode oxide reaction:
nu_H_Ni_neg_o = -2
nu_H2O_Ni_neg_o = 1
nu_vac_Ni_neg_o = 1
nu_vac_elyte_neg_o = 1
nu_Ox_elyte_neg_o = -1
#postirode proton reaction:
nu_Hx_BF_pos_p = -2
nu_O_BF_pos_p = -1
nu_H2O_BF_pos_p = 1
nu_vac_BF_pos_p = 1
#positrode oxide reaction:
nu_O_BF_pos_o = -1
nu_Ox_BF_pos_o = 1
nu_vac_BF_pos_o = 1
#/\/\/\/\/\ Initializing Solution Vector /\/\/\/\/\
SV_0 = np.array([dphi_int_neg_0 ,dphi_int_pos_0 ])
#/\/\/\/\/\ Making the parameter class /\/\/\/\/\
class pars:
    """Namespace bundling all model parameters for the PCEC simulation.

    Values are copied from the module-level constants defined above so
    downstream code can pass a single ``pars`` object around.
    """
    #important parameters
    i_ext = i_ext
    C_dl_neg = C_dl_neg
    C_dl_pos = C_dl_pos
    time_span = np.array([0,t_final])
    T = T
    #beta values (charge-transfer symmetry factors)
    beta_o = 0.5
    beta_p = 0.5
    #Interface Potentials
    dphi_int_neg_0 = phi_elyte_0-phi_neg_0 #Sets the applied potential on the cell
    dphi_int_pos_0 = phi_pos_0-phi_elyte_0
    "Chemical parameters: (For these I just used yours, not sure where/how to find them) (I also kept hte positrode and negatrode values the same for now)"
    k_fwd_star_neg_o = k_fwd_star_neg_o
    k_rev_star_neg_o = k_rev_star_neg_o
    #Negatrode HER
    k_fwd_star_neg_p = k_fwd_star_neg_p
    k_rev_star_neg_p = k_rev_star_neg_p
    #Positrode OER
    k_fwd_star_pos_o = k_fwd_star_pos_o
    k_rev_star_pos_o = k_rev_star_pos_o
    #Positrode HRR
    k_fwd_star_pos_p = k_fwd_star_pos_p
    k_rev_star_pos_p = k_rev_star_pos_p
    "Material Parameters"
    #BCZYYb4411 parameters:
    ele_cond = ele_cond
    C_elyte = C_elyte
    D_k = D_k
    #Nickle parameters:
    C_Ni_s = C_Ni_s
    #BCFZY parameters:
    C_BCFZY = C_BCFZY
    "Activity concentrations"
    #Negatrode Activity Concentrations: (mol/m^2)
    C_H_Ni = X_H_Ni*C_Ni_s
    C_H2O_Ni = X_H2O_Ni*C_Ni_s
    C_vac_Ni = X_vac_Ni*C_Ni_s
    C_Hx_elyte = X_Hx_elyte*C_elyte
    C_Ox_elyte = X_Ox_elyte*C_elyte
    C_vac_elyte = X_vac_elyte*C_elyte
    #Positrode Activity Concentrations: (mol/m^2)
    C_Hx_BF = X_Hx_BF*C_BCFZY
    C_H2O_BF = X_H2O_BF*C_BCFZY
    C_vac_BF = X_vac_BF*C_BCFZY
    C_O_BF = X_O_BF*C_BCFZY
    # NOTE(review): this uses X_Ox_elyte rather than X_Ox_BF -- both are
    # currently 0.8 so the value is unchanged, but it looks like a
    # copy-paste slip; confirm the intended mole fraction.
    C_Ox_BF = X_Ox_elyte*C_BCFZY
    "Geometric parameters"
    #Anode Geometric Parameters
    L_TPB = 2*math.pi*r_int
    A_surf_Ni_neg = 4*math.pi*(d_Ni_neg/2)**2
    A_surf_elyte_neg = 4*math.pi*(d_elyte_neg/2)**2
    tau_fac_neg = eps_gas_neg**n_brugg #tortuosity factor
    Kg_neg = (eps_gas_neg**3*d_part_avg**2)/(72*tau_fac_neg*(1-eps_gas_neg)**2) #gas permeability, see calculations for more details
    #Cathode Geometric Parameters
    A_surf_BCFZY = 4*math.pi*(d_BCFZY/2)**2
    tau_fac_pos = eps_gas_pos**n_brugg
    # NOTE(review): the (1 - eps) factor below uses eps_gas_neg inside a
    # positrode expression; likely should be eps_gas_pos -- confirm.
    Kg_pos = (eps_gas_pos**3*d_part_avg**2)/(72*tau_fac_pos*(1-eps_gas_neg)**2)
    "Negatrode Product calculations" #Calculates the product terms in the mass action equations
    prod_fwd_neg_o = C_Ox_elyte**-nu_Ox_elyte_neg_o * C_H_Ni**-nu_H_Ni_neg_o #- signs are needed to cancel out the sign convention of the stoichiometric coefficients
    prod_fwd_neg_p = C_H_Ni**-nu_H_Ni_neg_p * C_vac_Ni**-nu_vac_ely_neg_p
    prod_rev_neg_o = C_vac_elyte**nu_vac_elyte_neg_o * C_H2O_Ni**nu_H2O_Ni_neg_o * C_vac_Ni**nu_vac_Ni_neg_o
    prod_rev_neg_p = C_Hx_elyte**nu_Hx_ely_neg_p * C_vac_Ni**nu_vac_Ni_neg_p
    "Positrode Product calculations" #Calculates the product terms in the mass action equations
    prod_fwd_pos_o = C_O_BF**-nu_O_BF_pos_o #- signs are needed to cancel out the sign convention of the stoichiometric coefficients
    prod_fwd_pos_p = C_Hx_BF**-nu_Hx_BF_pos_p * C_O_BF**-nu_O_BF_pos_p
    prod_rev_pos_o = C_Ox_BF**nu_Ox_BF_pos_o
    prod_rev_pos_p = C_H2O_BF**nu_H2O_BF_pos_p * C_vac_BF**nu_vac_BF_pos_p
#/\/\/\/\/\ Making the pointer class/\/\/\/\/\
#specifies where in SV certain variables are stored
class ptr:
    """Pointer namespace: maps each state variable to its index (or
    index range) inside the solution vector SV."""
    # Index of the negatrode interface potential in SV.
    dphi_int_neg_0 = 0
    # C_k in anode GDL: starts just after phi_int_net_0
    # NOTE(review): np.arange(dphi_int_neg_0+1) evaluates to array([0]),
    # i.e. the SAME slot as dphi_int_neg_0 above, contradicting the
    # "starts just after" comment.  Confirm whether this should instead
    # be index 1 (e.g. dphi_int_neg_0 + 1).
    dphi_int_pos_0 = np.arange(dphi_int_neg_0+1)
| null | Meisel/pcec_params.py | pcec_params.py | py | 8,694 | python | en | code | null | code-starcoder2 | 50 |
291580210 | import os
import pickle
import pandas as pd
from app.models import Gallica, Wiki, Tags, Person
NOTEBOOK_DATA_PATH = os.path.join(os.path.abspath(os.path.join(__file__, '../../../../')),
'notebooks/data')
IMAGES_FOLDER = os.path.join(os.path.abspath(os.path.join(__file__, '../../static/')), 'img_full')
def get_avg_text(x, y):
    """Average the character counts of two possibly-missing strings.

    NaN/None inputs count as empty strings.
    """
    safe_x = '' if pd.isnull(x) else x
    safe_y = '' if pd.isnull(y) else y
    return (len(safe_x) + len(safe_y)) / 2
def get_wiki_text(x, y):
    """Return the first non-null of (x, y), falling back to ''."""
    for candidate in (x, y):
        if pd.notnull(candidate):
            return candidate
    return ''
def run():
    """Populate the database from the notebook pickles.

    Loads the raw Gallica metadata and the merged (Gallica + Wikipedia)
    dataframe, normalises columns, then creates one Person row per
    entry together with its Wiki, Gallica and Tags children.
    """
    # --- Gallica metadata: normalise identifier/date columns ---
    gallica_metadata = pd.read_pickle(os.path.join(NOTEBOOK_DATA_PATH, 'raw_df.pkl'))
    # 'dc:identifier' may hold a list of urls; keep only the first one.
    image_dataframe = pd.DataFrame(gallica_metadata['dc:identifier']
                                   .map(lambda x: x[0] if type(x) == list else x))\
                                   .rename(columns={'dc:identifier':'identifier'})
    gallica_metadata['gallica_url'] = image_dataframe
    gallica_metadata = gallica_metadata.rename(columns={'dc:date': 'date'})
    # Image ids actually present on disk (file names without extension).
    images_folder = [x.split('.')[0] for x in os.listdir(IMAGES_FOLDER)]
    # NOTE(review): the file handle is never closed -- prefer
    # ``with open(...) as pickle_in:`` here.
    pickle_in = open(os.path.join(NOTEBOOK_DATA_PATH, 'merged_dataframe.pkl'), 'rb')
    merged_dataframe = pickle.load(pickle_in)
    merged_dataframe = merged_dataframe.rename(columns={'weight': 'n_images_wiki',
                                                        'url_fr': 'wiki_fr_link',
                                                        'id': 'gallica_url'})
    # Derived columns: last url segment as identifier, text statistics.
    merged_dataframe['gallica_identifier'] = merged_dataframe.gallica_url.apply(lambda x: x.split('/')[-1] if pd.notnull(x) else '')
    merged_dataframe['summary_size'] = merged_dataframe.apply(lambda x: get_avg_text(x['wiki_en_text'], x['wiki_fr_text']), axis=1)
    merged_dataframe['wiki_text'] = merged_dataframe.apply(lambda x: get_wiki_text(x['wiki_en_text'], x['wiki_fr_text']), axis=1)
    # One row per (name, gallica url); keep only ids that have an image
    # file on disk, then join with the Gallica metadata for dates.
    ids_gallica = merged_dataframe.explode('id_list')[['name', 'id_list']].rename(columns={'id_list': 'gallica_url'})
    ids_gallica['gallica_id'] = ids_gallica['gallica_url'].apply(lambda x: x.split('/')[-1] if pd.notnull(x) else '')
    ids_gallica = ids_gallica[ids_gallica['gallica_id']!='']
    ids_gallica = ids_gallica[ids_gallica.gallica_id.apply(lambda x: x in images_folder)]
    ids_gallica = gallica_metadata.merge(ids_gallica)[['gallica_url', 'gallica_id', 'date', 'name']]
    # One row per (name, tag).
    tags = merged_dataframe.explode('tags')[['name', 'tags']].rename(columns={'tags': 'tag'})
    persons = list(merged_dataframe[['name',
                                     'gallica_url',
                                     'bnf_link',
                                     'note',
                                     'gender',
                                     'gender_estimate',
                                     'age_estimate',
                                     'summary_size',
                                     'n_images_wiki',
                                     'gallica_identifier']].T.to_dict().values())
    # --- Persist one Person per row plus its related objects ---
    for person in persons:
        name = person['name']
        p = Person(**person)
        p.save()
        # First matching wiki record for this person.
        wiki = list(merged_dataframe[merged_dataframe.name == name][['wiki_en_link', 'wiki_fr_link',
                                                                     'wiki_en_text', 'wiki_fr_text', 'wiki_text',
                                                                     'summary_size', 'n_images_wiki']].T.to_dict().values())[0]
        wiki['person'] = p
        w = Wiki(**wiki)
        w.save()
        this_gallicas = ids_gallica[ids_gallica.name == name]
        this_gallicas = list(this_gallicas[['gallica_url', 'gallica_id', 'date']]
                             .T.to_dict().values())
        this_tags = tags[tags.name == name]
        this_tags = [{'tag': t} for t in this_tags[['tag']].values.reshape(len(this_tags, ))]
        for gallica in this_gallicas:
            gallica['person'] = p
            g = Gallica(**gallica)
            g.save()
        for tag in this_tags:
            tag['person'] = p
            t = Tags(**tag)
            t.save()
341244024 | # -*- coding: utf-8 -*-
"""
Copyright 2018 NAVER Corp.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import nsml
from nsml import DATASET_PATH, HAS_DATASET, IS_ON_NSML
from dataset import KinQueryDataset, preprocess
# DONOTCHANGE: They are reserved for nsml
# This is for nsml leaderboard
def bind_model(sess, config):
    """Register save/load/infer callbacks with NSML for this TF session.

    The callbacks close over *sess* and the module-level graph tensors
    (``output``, ``x_1``, ``x_2``) created in the ``__main__`` block below.
    """
    # Saves the trained model.
    def save(dir_name, *args):
        # Persist the session's variables under dir_name/model.
        os.makedirs(dir_name, exist_ok=True)
        saver = tf.train.Saver()
        saver.save(sess, os.path.join(dir_name, 'model'))
    # Restores a previously saved model.
    def load(dir_name, *args):
        saver = tf.train.Saver()
        # find checkpoint
        ckpt = tf.train.get_checkpoint_state(dir_name)
        if ckpt and ckpt.model_checkpoint_path:
            checkpoint = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(dir_name, checkpoint))
        else:
            # Bug fix: the original raised the NotImplemented *constant*,
            # which is not an exception and fails with a TypeError.
            raise NotImplementedError('No checkpoint!')
        print('Model loaded')
    def infer(raw_data, **kwargs):
        left_preprocessed_data, right_preprocessed_data = preprocess(raw_data, config.strmaxlen)
        # Feed the preprocessed query pairs and return (score, 0/1) pairs.
        pred = sess.run(output, feed_dict={x_1: left_preprocessed_data, x_2: right_preprocessed_data})
        clipped = np.array(pred > config.threshold, dtype=np.int)
        return list(zip(pred.flatten(), clipped.flatten()))
    # DONOTCHANGE: They are reserved for nsml
    # Gives NSML access to the three callbacks above.
    nsml.bind(save=save, load=load, infer=infer)
def _batch_loader(iterable, n=1):
"""
데이터를 배치 사이즈만큼 잘라서 보내주는 함수입니다. PyTorch의 DataLoader와 같은 역할을 합니다
:param iterable: 데이터 list, 혹은 다른 포맷
:param n: 배치 사이즈
:return:
"""
length = len(iterable)
for n_idx in range(0, length, n):
yield iterable.left_data[n_idx:min(n_idx + n, length)],\
iterable.right_data[n_idx:min(n_idx + n , length)], iterable.labels[n_idx:min(n_idx + n , length)]
# Don't use
def conv_block(k_size=5, h_size=64, input=None):
    """Unused helper (see "Don't use" above): conv -> ReLU -> BN -> 2x2 max-pool.

    ``input`` shadows the builtin of the same name; left untouched because
    this helper has no callers in this file.
    """
    # Layer1 64 * 32 * 64 with 2 layer
    Weight = tf.Variable(tf.random_normal([k_size, k_size, 1, h_size], stddev=0.01))
    Layer = tf.nn.conv2d(input, Weight, strides=[1, 1, 1, 1], padding="SAME")
    Layer = tf.nn.relu(Layer)
    Layer = tf.layers.batch_normalization(Layer)
    Layer = tf.nn.max_pool(Layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    return Layer
def inception2d(inputs, input_channel, channel_size):
    """Inception-style block: parallel 1x1, 3x3 and 5x5 convs, concatenated.

    Each branch yields ``channel_size`` channels, so the output has
    ``3 * channel_size`` channels on axis 3, followed by a ReLU.
    ``input_channel`` must equal the channel count of ``inputs``.
    """
    # bias = tf.Variable(tf.constant(0.1, shape=[channel_size]))
    first_weight = tf.Variable(tf.truncated_normal([1, 1, input_channel, channel_size]))
    first_layer = tf.nn.conv2d(inputs, first_weight, strides=[1, 1, 1, 1], padding="SAME")
    second_weight = tf.Variable(tf.truncated_normal([3, 3, input_channel, channel_size]))
    second_layer = tf.nn.conv2d(inputs, second_weight, strides=[1, 1, 1, 1], padding="SAME")
    third_weight = tf.Variable(tf.truncated_normal([5, 5, input_channel, channel_size]))
    third_layer = tf.nn.conv2d(inputs, third_weight, strides=[1, 1, 1, 1], padding="SAME")
    # pooling = tf.nn.avg_pool(inputs, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
    # Concatenate the three branches along the channel axis.
    outputs = tf.concat([first_layer, second_layer, third_layer], axis=3)
    # outputs = tf.nn.bias_add(outputs, bias)
    outputs = tf.nn.relu(outputs)
    return outputs
def model(embedded):
    """Build the conv tower over one embedded query; returns a 1024-unit FC layer.

    Five inception blocks (base widths 8/32/128/512/2048, each tripled by the
    branch concat) with 2x2 max-pooling between the first four.
    NOTE(review): the flatten size ``4 * 1 * 512 * 12`` is hand-computed for
    strmaxlen=64 / embedding=16 — confirm before changing those options.
    """
    # ====================== Conv Block 64, 128, 256, 512 =======================
    layer1 = inception2d(embedded, 1, 8)
    layer1 = tf.layers.batch_normalization(layer1)
    layer1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    layer2 = inception2d(layer1, 24, 32)
    layer2 = tf.layers.batch_normalization(layer2)
    layer2 = tf.nn.max_pool(layer2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    layer3 = inception2d(layer2, 96, 128)
    layer3 = tf.layers.batch_normalization(layer3)
    layer3 = tf.nn.max_pool(layer3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    layer4 = inception2d(layer3, 384, 512)
    layer4 = tf.layers.batch_normalization(layer4)
    layer4 = tf.nn.max_pool(layer4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    layer5 = inception2d(layer4, 1536, 2048)
    layer5 = tf.layers.batch_normalization(layer5)
    # =============== FC Layer 1 ===================
    weight6 = tf.Variable(tf.random_normal([4 * 1 * 512 * 12, 1024], stddev=0.01))
    fc_layer6 = tf.reshape(layer5, [-1, 4 * 1 * 512 * 12])
    fc_layer6 = tf.matmul(fc_layer6, weight6)
    fc_layer6 = tf.nn.relu(fc_layer6)
    return fc_layer6
if __name__ == '__main__':
    # Parse options, build the twin-tower similarity graph, then either
    # train (reporting to NSML each epoch) or run local inference.
    args = argparse.ArgumentParser()
    # DONOTCHANGE: They are reserved for nsml
    args.add_argument('--mode', type=str, default='train')
    args.add_argument('--pause', type=int, default=0)
    args.add_argument('--iteration', type=str, default='0')
    # User options
    args.add_argument('--output', type=int, default=1)
    args.add_argument('--epochs', type=int, default=10)
    args.add_argument('--batch', type=int, default=2048)
    args.add_argument('--strmaxlen', type=int, default=64)
    args.add_argument('--embedding', type=int, default=16)
    args.add_argument('--threshold', type=float, default=0.5)
    config = args.parse_args()
    if not HAS_DATASET and not IS_ON_NSML:  # It is not running on nsml
        DATASET_PATH = '../sample_data/kin/'
    # Model specification.
    input_size = config.embedding*config.strmaxlen  # 32 * 256
    learning_rate = 0.000001
    character_size = 256
    strmaxlen = config.strmaxlen
    embedding = config.embedding
    x_1 = tf.placeholder(tf.int32, [None, strmaxlen])
    x_2 = tf.placeholder(tf.int32, [None, strmaxlen])
    y_ = tf.placeholder(tf.float32, [None, 1])
    # Character embedding table, shared by both query towers.
    char_embedding = tf.get_variable('char_embedding', [character_size, embedding, 1])
    embedded_1 = tf.nn.embedding_lookup(char_embedding, x_1)
    embedded_2 = tf.nn.embedding_lookup(char_embedding, x_2)
    # Two towers; note each model() call creates its own (unshared) weights.
    model_1 = model(embedded=embedded_1)  # 1024 * 1
    model_2 = model(embedded=embedded_2)  # 1024 * 1
    # Concatenate the two towers and classify with three FC layers.
    Weight7 = tf.Variable(tf.random_normal([1024 * 2, 2048], stddev=0.01))
    FC1 = tf.matmul(tf.concat([model_1, model_2], 1), Weight7)
    FC1 = tf.nn.relu(FC1)
    Weight8 = tf.Variable(tf.random_normal([2048, 1024], stddev=0.01))
    FC2 = tf.matmul(FC1, Weight8)
    FC2 = tf.nn.relu(FC2)
    Weight9 = tf.Variable(tf.random_normal([1024, 1], stddev=0.01))
    output = tf.matmul(FC2, Weight9)
    # output_sigmoid = tf.nn.sigmoid(output)
    # Loss and optimizer (sigmoid cross-entropy on the raw logits).
    binary_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=output))
    # rmse = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_, output_sigmoid))))
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(binary_cross_entropy)
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()
    # DONOTCHANGE: Reserved for nsml
    bind_model(sess=sess, config=config)
    # DONOTCHANGE: Reserved for nsml
    if config.pause:
        nsml.paused(scope=locals())
    if config.mode == 'train':
        # Load the dataset.
        dataset = KinQueryDataset(DATASET_PATH, config.strmaxlen)
        dataset_len = len(dataset)
        one_batch_size = dataset_len//config.batch
        if dataset_len % config.batch != 0:
            one_batch_size += 1
        # Train once per epoch, checkpointing to NSML after each.
        for epoch in range(config.epochs):
            avg_loss = 0.0
            for i, (left_data, right_data, labels) in enumerate(_batch_loader(dataset, config.batch)):
                _, loss = sess.run([train_step, binary_cross_entropy], feed_dict={x_1: left_data, x_2: right_data, y_: labels})
                # _, right_loss = sess.run([train_step, rmse], feed_dict={x: left_data, y_: labels})
                loss = float(loss)
                print('Batch : ', i + 1, '/', one_batch_size, ', RMSE in this minibatch: ', loss)
                avg_loss += loss
            print('epoch:', epoch, ' train_loss:', avg_loss/one_batch_size)
            nsml.report(summary=True, scope=locals(), epoch=epoch, epoch_total=config.epochs,
                        train__loss=avg_loss/one_batch_size, step=epoch)
            # DONOTCHANGE (You can decide how often you want to save the model)
            nsml.save(epoch)
    # Local test mode; output like [(0.3, 0), (0.7, 1), ...] means the model
    # is ready to submit via `nsml submit`.
    # NOTE(review): _batch_loader expects an object with left_data/right_data/
    # labels attributes, but `queries` is a plain list — confirm this mode.
    elif config.mode == 'test_local':
        with open(os.path.join(DATASET_PATH, 'train/train_data'), 'rt', encoding='utf-8') as f:
            queries = f.readlines()
        res = []
        for batch in _batch_loader(queries, config.batch):
            temp_res = nsml.infer(batch)
            res += temp_res
        print(res)
# Read three integers and report how many of the values coincide.
print("3つの整数を入力して下さい")
a, b, c = (int(input(label)) for label in ("整数a:", "整数b:", "整数c:"))
if a == b == c:
    # All three values are equal.
    print("3つの値は正しいです")
elif a == b or b == c or a == c:
    # Exactly two of the values are equal.
    print("2つの値が等しいです")
else:
    # All three values differ.
    print("3つの値は異ります")
import requests
import multiprocessing as mp
def getproxies():
    """Download the current SOCKS5 proxy list and save it to unchecked.txt."""
    url = "https://api.proxyscrape.com/?request=getproxies&proxytype=socks5&timeout=10000&country=all&uptime=0"
    response = requests.get(url)
    with open("unchecked.txt", "w") as out_file:
        out_file.write(response.text)
def checkproxies():
    """Probe every proxy in unchecked.txt against Google, printing OK/BAD."""
    with open("unchecked.txt", "r") as f:
        print("Checking Proxies...")
        proxies = f.readlines()
        for line in proxies:
            # strip() removes the trailing newline and also copes with a
            # final line that has none (the old `i[:len(i)-1]` slicing
            # chopped a real character off such a line).
            proxy = line.strip()
            if not proxy:
                continue
            try:
                requests.get("https://google.com",
                             proxies={"https": "socks5h://" + proxy},
                             timeout=1000)
                print(proxy + " OK")
            # Catch only request failures so Ctrl-C still stops the scan
            # (the original bare `except:` swallowed KeyboardInterrupt too).
            except requests.exceptions.RequestException:
                print(proxy + " BAD")
def multiTasks():
    # Run the proxy check in a worker pool. NOTE(review): pool.apply runs
    # checkproxies once, synchronously, so only one of the 5 processes is
    # ever used — presumably pool.map over proxy chunks was intended.
    with mp.Pool(processes=5) as pool:
        pool.apply(checkproxies)
if __name__ == "__main__":
    # Entry point: check whatever list unchecked.txt currently holds.
    multiTasks()
| null | unfinished_python/app.py | app.py | py | 821 | python | en | code | null | code-starcoder2 | 51 |
import preprocessing as pre
import numpy as np
import pandas as pd
#temporizador
import time
from functools import wraps
def computeDecisionTreeRegressionModel(X, y):
    """Fit and return a scikit-learn decision-tree regressor on (X, y)."""
    from sklearn.tree import DecisionTreeRegressor
    tree_model = DecisionTreeRegressor()
    # fit() returns the estimator itself, so this is the fitted model.
    return tree_model.fit(X, y)
def showPlot(XPoints, yPoints, XLine, yLine):
    """Scatter the real points in red and overlay the fitted line in blue."""
    import matplotlib.pyplot as plt
    plt.scatter(XPoints, yPoints, color='red')
    plt.plot(XLine, yLine, color='blue')
    plt.title("Comparando pontos reais com a reta produzida pela regressão de árvore de decisão.")
    plt.xlabel("Experiência em anos")
    plt.ylabel("Salário")
    plt.show()
def runDecisionTreeRegressionExample(filename):
    """Fit a decision-tree regressor on *filename* and return its in-sample R^2.

    Prints how long the model fitting took; dataset-loading time is
    measured but its report is left commented out, as before.
    """
    load_start = time.time()
    X, y, csv = pre.loadDataset(filename)
    load_elapsed = time.time() - load_start
    # print("Load Dataset: %.2f" % load_elapsed, "segundos.")
    fit_start = time.time()
    regressor = computeDecisionTreeRegressionModel(X, y)
    fit_elapsed = time.time() - fit_start
    print("Compute Decision Tree Regression: %.2f" % fit_elapsed, "segundos.")
    from sklearn.metrics import r2_score
    return r2_score(y, regressor.predict(X))
if __name__ == "__main__":
    # Report the in-sample R^2 of a tree fitted on salary.csv.
    print(runDecisionTreeRegressionExample("salary.csv"))
| null | Ep 11/regressiondecisiontree.py | regressiondecisiontree.py | py | 1,274 | python | en | code | null | code-starcoder2 | 51 |
# Dictionary exercise: read plan/fact figures for k enterprises, compute each
# completion ratio, then report those whose fact profit exceeds 10 but whose
# plan was not met (< 100%).
k = int(input('Введите количеств предприятий'))
enterprise = {}
for _ in range(k):
    name = input('Введите название предприятия: ')
    plan = float(input('План :'))
    fact = float(input('Факт: '))
    enterprise[name] = [plan, fact, fact / plan]
print('Фактическая прибыль больше 10, но план не выполнен (меньше 100%)')
for name, (plan, fact, ratio) in enterprise.items():
    if fact > 10 and ratio < 1:
        print(f'Предприятие {name} заработало {fact}, что составило {ratio * 100:.2f}%')
"""wvpoi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# Route table for the wvpoi app: static pages plus the listings JSON API.
urlpatterns = [
    # Landing page.
    url(r'^$', views.index, name='home'),
    # Interactive POI tool.
    url(r'^tool/$', views.tool, name='tool'),
    # Listings index page.
    url(r'^listings/$', views.listings, name='listings'),
    # Map view of all points of interest.
    url(r'^map/$', views.map_view, name='map'),
    # API landing / documentation endpoint.
    url(r'^api/$', views.api, name='api'),
    # JSON endpoint consumed by the map and listings pages.
    url(r'^api/get-listings/$', views.get_listings, name='get_listings'),
]
| null | django-site/wvpoi/urls.py | urls.py | py | 1,119 | python | en | code | null | code-starcoder2 | 51 |
"""
Process IMAGEN stop signal task data into BEESTS-friendly form.
"""
import pandas as pd
import numpy as np
from copy import copy
import itertools
import ipdb # Use kk alias to os._exit(0) workaround for exiting. See ~/.pdbrc
from os import walk
# Main routine to import the primary csv and do some preliminary parsing
def main():
    """Read raw stop-signal CSVs, recode them, and write BEESTS-ready files.

    For each subject file in ``in_dir``: recodes pre-signal responses as go
    trials, derives the inhibition-outcome column, classifies neutral
    stimuli from the subject's own responses, writes a per-subject CSV to
    ``out_dir``, and accumulates group CSVs plus per-subject diagnostic
    rates (no-response, pre-signal, low-RT).
    """
    # Some settings
    clean = False
    skip_NSSI = False
    skip_HC = True
    # Location of by-subject output:
    out_dir = '/home/dan/projects/NSSI/ver2.0-hc/by-subject-output/'
    # Format to 12 digit leading-zero-padded integer strings.
    #subj_list_train = subj_list_train['Subject'].apply(lambda x: str(int(x)).zfill(12))
    # Data frames in which to accumulate the main data from subject files as well as
    # saving predetermined diagnostics
    subj_data = pd.DataFrame()
    addnl_diagnostic_data = pd.DataFrame()
    # Fieldnames to read
    fields = ["subject" , "expressions.trialnumber", "values.stimulus", 'values.signal',
              "values.correct", "values.response", "values.rt", 'values.ssd']
    # Location of the csv files to be read:
    in_dir = '/home/dan/projects/NSSI/raw_subj_data/'
    # Determine the filenames for the data (top level of in_dir only)
    f = []
    for (_, _, filenames) in walk(in_dir):
        f.extend(filenames)
        break
    f_num = 0
    # Read subject files:
    for file in filenames:
        # Some files may be corrupted, so try-catch:
        try:
            f_num = f_num + 1
            frame = pd.read_csv(in_dir + file, header=0, sep='\t', usecols=fields)
            #sid = pd.Series(np.ones([frame.shape[0]]) * float(padded_subj_str), index=frame.index)
            sid = frame["subject"].copy()
            ssd = frame["values.ssd"].copy()
            rt = frame["values.rt"].copy()
            ss_bool = frame['values.signal'].copy()
            stype = frame['values.stimulus'].copy()
            resp = frame['values.response'].copy()
            ss_msk = ss_bool.copy()
            ss_msk = ss_msk.astype('bool')
            # If we only want to generate input with NSSI or HC subjs, skip those to be excluded.
            #if (((sid[0][3] is 'C') and skip_HC) or ((sid[0][3] is 'N') and skip_NSSI)): continue
            #res = frame["Response Outcome"]
            #dly = frame["Delay"]
            # Some ss_presented indicators will be wrong for our purposes:
            # Responding during the SSD period should be considered "success."
            too_early = np.logical_and(ssd > rt, rt > 0)
            # Check if there are any too early signals:
            print('Too early signals:')
            print([i for i, x in enumerate(too_early) if x])
            ssd[too_early] = -999
            ss_bool[too_early] = 0
            # Boolean inhibition success column
            #
            # -999: No data since no stop signal
            #    0: Failure to inhibit
            #    1: Successful inhibition
            inhib = ss_bool.copy()
            inhib[~ss_msk] = -999
            # Failure to inhibit occurs when stop signal present & rt is not 0
            inhib[ss_msk & ~(rt == 0)] = 0
            inhib = inhib.astype('int')
            # Format for output: BEESTS uses -999 as its missing-data marker.
            rt[rt == 0] = -999
            ssd[ssd == 0] = -999
            rt = rt.astype('int')
            ssd = ssd.astype('int')
            # Stimuli are labelled 1-36. 1-12 are pos. 13-24 are neg, 25-36 are neutral.
            # Convert pos and neg to +1, -1, then use subj. responses to categorize neutral.
            stype[np.logical_and(stype > 0 , stype < 13)] = 1
            stype[np.logical_and(stype > 12, stype < 25)] = -1
            stype = stype.astype('int')
            count_left_neutral = 0
            for s in np.arange(25, 36+1):
                count_stop = sum(resp[stype == s] == 0)
                count_pos = sum(resp[stype == s] == 1)
                count_neg = sum(resp[stype == s] == 2)
                # Require a clear consistency otherwise the decision difficulty may
                # corrupt estimates of other quantities
                if count_pos + count_neg > 0:
                    if count_pos / (count_pos + count_neg) > 0.74:
                        stype[stype == s] = 1
                    elif count_neg / (count_pos + count_neg) > 0.74:
                        stype[stype == s] = -1
                    else:
                        stype[stype == s] = 0
                        count_left_neutral = count_left_neutral + 1
                # If there were no responses to decide with code as neutral.
                else:
                    stype[stype == s] = 0
                    count_left_neutral = count_left_neutral + 1
            print('Count left neutral: %d' %count_left_neutral)
            col_names = ['subj_idx', 'ss_presented', 'inhibited', 'ssd', 'rt', 'type']
            tmp = pd.concat([sid, ss_bool, inhib, ssd, rt, stype], axis=1)
            tmp.columns = col_names
            # BEESTS can't handle no-response data when no signal was present.
            drop_inds = tmp[ss_bool == 0][rt == -999].index
            min_rt = 100
            way_fast = tmp[rt != -999][rt < min_rt]
            no_response_frac = 0
            if len(drop_inds) > 0:
                print(' ')
                print('The following bad data will be dropped from ' + file + ' data:')
                print(tmp.iloc[drop_inds])
                no_response_frac = len(drop_inds)/len(tmp[ss_bool == 0 ])
                # Bug fix: was round(150*...), which overstated the printed
                # percentage by half (siblings below correctly use 100).
                no_response_prct = round(100*no_response_frac)
                print('This represents ' + str(no_response_prct) + '% data loss from go trials.')
            low_rt_frac = 0
            if len(way_fast > 0):
                print(' ')
                low_rt_frac = len(way_fast)/len(tmp[ss_bool == 0 ])
                low_rt_prc = round(100*low_rt_frac)
                print('This participant also exhibits ' + str(low_rt_prc) + '% trials with RTs < 100')
            pre_signal_frac = 0
            if len(ssd[too_early]) > 0:
                print(' ')
                pre_signal_frac = len(ssd[too_early])/(len(tmp[ss_bool == 1]) + len(ssd[too_early]))
                pre_signal_prc = round(100*pre_signal_frac)
                print('This participant also exhibits ' + str(pre_signal_prc) + '% trials pre-signal responding')
            # Drop fields and append data
            if clean:
                tmp = tmp.drop(drop_inds)
            # Either way, save it to subj_data
            subj_data = pd.concat([subj_data, tmp])
            # Save additional subject diagnostics
            cols = ('Subject','no_response_frac', 'pre_signal_frac', 'low_rt_frac')
            row = [[sid[0],no_response_frac,pre_signal_frac,low_rt_frac]]
            addnl_diagnostic_data = pd.concat([addnl_diagnostic_data, pd.DataFrame(data = row, columns = cols)])
            # Save by-subject copy for matlab plotting
            tmp.to_csv(out_dir + sid[0] + '.csv', index=False)
            print('Finished file: ' + file.split('/')[-1] + ' (file num ' + str(f_num) + ')')
        # Catch Exception (not a bare except) so Ctrl-C still aborts the run.
        except Exception:
            print('Failure to read or process file: ' + file.split('/')[-1])
            continue
    # Once everything is loaded into the output dataframe, save it as a csv.
    # Subject ids encode the group in the 4th character: 'C' = HC, 'N' = NSSI.
    msk_HC = [subj_str[3] == 'C' for subj_str in subj_data['subj_idx']]
    msk_NSSI = [subj_str[3] == 'N' for subj_str in subj_data['subj_idx']]
    # Generate an enumeration of subjects (as a dictionary), then save it
    subj_num_dict = dict([(y,x+1) for x,y in enumerate(sorted(set( subj_data['subj_idx'] )))])
    subj_num_frame = pd.DataFrame.from_dict(subj_num_dict, orient='index')
    subj_num_frame.to_csv('subj_num_key.csv')
    # Use the dictionary to convert subject ids into usable format
    subj_data['subj_idx'] = subj_data['subj_idx'].map(subj_num_dict)
    # We DON'T want to sort the data by subj_idx now, because it may mess up
    # temporal structure. Not relevant for BEESTS if stop_shift isn't being estimated
    # but if we can get away with leaving e.g. subj 55 first, this seems safer.
    # Save
    print('Saving sst_data.csv to %s' %out_dir)
    subj_data.to_csv(out_dir + 'sst_data.csv', index=False)
    print('Saving sst_data_hc.csv to %s' %out_dir)
    subj_data[msk_HC].to_csv(out_dir + 'sst_data_hc.csv', index=False)
    print('Saving sst_data_nssi.csv to %s' %out_dir)
    subj_data[msk_NSSI].to_csv(out_dir + 'sst_data_nssi.csv', index=False)
    # Only care to output one file of addnl_diagnostic_data data:
    print('Saving addnl_diagnostic_data_sst_data_.csv')
    addnl_diagnostic_data.to_csv('addnl_diagnostic_data_sst_data.csv', index=False)
# Entry point: run the full extraction/cleaning pipeline when executed
# directly (no command-line arguments are read).
if __name__ == "__main__":
    # Run the actual data processing
    main()
| null | create_input/create_beests_input.py | create_beests_input.py | py | 9,406 | python | en | code | null | code-starcoder2 | 51 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import datetime
import decimal
from decimal import Decimal
from decimal import ROUND_DOWN
import itertools
import logging
from django.conf import settings
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
import afloclient as aflo_client
from afloclient import exc
from horizon import exceptions
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from nec_portal.local import nec_portal_settings as nec_set
# Module logger.
LOG = logging.getLogger(__name__)
# Registered API versions for the "ticket" service endpoint (v2 preferred).
VERSIONS = base.APIVersionManager("ticket", preferred_version=2)
# Price scope value marking a price that applies to every project.
SCOPE_DEFAULT = 'Default'
# Display format for currency values; overridable in local portal settings.
CURRENCY_FORMAT = getattr(nec_set, 'CURRENCY_FORMAT', '{0:,.2f}')
# [thousands separator, decimal point, decimal digit count] for prices.
PRICE_FORMAT = getattr(nec_set, 'PRICE_FORMAT', [',', '.', 2])
def _get_price_string(value):
    """Render *value* as a currency string.

    The value is truncated (ROUND_DOWN) to ``PRICE_FORMAT[2]`` decimal
    places and formatted with ``CURRENCY_FORMAT``.  Values that cannot be
    parsed as decimals are returned unchanged.

    :param value: price string
    """
    try:
        if 0 < PRICE_FORMAT[2]:
            # e.g. 2 decimals -> quantum '.01'
            quantum = Decimal('.' + '1'.zfill(PRICE_FORMAT[2]))
        else:
            quantum = Decimal('1.')
        truncated = Decimal(value).quantize(quantum, rounding=ROUND_DOWN)
        return CURRENCY_FORMAT.format(truncated)
    except (TypeError, decimal.InvalidOperation):
        return value
class ProjectCatalog(object):
    '''Project Catalog Class

    One catalog entry paired with the price a given project would pay.
    All values are HTML-escaped for safe template rendering.
    '''
    def __init__(self,
                 catalog_id,
                 scope,
                 seq_no,
                 catalog_name,
                 price,
                 project_id):
        self.catalog_id = escape(catalog_id)
        # Price scope: either the project id or SCOPE_DEFAULT.
        self.scope = escape(scope)
        self.seq_no = escape(seq_no)
        # Lazily translated display name.
        self.catalog_name = _(escape(catalog_name))  # noqa
        # Price rendered via CURRENCY_FORMAT / PRICE_FORMAT.
        self.price = _get_price_string(escape(price))
        self.project_id = escape(project_id)
@memoized
def afloclient(request, version='1'):
    """Return a memoized aflo API client bound to the request's auth token."""
    endpoint = base.url_for(request, 'ticket')
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    return aflo_client.Client(version, endpoint,
                              token=request.user.token.id,
                              insecure=insecure, cacert=cacert)
# Get ticket list
# Get ticket list
def ticket_list_detailed(request,
                         marker=None,
                         sort_dir='desc',
                         sort_key='created_at',
                         filters=None,
                         paginate=False,
                         ticket_type=None):
    """Return (tickets, has_more_data, has_prev_data).

    When ``filters`` contains 'ticket_id', a single-ticket GET is issued
    instead of a list call; otherwise a (optionally paginated) list request
    is made, fetching page_size + 1 rows to detect a following page.
    """
    if not filters or ('ticket_id' not in filters):
        limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
        page_size = utils.get_page_size(request)
        if paginate:
            # One extra row tells us whether a next page exists.
            request_size = page_size + 1
        else:
            request_size = limit
        kwargs = {'limit': limit,
                  'sort_dir': sort_dir,
                  'sort_key': sort_key, }
        if marker:
            kwargs['marker'] = marker
        # Equivalent to `filters is not None` (empty dict update is a no-op).
        if filters or filters is not None:
            kwargs.update(filters)
        if ticket_type:
            # Merge the extra ticket_type with one already in the filters.
            if 'ticket_type' in kwargs:
                kwargs['ticket_type'] = [kwargs['ticket_type']]
                kwargs['ticket_type'].append(ticket_type)
            else:
                kwargs['ticket_type'] = ticket_type
        LOG.debug('Ticket List Filter= ' + str(kwargs))
        tickets_list = afloclient(request).tickets.list(kwargs)
        has_prev_data = False
        has_more_data = False
        if paginate:
            tickets = list(itertools.islice(tickets_list, request_size))
            # first and middle page condition
            if len(tickets) > page_size:
                tickets.pop(-1)
                has_more_data = True
                # middle page condition
                if marker is not None:
                    has_prev_data = True
            # first page condition when reached via prev back
            elif sort_dir == 'asc' and marker is not None:
                has_more_data = True
            # last page condition
            elif marker is not None:
                has_prev_data = True
        else:
            tickets = list(tickets_list)
        return (tickets, has_more_data, has_prev_data)
    else:
        ticket_list = []
        # if user selected ticket_id,
        # Use ticket-get API.
        try:
            ticket = ticket_get_detailed(request, filters['ticket_id'])
            # Filtering project id
            if 'tenant_id' in filters and \
               ticket.tenant_id != filters['tenant_id']:
                return (ticket_list, False, False)
            # Convert get data to list data
            # NOTE(review): `filter(...)[0]` assumes Python 2 list semantics;
            # under Python 3 filter() is lazy and not subscriptable.
            last_workflow = filter(lambda workflow:
                                   workflow['status'] == 1,
                                   ticket.workflow)
            setattr(ticket, 'last_workflow', last_workflow[0])
            ticket_list.append(ticket)
        except exc.HTTPNotFound:
            # Unknown ticket id: fall through and return an empty list.
            pass
        return (ticket_list, False, False)
# Get ticket template list(get all data)
def tickettemplates_list_detailed_get_all(request, marker=None):
    """Return an iterator over every ticket template (no pagination)."""
    params = {}
    if marker:
        params['marker'] = marker
    return afloclient(request).tickettemplates.list(params)
def ticket_get_detailed(request, ticket_id):
    """Fetch a single ticket by id."""
    return afloclient(request).tickets.get(ticket_id)
# Get ticket template list
# Get ticket template list
def tickettemplates_list_detailed(request,
                                  ticket_type=None,
                                  marker=None,
                                  sort_dir=None,
                                  sort_key=None,
                                  filters=None,
                                  paginate=False,
                                  enable_expansion_filters=False):
    """Return (templates, has_more_data, has_prev_data).

    NOTE(review): ``filters`` is accepted but never applied to the request —
    confirm whether the template API supports it before wiring it in.
    """
    # Avoid mutable default arguments; the effective defaults are unchanged.
    if sort_dir is None:
        sort_dir = ['desc']
    if sort_key is None:
        sort_key = ['id']
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row tells us whether a next page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key,
              'enable_expansion_filters': enable_expansion_filters, }
    if ticket_type:
        kwargs['ticket_type'] = ticket_type
    if marker:
        kwargs['marker'] = marker
    tickets_list = afloclient(request).tickettemplates.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        tickets = list(itertools.islice(tickets_list, request_size))
        # first and middle page condition
        if len(tickets) > page_size:
            tickets.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif sort_dir == ['asc'] and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
    else:
        tickets = list(tickets_list)
    return (tickets, has_more_data, has_prev_data)
# Get ticket template
# Get ticket template
def tickettemplates_get(request, target_id):
    """Fetch one ticket template by id."""
    return afloclient(request).tickettemplates.get(target_id)
# Crate ticket
def ticket_create(request, fields):
    """Issue a ticket-create request with the given field dict."""
    client = afloclient(request)
    client.tickets.create(fields)
# Update ticket
def ticket_update(request, ticket_id, fields):
    """Apply *fields* to an existing ticket."""
    client = afloclient(request)
    client.tickets.update(ticket_id, fields)
# Get contract list
# Get contract list
def contract_list_detailed(request,
                           marker=None,
                           sort_dir='desc,desc',
                           sort_key='lifetime_start,contract_id',
                           filters=None,
                           paginate=False):
    """Return (contracts, has_more_data, has_prev_data).

    Sorting uses comma-joined multi-column keys; pagination fetches
    page_size + 1 rows so the presence of a following page can be detected.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row tells us whether a next page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key, }
    if marker:
        kwargs['marker'] = marker
    # Equivalent to `filters is not None` (empty dict update is a no-op).
    if filters or filters is not None:
        kwargs.update(filters)
    contract_list = afloclient(request).contracts.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        contracts = list(itertools.islice(contract_list, request_size))
        # first and middle page condition
        if len(contracts) > page_size:
            contracts.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif sort_dir == 'asc,asc' and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
    else:
        contracts = list(contract_list)
    return (contracts, has_more_data, has_prev_data)
def contract_get_detailed(request, contract_id):
    """Fetch a single contract by id."""
    client = afloclient(request)
    return client.contracts.get(contract_id)
def catalog_list_detailed(request,
                          marker=None,
                          limit=None,
                          sort_key='catalog_id',
                          sort_dir='desc',
                          force_show_deleted=None,
                          filters=None,
                          paginate=False):
    """Return (catalogs, has_prev_data, has_more_data).

    NOTE: unlike the ticket/contract list helpers above, the flags are
    returned in (prev, more) order here.  When paging backwards
    (sort_dir != 'desc') results are fetched reversed and flipped back.
    """
    limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row tells us whether a next page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key, }
    if marker is not None:
        kwargs['marker'] = marker
    if force_show_deleted is not None:
        kwargs['force_show_deleted'] = force_show_deleted
    if filters is not None:
        kwargs.update(filters)
    catalog_list = afloclient(request).catalogs.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        catalogs = list(itertools.islice(catalog_list, request_size))
        if sort_dir == 'desc':
            # Forward paging: extra row => more pages ahead.
            if len(catalogs) > page_size:
                catalogs.pop(-1)
                has_more_data = True
            else:
                has_more_data = False
                if marker is not None:
                    has_prev_data = True
        else:
            # Backward paging: results arrive reversed, so flip them below.
            if len(catalogs) > page_size:
                catalogs.pop(-1)
                has_prev_data = True
            else:
                has_prev_data = False
                has_more_data = True
            catalogs.reverse()
    else:
        catalogs = list(catalog_list)
    return (catalogs, has_prev_data, has_more_data)
def catalog_get_detailed(request, catalog_id):
    """Fetch a single catalog entry by id."""
    client = afloclient(request)
    return client.catalogs.get(catalog_id)
def catalog_contents_get_detailed(request, catalog_id):
    """Fetch the contents of a catalog entry by id."""
    client = afloclient(request)
    return client.catalog_contents.get(catalog_id)
def price_list_detailed(request,
                        catalog_id,
                        scope=None,
                        lifetime=None,
                        marker=None,
                        limit=None,
                        sort_key='lifetime_start',
                        sort_dir='desc',
                        force_show_deleted=None,
                        filters=None,
                        paginate=False):
    """Return (prices, has_prev_data, has_more_data) for one catalog.

    ``scope`` restricts prices to a project (or SCOPE_DEFAULT); ``lifetime``
    restricts to prices valid at that timestamp.  Flags follow the same
    (prev, more) order as catalog_list_detailed.
    """
    limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row tells us whether a next page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key, }
    if scope:
        kwargs['scope'] = scope
    if lifetime:
        kwargs['lifetime'] = lifetime
    if marker is not None:
        kwargs['marker'] = marker
    if force_show_deleted is not None:
        kwargs['force_show_deleted'] = force_show_deleted
    if filters is not None:
        kwargs.update(filters)
    price_list = afloclient(request).price.list(catalog_id, kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        prices = list(itertools.islice(price_list, request_size))
        if sort_dir == 'desc':
            # Forward paging: extra row => more pages ahead.
            if len(prices) > page_size:
                prices.pop(-1)
                has_more_data = True
            else:
                has_more_data = False
                if marker is not None:
                    has_prev_data = True
        else:
            # Backward paging: results arrive reversed, so flip them below.
            if len(prices) > page_size:
                prices.pop(-1)
                has_prev_data = True
            else:
                has_prev_data = False
                has_more_data = True
            prices.reverse()
    else:
        prices = list(price_list)
    return (prices, has_prev_data, has_more_data)
def catalog_price_list(request,
                       project_id,
                       marker=None,
                       limit=None,
                       sort_key=None,
                       sort_dir=None,
                       force_show_deleted=None,
                       filters=None,
                       paginate=False):
    """Return (ProjectCatalog list, has_prev_data, has_more_data).

    Joins each catalog page entry with its currently valid price, preferring
    a price scoped to *project_id* and falling back to SCOPE_DEFAULT;
    catalogs without a usable price are skipped.
    NOTE(review): sort_key/sort_dir default to None here, overriding
    catalog_list_detailed's own defaults — confirm callers always pass them.
    """
    details = []
    _prev = False
    _more = False
    try:
        catalogs, _prev, _more = \
            catalog_list_detailed(request,
                                  marker=marker,
                                  sort_key=sort_key,
                                  sort_dir=sort_dir,
                                  force_show_deleted=force_show_deleted,
                                  filters=filters,
                                  paginate=paginate)
        # One timestamp for the whole page keeps the join consistent.
        lifetime = get_datetime_now()
        for catalog in catalogs:
            prices, unsued_p, unsed_n = price_list_detailed(request,
                                                            catalog.catalog_id,
                                                            lifetime=lifetime)
            if prices is None or len(prices) == 0:
                continue
            price = None
            for p in prices:
                # Project-scoped price wins; otherwise remember the first
                # SCOPE_DEFAULT price seen.
                if p.scope and p.scope == project_id:
                    price = p
                    break
                else:
                    if price is None and p.scope and p.scope == SCOPE_DEFAULT:
                        price = p
            if price is None:
                continue
            detail = ProjectCatalog(catalog.catalog_id,
                                    price.scope,
                                    price.seq_no,
                                    catalog.catalog_name,
                                    price.price,
                                    project_id)
            details.append(detail)
    except Exception:
        # Horizon convention: surface the failure as a UI message and
        # return whatever was accumulated so far.
        _prev = False
        _more = False
        exceptions.handle(request,
                          _("Unable to retrieve project catalog list."))
    return details, _prev, _more
def price_get_with_project_id(request, project_id, catalog_id, scope, seq_no):
    """Return a price for *catalog_id*, preferring one scoped to *project_id*.

    Falls back to fetching the exact ``(catalog_id, scope, seq_no)`` record
    when no currently-valid project-scoped price exists.
    """
    prices, _prev, _more = price_list_detailed(request, catalog_id,
                                               filters={'scope':
                                                        project_id},
                                               lifetime=get_datetime_now(),
                                               paginate=False)
    price = prices[0] if prices and 0 < len(prices) else None
    if price is None:
        # No project-specific price: fetch the explicitly requested record.
        price = afloclient(request).price.get(catalog_id, scope, seq_no)
    return price
def price_update_or_create(request,
                           catalog_id,
                           scope,
                           fields,
                           now=None,
                           del_flg=False):
    """Close the currently-valid price (if any) and create its successor.

    The active record's ``lifetime_end`` is set to one second before *now*
    and a new open-ended record starting at *now* is created from *fields*.

    :param now: override for the changeover instant (defaults to utcnow).
    :param del_flg: when True only terminate the old price and return ``{}``.
    :returns: the created price, or ``{}`` when *del_flg* is set.
    """
    if now is None:
        now = get_datetime_now()
    # End the previous record one second before the new one starts so the
    # two validity ranges never overlap.
    old_lifetime = datetime.datetime.strptime(now, '%Y-%m-%dT%H:%M:%S.%f')
    old_lifetime = old_lifetime - datetime.timedelta(seconds=1)
    old_lifetime_str = old_lifetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
    new_lifetime = now
    fields["lifetime_start"] = new_lifetime
    # Sentinel "far future" end date marks the record as currently valid.
    fields["lifetime_end"] = "9999-12-31T23:59:59.999999"
    prices, _prev, _more = price_list_detailed(request, catalog_id,
                                               filters={'scope': scope},
                                               lifetime=now,
                                               paginate=False)
    price = prices[0] if prices and 0 < len(prices) else None
    if price is not None:
        afloclient(request).price.update(price.catalog_id,
                                         price.scope,
                                         price.seq_no,
                                         {"lifetime_end": old_lifetime_str})
    if del_flg:
        return {}
    return afloclient(request).price.create(catalog_id,
                                            scope,
                                            fields)
def price_list_detailed2(request, catalog_id):
    """Return the raw price list for *catalog_id* valid at the current UTC time."""
    query = dict(lifetime=get_datetime_utcnow())
    client = afloclient(request)
    return client.price.list(catalog_id, query)
def get_datetime_now():
    """Return the current UTC time formatted as 'YYYY-MM-DDTHH:MM:SS.ffffff'."""
    return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')
def get_datetime_utcnow():
    """Return the current UTC time as an ISO-like 'YYYY-MM-DDTHH:MM:SS.ffffff' string."""
    fmt = '%Y-%m-%dT%H:%M:%S.%f'
    return datetime.datetime.utcnow().strftime(fmt)
class ValidCatalog(object):
    '''Valid Catalog Class

    Display row combining one catalog with its public- and private-scope
    prices for a given project. All values are HTML-escaped for safe use
    in templates; prices are normalized ("-" when unset) and formatted.
    '''
    def __init__(self,
                 catalog_id,
                 catalog_name,
                 public_seq_no,
                 public_price,
                 private_seq_no,
                 private_price,
                 project_id):
        self.catalog_id = escape(catalog_id)
        # The catalog name passes through gettext so it can be localized.
        self.catalog_name = _(escape(catalog_name))  # noqa
        self.public_seq_no = escape(public_seq_no)
        self.public_price = _get_price_string(
            escape(_get_format_price(public_price)))
        self.private_seq_no = escape(private_seq_no)
        self.private_price = _get_price_string(
            escape(_get_format_price(private_price)))
        self.project_id = escape(project_id)
def catalog_scope_list(request,
                       project_id,
                       marker=None,
                       limit=None,
                       sort_key=None,
                       sort_dir=None,
                       force_show_deleted=None,
                       filters=None,
                       paginate=False):
    """List catalogs with both their public and project-private prices.

    Builds one ``ValidCatalog`` row per catalog, carrying the currently
    valid default-scope price and the *project_id*-scoped price (either may
    be absent, in which case its fields stay None).

    :returns: tuple ``(rows, has_prev_data, has_more_data)``.
    """
    catalog_scope_lists = []
    public_lists = {}
    private_lists = {}
    _prev = False
    _more = False
    try:
        catalogs, _prev, _more = catalog_list_detailed(
            request,
            marker=marker,
            sort_key=sort_key,
            sort_dir=sort_dir,
            force_show_deleted=force_show_deleted,
            filters=filters,
            paginate=paginate)
        # Snapshot one instant so public and private prices are consistent.
        lifetime = get_datetime_now()
        res_public, unused_p, unused_m = valid_catalog_list(request,
                                                            refine_flg=True,
                                                            lifetime=lifetime)
        # Keep only the first entry per catalog_id (results are sorted by
        # catalog_id, so consecutive duplicates can be skipped).
        catalog_id_wk = None
        for public_wk in res_public:
            if catalog_id_wk == public_wk.catalog_id:
                continue
            public_lists[public_wk.catalog_id] = public_wk
            catalog_id_wk = public_wk.catalog_id
        res_private, unused_p, unused_m = valid_catalog_list(request,
                                                             scope=project_id,
                                                             refine_flg=True,
                                                             lifetime=lifetime)
        catalog_id_wk = None
        for private_wk in res_private:
            if catalog_id_wk == private_wk.catalog_id:
                continue
            private_lists[private_wk.catalog_id] = private_wk
            catalog_id_wk = private_wk.catalog_id
        for catalog in catalogs:
            seq_no_pub = None
            seq_no_pri = None
            price_pub = None
            price_pri = None
            if catalog.catalog_id in public_lists:
                seq_no_pub = public_lists[catalog.catalog_id].price_seq_no
                price_pub = public_lists[catalog.catalog_id].price
            if catalog.catalog_id in private_lists:
                seq_no_pri = private_lists[catalog.catalog_id].price_seq_no
                price_pri = private_lists[catalog.catalog_id].price
            catalog_scope = ValidCatalog(catalog.catalog_id,
                                         catalog.catalog_name,
                                         seq_no_pub,
                                         price_pub,
                                         seq_no_pri,
                                         price_pri,
                                         project_id)
            catalog_scope_lists.append(catalog_scope)
    except Exception:
        # Surface a dashboard error and return whatever was assembled so far.
        _prev = False
        _more = False
        exceptions.handle(request,
                          _("Unable to retrieve catalog scope list."))
    return catalog_scope_lists, _prev, _more
def valid_catalog_list(request,
                       catalog_id=None,
                       scope='Default',
                       refine_flg=None,
                       lifetime=None,
                       marker=None,
                       limit=None,
                       sort_key='catalog_id',
                       sort_dir='asc',
                       filters=None,
                       paginate=False):
    """Query the valid-catalog API and apply dashboard-style pagination.

    :param scope: price scope to filter on ('Default' = public prices).
    :param lifetime: validity instant; defaults to "now" (UTC).
    :param marker: composite pagination marker
        'catalog|catalog_scope|price' split on '|'.
    :returns: tuple ``(items, has_prev_data, has_more_data)``.
    """
    if lifetime is None:
        lifetime = get_datetime_now()
    limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    # Request one extra row so we can detect whether a further page exists.
    if paginate:
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key, }
    kwargs['scope'] = scope
    kwargs['lifetime'] = lifetime
    if catalog_id is not None:
        kwargs['catalog_id'] = catalog_id
    if refine_flg is not None:
        kwargs['refine_flg'] = refine_flg
    if marker is not None:
        # The composite marker carries one sub-marker per backing table.
        kwargs['catalog_marker'] = marker.split('|')[0]
        kwargs['catalog_scope_marker'] = marker.split('|')[1]
        kwargs['price_marker'] = marker.split('|')[2]
    if filters is not None:
        kwargs.update(filters)
    valid_catalog = afloclient(request).valid_catalog.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        valid_catalog_wk = list(itertools.islice(valid_catalog, request_size))
        if sort_dir == 'desc':
            # Forward paging: the sentinel extra row means more data exists.
            if len(valid_catalog_wk) > page_size:
                valid_catalog_wk.pop(-1)
                has_more_data = True
            else:
                has_more_data = False
            if marker is not None:
                has_prev_data = True
        else:
            # Backward paging: results arrive reversed and are flipped back.
            if len(valid_catalog_wk) > page_size:
                valid_catalog_wk.pop(-1)
                has_prev_data = True
            else:
                has_prev_data = False
            has_more_data = True
            valid_catalog_wk.reverse()
    else:
        valid_catalog_wk = list(valid_catalog)
    return (valid_catalog_wk, has_prev_data, has_more_data)
def _get_format_price(value):
if None == value:
return "-"
else:
return value
def catalog_scope_update_or_create(request,
                                   catalog_id,
                                   scope,
                                   fields,
                                   now=None,
                                   del_flg=False):
    """Close the active catalog-scope record (if any) and create a successor.

    Mirrors ``price_update_or_create``: the current record's lifetime_end is
    set one second before *now* and a new open-ended record is created.

    :param del_flg: when True only terminate the old record and return ``{}``.
    :returns: the created catalog scope, or ``{}`` when *del_flg* is set.
    """
    if now is None:
        now = get_datetime_now()
    # End the previous record one second early so validity ranges don't overlap.
    old_lifetime = datetime.datetime.strptime(now, '%Y-%m-%dT%H:%M:%S.%f')
    old_lifetime = old_lifetime - datetime.timedelta(seconds=1)
    old_lifetime_str = old_lifetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
    new_lifetime = now
    fields['lifetime_start'] = new_lifetime
    # Sentinel "far future" end date marks the record as currently valid.
    fields['lifetime_end'] = '9999-12-31T23:59:59.999999'
    catalog_scape, _prev, _more = catalog_scope_list_detailed(request,
                                                              catalog_id,
                                                              scope,
                                                              now,
                                                              paginate=False)
    catalog_scape = catalog_scape[0] \
        if catalog_scape and 0 < len(catalog_scape) else None
    if catalog_scape is not None:
        afloclient(request).catalog_scope.update(
            catalog_scape.id,
            {'lifetime_end': old_lifetime_str})
    if del_flg:
        return {}
    return afloclient(request).catalog_scope.create(catalog_id,
                                                    scope,
                                                    fields)
def catalog_scope_list_detailed(request,
                                catalog_id,
                                scope=None,
                                lifetime=None,
                                marker=None,
                                limit=None,
                                sort_key='lifetime_start',
                                sort_dir='desc',
                                force_show_deleted=None,
                                filters=None,
                                paginate=False):
    """Query catalog-scope records with dashboard-style pagination.

    :param lifetime: when given, restrict to records valid at this instant.
    :returns: tuple ``(items, has_prev_data, has_more_data)``.
    """
    limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    # Request one extra row so we can detect whether a further page exists.
    if paginate:
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key, }
    if catalog_id:
        kwargs['catalog_id'] = catalog_id
    if scope:
        kwargs['scope'] = scope
    if lifetime:
        kwargs['lifetime'] = lifetime
    if marker is not None:
        kwargs['marker'] = marker
    if force_show_deleted is not None:
        kwargs['force_show_deleted'] = force_show_deleted
    if filters is not None:
        kwargs.update(filters)
    c_scope_list = afloclient(request).catalog_scope.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        catalog_scope = list(itertools.islice(c_scope_list, request_size))
        if sort_dir == 'desc':
            # Forward paging: the sentinel extra row means more data exists.
            if len(catalog_scope) > page_size:
                catalog_scope.pop(-1)
                has_more_data = True
            else:
                has_more_data = False
            if marker is not None:
                has_prev_data = True
        else:
            # Backward paging: results arrive reversed and are flipped back.
            if len(catalog_scope) > page_size:
                catalog_scope.pop(-1)
                has_prev_data = True
            else:
                has_prev_data = False
            has_more_data = True
            catalog_scope.reverse()
    else:
        catalog_scope = list(c_scope_list)
    return (catalog_scope, has_prev_data, has_more_data)
| null | nec_portal/api/ticket.py | ticket.py | py | 27,763 | python | en | code | null | code-starcoder2 | 51 |
635960820 | import bpy
import struct
import mathutils
# Blender add-on registration metadata, read by Blender's add-on manager.
bl_info = {
    "name": "KaiKai Exporter",
    "author": "Francisco Blanco",
    "blender": (2,6,4),
    "version": (0,0,8),
    "location": "File > Import-Export",
    "description": "Export a Kaikai model and animation",
    "category": "Import-Export"
}
class Weight():
    """Pairing of a joint index with its skinning influence on one vertex."""
    joint = -1  # index of the influencing joint (-1 = unset)
    weight = 0.0  # influence strength, expected in [0, 1]
class Joint():
    """A skeleton joint: wraps a bone object and the index of its parent joint."""

    father = -1   # index of the parent joint; -1 means "no parent" (root)
    joint = None  # wrapped bone object (original class attr was misspelled "join")

    def addJoint(self, j):
        """Store the wrapped bone object."""
        self.joint = j

    def getJoint(self):
        """Return the wrapped bone object (None if never set)."""
        return self.joint

    def setFather(self, i):
        """Record the parent joint index.

        BUG FIX: the original assigned to a local variable, so the parent
        index was never stored on the instance.
        """
        self.father = i

    def getFather(self):
        """Return the parent joint index.

        BUG FIX: the original returned the bare name ``father``, which raised
        NameError at runtime (class attributes are not in method scope).
        """
        return self.father
class Skeleton():
    """An armature: ordered joints, a name->index map, and a global transform."""

    def __init__(self):
        # BUG FIX: these were class attributes, so every Skeleton instance
        # shared one joint list / name map (appends leaked across objects);
        # make them per-instance state instead.
        self.cant = 0                       # number of joints added so far
        self.fathers_bind = dict()          # joint name -> joint index
        self.joint_list = list()            # joints in insertion order
        self.rotation = mathutils.Matrix()  # armature rotation
        self.scale = mathutils.Matrix()     # armature scale
        self.position = mathutils.Matrix()  # armature translation
        self.name = "bone_unamed"

    def addJoint(self, joint):
        """Append *joint* and record its index under ``joint.name``."""
        self.fathers_bind[joint.name] = self.cant
        self.cant = self.cant + 1
        self.joint_list.append(joint)

    def getJointCount(self):
        """Return how many joints have been added."""
        return self.cant

    def getJoint(self):
        """Return the list of joints in insertion order."""
        return self.joint_list

    def getFather(self, father_name):
        """Return the index previously recorded for *father_name*."""
        return self.fathers_bind[father_name]
class Mesh():
    """One exported mesh: geometry, UVs, materials and skinning weights.

    NOTE(review): all of the attributes below are class-level; the mutable
    ones (normal, material, texture_vertex) are therefore shared across
    every Mesh instance -- appends on one mesh leak into the others.
    Confirm the exporter only ever builds one Mesh at a time, or move
    these into an __init__.
    """
    meshname = 0       # mesh name string (initialized with a placeholder 0)
    vertexcant = 0     # number of vertices
    facescant = 0      # number of faces
    materialcant = 0   # unused here; material count is derived from len(material)
    position = 0       # object location (set from object.location)
    rotation = mathutils.Matrix()  # object rotation matrix
    scale = mathutils.Matrix()     # object scale matrix
    vertex = 0         # vertex collection (set via addVertex)
    normal = []        # shared class-level list -- see class note
    faces = 0          # face/polygon collection (set via addFace)
    material = []      # shared class-level list -- see class note
    texture_vertex = []  # per-vertex UV coordinates -- shared list, see note
    vertex_group = list()  # vertex groups used for skinning weights
    def addVertexGroup(self, vg):
        self.vertex_group = vg
    def addVertex(self,vert):
        self.vertex = vert
    def addNormal(self,norm):
        self.normal.append(norm)
    def addTextureVertex(self,tv):
        self.texture_vertex.append(tv)
    def addFace(self,face):
        self.faces = face
    def addMaterial(self,materialname):
        self.material.append(materialname)
    def getVertexCountString(self):
        return "%s" % self.vertexcant
    def getNormalCountString(self):
        # One normal per vertex, so the vertex count is reused here.
        return "%s" % self.vertexcant
    def getFacesCountString(self):
        return "%s" % self.facescant
    def getMaterialCountString(self):
        return "%s" % len(self.material)
    def getTextureVertexCountString(self):
        return "%s" % len(self.texture_vertex)
    def getVertexBuffer(self):
        return self.vertex
    def saveVertex(self, f):
        # type: (object) -> None
        """Write one line per vertex: index, transformed position, normal,
        UV (or 0.0 0.0 when missing), then a WEIGHTCOUNT record capped at
        the 4 first influencing vertex groups."""
        uv = self.texture_vertex
        for i,v in enumerate(self.vertex):
            f.write("%d " % i)
            # Apply scale then rotation to the raw coordinate.
            auxv = self.scale * v.co
            auxv = self.rotation * auxv
            f.write("%.2f " % auxv.x)
            f.write("%.2f " % auxv.y)
            f.write("%.2f " % auxv.z)
            f.write("%.2f " % v.normal.x)
            f.write("%.2f " % v.normal.y)
            f.write("%.2f " % v.normal.z)
            auxlist = list()
            try:
                # V coordinate is flipped: Blender's origin is bottom-left.
                f.write("%.2f " % uv[i][0])
                f.write("%.2f\n" % (1.0 - uv[i][1]))
            except IndexError:
                f.write("0.0 ")
                f.write("0.0\n")
            # Collect a Weight per vertex group that influences this vertex;
            # vg.weight(i) raises RuntimeError when the vertex is not in the group.
            for j,vg in enumerate(self.vertex_group):
                print("%s " % j)
                wei = Weight()
                try:
                    wei.joint = j
                    wei.weight = vg.weight(i)
                    auxlist.append(wei)
                except RuntimeError:
                    print("Vertex %d not found" % i)
            if len(auxlist) < 4:
                f.write("WEIGHTCOUNT %d " % len(auxlist))
                for w in auxlist:
                    f.write("%d " % w.joint)
                    f.write("%.2f " % w.weight)
            else:
                # Cap at the first four influences (file format limit).
                f.write("WEIGHTCOUNT %d " % 4)
                f.write("%d " % auxlist[0].joint)
                f.write("%.2f " % auxlist[0].weight)
                f.write("%d " % auxlist[1].joint)
                f.write("%.2f " % auxlist[1].weight)
                f.write("%d " % auxlist[2].joint)
                f.write("%.2f " % auxlist[2].weight)
                f.write("%d " % auxlist[3].joint)
                f.write("%.2f " % auxlist[3].weight)
            f.write("\n")
    def saveFaces(self, f):
        """Write one line per face: index plus three vertex indices.

        NOTE(review): assumes the mesh is triangulated (exactly 3 vertices
        per polygon) -- quads or ngons would be silently truncated.
        """
        for i,fa in enumerate(self.faces):
            f.write("%d " % i)
            f.write("%d " % fa.vertices[0])
            f.write("%d " % fa.vertices[1])
            f.write("%d\n" % fa.vertices[2])
    def saveMaterials(self, f):
        """Write all material names on one line, stripping Blender's
        ".001"-style numeric suffixes."""
        for mat in self.material:
            f.write("%s " % mat.split(".",1)[0])
        f.write("\n")
    def setScaleMatrix(self, _scale):
        # Build a diagonal scale matrix from the per-axis scale vector.
        self.scale[0][0] = _scale[0]
        self.scale[1][1] = _scale[1]
        self.scale[2][2] = _scale[2]
    def setRotationMatrix(self, _rotation):
        # Convert whichever rotation representation the object uses.
        if _rotation.rotation_mode == 'XYZ':
            self.rotation = _rotation.rotation_euler.to_matrix()
        if _rotation.rotation_mode == 'QUATERNION':
            self.rotation = _rotation.rotation_quaternion.to_matrix()
class Objeto:
    """Container for the exported scene: collected meshes plus one skeleton."""

    def __init__(self):
        # BUG FIX: mesh/skeleton/meshcount were class attributes, so all
        # Objeto instances shared one mesh list and one skeleton; make them
        # per-instance state.
        self.mesh = []
        self.skeleton = Skeleton()
        self.meshcount = 0

    def print(self):
        """Log a summary of the collected meshes to the console."""
        print("\n\n Preparing Object \n\n")
        print("Mesh Count: "+"%s" % self.meshcount)
        for m in self.mesh:
            print("Vertex Count: "+m.getVertexCountString())
            print("Normals Count: "+m.getNormalCountString())
            print("UV Coord Count: "+m.getTextureVertexCountString())
            print("Index Count: "+m.getFacesCountString())

    def addMesh(self, _mesh):
        """Append *_mesh* and keep the running count in sync."""
        self.mesh.append(_mesh)
        self.meshcount += 1

    def saveObject(self, f):
        """Write the .kkm header, then every joint and mesh, to file *f*."""
        f.write('SOURCE Blender Python Script Exporter\n')
        f.write('MESHES '+"%s"%len(self.mesh)+'\n')
        f.write('JOINTS '+"%s"% self.skeleton.getJointCount()+'\n')
        self.saveMeshs(f)

    def saveMeshs(self, f):
        """Serialize all joints, then all meshes, into *f*."""
        for i,j in enumerate(self.skeleton.getJoint()):
            f.write('JOINT ')
            f.write('%s ' % i)
            f.write('%s ' % j.name)
            # Parent joint index, or -1 for a root bone.
            if j.parent is not None:
                f.write('%s ' % self.skeleton.getFather(j.parent.name))
            else:
                f.write('%s ' % -1)
            # Transform head/tail by the armature rotation, then translate.
            origin = self.skeleton.rotation * j.head_local
            origin[0] = origin[0] + self.skeleton.position[0]
            origin[1] = origin[1] + self.skeleton.position[1]
            origin[2] = origin[2] + self.skeleton.position[2]
            f.write('%s ' % origin[0])
            f.write('%s ' % origin[1])
            f.write('%s ' % origin[2])
            tail = self.skeleton.rotation * j.tail_local
            tail[0] = tail[0] + self.skeleton.position[0]
            tail[1] = tail[1] + self.skeleton.position[1]
            tail[2] = tail[2] + self.skeleton.position[2]
            f.write('%s ' % tail[0])
            f.write('%s ' % tail[1])
            f.write('%s ' % tail[2])
            # Bone orientation as a quaternion (x, y, z, w).
            f.write('%s ' % j.matrix.to_quaternion().x)
            f.write('%s ' % j.matrix.to_quaternion().y)
            f.write('%s ' % j.matrix.to_quaternion().z)
            f.write('%s ' % j.matrix.to_quaternion().w)
            f.write('\n')
        for m in self.mesh:
            f.write('MESH '+m.meshname+' ')
            f.write(m.getVertexCountString()+' ')
            f.write(m.getFacesCountString()+'\n')
            m.saveVertex(f)
            m.saveFaces(f)
            # BUG FIX: the original also called m.saveMaterials(f) here,
            # before the MATERIAL header, emitting the material list twice.
            f.write('MATERIAL '+m.getMaterialCountString()+'\n')
            m.saveMaterials(f)

    def addJoint(self, _joint):
        """Replace the object's skeleton.

        BUG FIX: the original assigned to a local variable, so the call had
        no effect (it was harmless only because callers passed in the very
        skeleton obtained from getJoint()).
        """
        self.skeleton = _joint

    def getJoint(self):
        """Return the object's skeleton."""
        return self.skeleton
def scaleVertex(v,s):
    """Return vertex *v* scaled by the per-axis factors in *s*.

    NOTE(review): multiplies as ``v * m`` (row-vector convention of the
    Blender 2.6x mathutils API this add-on targets); if porting to
    Blender 2.8+, the order must become ``m @ v`` -- confirm before reuse.
    """
    # Build a diagonal scale matrix from the three axis factors.
    m = mathutils.Matrix()
    m[0][0]=s[0]
    m[1][1]=s[1]
    m[2][2]=s[2]
    return v * m
def exportarDatosDeMalla(obj):
    """Collect mesh and armature data from the current Blender selection.

    For every selected MESH object a Mesh wrapper is filled with vertices,
    faces, UVs, materials and vertex groups and appended to *obj*; if the
    mesh is parented to an armature, its bones are added to obj's skeleton.

    NOTE(review): UV gathering indexes aux_tex at i*3..i*3+2, i.e. it
    assumes every polygon is a triangle -- confirm meshes are triangulated
    before export.
    """
    selected_objects = bpy.context.selected_objects
    for object in selected_objects:
        if object.type == 'MESH':
            auxjoint = obj.getJoint()
            auxmesh = Mesh()
            auxmesh.meshname = object.name
            auxmesh.position = object.location
            auxmesh.setRotationMatrix(object)
            auxmesh.setScaleMatrix(object.scale)
            auxmesh.addVertexGroup(object.vertex_groups)
            # get Mesh Vertex in a list, whit Vertex i have normal and coordinates
            vertices = object.data.vertices
            auxmesh.vertexcant = len(vertices)
            auxmesh.addVertex(vertices)
            # get Mesh Faces in a list
            faces = object.data.polygons
            auxmesh.facescant = len(faces)
            auxmesh.addFace(faces)
            # get UV Map, if it exists
            if len(object.data.uv_layers) != 0:
                print("tiene texturas\n")
                aux_tex = object.data.uv_layers.active.data
                textures = dict()
                # Map each vertex index to its UV loop entry (last one wins
                # when a vertex is shared by several faces).
                for i,p in enumerate(faces):
                    textures[p.vertices[0]] = aux_tex[i*3]
                    textures[p.vertices[1]] = aux_tex[i*3+1]
                    textures[p.vertices[2]] = aux_tex[i*3+2]
                for k,t in textures.items():
                    auxmesh.addTextureVertex(t.uv)
            # Record the first material's texture image name, if any.
            if(len(object.data.materials) != 0):
                if(hasattr(object.data.materials[0].active_texture,'image')):
                    auxmesh.addMaterial(object.data.materials[0].active_texture.image.name)
            obj.addMesh(auxmesh)
            # If parented to an armature, copy its transform and bones.
            armature = object.parent
            if armature is not None:
                if armature.type == 'ARMATURE':
                    obj.skeleton.position = armature.location
                    if armature.rotation_mode == 'XYZ':
                        obj.skeleton.rotation = armature.rotation_euler.to_matrix()
                    if armature.rotation_mode == 'QUATERNION':
                        obj.skeleton.rotation = armature.rotation_quaternion.to_matrix()
                    for joint in armature.data.bones:
                        auxjoint.addJoint(joint)
                    obj.addJoint(auxjoint)
def createFile(obj,file,filepath):
    """Print a summary of *obj* and serialize it into the open handle *file*.

    NOTE(review): *filepath* is accepted but unused here; the caller has
    already opened the file handle.
    """
    obj.print()
    obj.saveObject(file)
def write_some_data(context, filepath, mesh):
    """Operator worker: gather scene data and write it to a .kkm file.

    :param context: Blender context (currently unused).
    :param filepath: destination path chosen in the file selector.
    :param mesh: when True, collect mesh/armature data from the selection.
    :returns: {'FINISHED'}, as the Blender operator protocol requires.
    """
    print("Creating mesh file: " + filepath)
    o = Objeto()
    if mesh:
        exportarDatosDeMalla(o)
    f = open(filepath, 'w')
    createFile(o,f,filepath)
    f.close()
    return {'FINISHED'}
# ExportHelper is a helper class, defines filename and
# invoke() function which calls the file selector.
from bpy_extras.io_utils import ExportHelper
from bpy.props import StringProperty, BoolProperty, EnumProperty
from bpy.types import Operator
class ExportSomeData(Operator, ExportHelper):
    """Export operator for KaiKai (.kkm) meshes.

    This appears in the tooltip of the operator and in the generated docs.
    """
    bl_idname = "export_test.some_data"  # important since its how bpy.ops.import_test.some_data is constructed
    bl_label = "Exportar Kaikai mesh"
    # ExportHelper mixin class uses this
    filename_ext = ".kkm"
    # Restrict the file browser to .kkm files.
    filter_glob = StringProperty(
            default="*.kkm",
            options={'HIDDEN'},
            )
    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    mesh = BoolProperty(
            name="Mesh",
            description="Exporta la malla del objeto",
            default=True,
            )
    def execute(self, context):
        # Called by Blender once the user confirms the file dialog.
        return write_some_data(context, self.filepath, self.mesh)
# Only needed if you want to add into a dynamic menu
def menu_func_export(self, context):
    """Menu callback: add this exporter to Blender's File > Export menu."""
    self.layout.operator(ExportSomeData.bl_idname, text="Export KaiKai Mesh (.kkm)")
def register():
    """Register the operator class and hook it into the export menu."""
    bpy.utils.register_class(ExportSomeData)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    """Undo register(): remove the menu entry and unregister the operator."""
    bpy.utils.unregister_class(ExportSomeData)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
# When run from Blender's text editor: register and fire a test export.
if __name__ == "__main__":
    register()
    # test call
    bpy.ops.export_test.some_data('INVOKE_DEFAULT')
| null | IOKaiKaiExporter.py | IOKaiKaiExporter.py | py | 12,012 | python | en | code | null | code-starcoder2 | 51 |
382212981 | # -*- coding: utf8 -*-
import sys
sys.path.append("../")
from server.gen_spiders import *
# Script body: drive SpiderGenerator to emit the go2android.de scraper.
# Each gen_* call returns a code fragment; they are accumulated in order
# and concatenated into the generated spider module at the end.
code_fragments = []
spa = SpiderGenerator()
return_code = spa.gen_import()
code_fragments.append(return_code)
# Spider identity, crawl domain and the seed URL.
return_code = spa.gen_init(spider_name = "Go2android_deSpider", spider_type = "AlaSpider", allowed_domains = "'go2android.de'", start_urls = "'http://www.go2android.de/test/'")
code_fragments.append(return_code)
# Level 1: the test-listing pages (pagination + links to review pages).
return_code = spa.gen_level(level_index = "1", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.gen_request_single_url(url_xpath = "//div[@class='navigation']/a[contains(@class,'next')]/@href", level_index = "1", url_regex = "", product_fields = [])
code_fragments.append(return_code)
return_code = spa.gen_request_urls(urls_xpath = "//div[starts-with(@id,'recent-post')]/div[starts-with(@id,'post')]//h2/a/@href", level_index = "2", url_regex = "", include_original_url = "", params_xpath = {}, params_regex = {})
code_fragments.append(return_code)
# Level 2: the individual review pages; extract product and review fields.
return_code = spa.gen_level(level_index = "2", need_parse_javascript = "")
code_fragments.append(return_code)
return_code = spa.gen_product(sii_xpath = "substring-before(substring-after(//body/@class,'postid-'),' ')", pname_xpath = "//h1//text()", ocn_xpath = "//ul[./preceding-sibling::a[1][normalize-space(./text())='Tests']]//li[contains(//div[starts-with(@id,'post-')]/@class,substring-before(substring-after(./a/@href,'/test/'),'/'))]/a[contains(@href,'test')]/text() | //li/a[normalize-space(./text())='Tests']/text()", pic_xpath = "//meta[@property='og:image']/@content", manuf_xpath = "")
code_fragments.append(return_code)
return_code = spa.gen_review(sii_xpath = "substring-before(substring-after(//body/@class,'postid-'),' ')", pname_xpath = "//h1//text()", rating_xpath = "substring-before(substring-after(//table[contains(.,'Wertung')]/following::img[1]/@src,'rating_'),'.jpg')", date_xpath = "substring-before(//meta[contains(@property,'published_time')]/@content,'T')", pros_xpath = "//div[@class='entry']/descendant-or-self::*[normalize-space()='Positiv'][1]/following::ul[1]/li//text()", cons_xpath = "//div[@class='entry']/descendant-or-self::*[normalize-space()='Negativ'][1]/following::ul[1]/li//text()", summary_xpath = "//div[@class='entry']/p[string-length(normalize-space())>1][1]//text()", verdict_xpath = "//*[(name()='h2' or name()='h3') and contains(.,'Fazit')]/following::p[string-length(normalize-space(./text()))>1][1]//text()[1]", author_xpath = "//div[@class='meta-author']/a/text()", title_xpath = "//h1//text()", award_xpath = "", awpic_xpath = "")
code_fragments.append(return_code)
return_code = spa.get_dbasecategoryname(dbcn = "pro")
code_fragments.append(return_code)
return_code = spa.get_sourcetestscale(scale = "5", review_type = "pro")
code_fragments.append(return_code)
# Strip the "[Test]" prefix from product names on both record types.
return_code = spa.clean_field(type = "product", field = "ProductName", regex = "((?<=\[Test\]).*)", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.clean_field(type = "review", field = "ProductName", regex = "((?<=\[Test\]).*)", review_type = "pro")
code_fragments.append(return_code)
return_code = spa.save_product()
code_fragments.append(return_code)
return_code = spa.save_review(review_type = "pro")
code_fragments.append(return_code)
# Write all fragments to the target spider module.
# NOTE(review): the trailing fh.write("") is a no-op, and the handle would
# be safer inside a `with` block.
script_name = "/home/alascrapy/alaScrapy/alascrapy/spiders/go2android_de.py"
fh = open(script_name, 'w+')
for code in code_fragments:
    fh.write(code)
fh.write("")
fh.close()
| null | auto_generate_scripts/server/script_generator/go2android_de_generator.py | go2android_de_generator.py | py | 3,473 | python | en | code | null | code-starcoder2 | 51 |
326277856 | from flask import Flask, render_template, request, redirect, session, url_for
from flask import send_file, make_response, send_from_directory
# Flask application: templates from ./templates, static assets under /static.
# NOTE(review): DEBUG is enabled here -- disable before deploying.
app = Flask(__name__, template_folder="templates", static_url_path='/static')
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['DEBUG'] = True
@app.route("/")
def home():
    """Render the landing page from templates/index.html."""
    page = render_template("index.html")
    return page
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
| null | app.py | app.py | py | 415 | python | en | code | null | code-starcoder2 | 51 |
44504692 | import sys
# Bellman-Ford single-source shortest paths from city 1, with negative-cycle
# detection (prints -1 for unreachable cities or when a negative cycle exists).
N,M=map(int,sys.stdin.readline().split())# number of cities, number of bus routes
G=[]
for _ in range(M):
    G.append(list(map(int,sys.stdin.readline().split())))# start, end, distance
INF=sys.maxsize
result=[INF for _ in range(N+1)]
result[1]=0# only the distance to the source itself is initialized
check=0# flag: set when a negative cycle is detected
for i in range(N):# repeat once per node
    for j in range(M):# relax every edge, updating distances
        s=G[j][0]# start
        e=G[j][1]# end
        d=G[j][2]# distance
        if result[s]!=INF and result[e]>result[s]+d:# relax if shorter than the previous distance
            result[e]=result[s]+d
            if i==N-1:# a relaxation on the N-th pass means
                check=1# there is a negative cycle
if check:
    print(-1)
else:
    for i in range(2,N+1):
        if result[i]==INF:
            print(-1)
        else:
            print(result[i])
584756143 | from xml.dom import minidom
import json
import time
import datetime
# Input files: SemEval hyperpartisan-news training articles and their
# ground-truth labels. The commented lines are small local test fixtures.
# article_data = "test_article.xml";
article_data = "/home/konstantina/data/semeval/articles-training-20180831.xml";
# ground_truth_data = "test_article.ground_truth.xml";
ground_truth_data = "/home/konstantina/data/semeval/ground-truth-training-20180831.xml";
def get_node_text(nodelist):
    """Concatenate every text node under *nodelist*, recursing into elements."""
    def piece(node):
        if node.nodeType == node.TEXT_NODE:
            return node.data
        # Non-text node: descend into its children.
        return get_node_text(node.childNodes)
    return ''.join(piece(node) for node in nodelist)
# Script body: parse the articles and labels XML, join them by article id,
# and dump everything as one JSON document next to the input file.
start_time = time.time();
articles_xml = minidom.parse(article_data, bufsize=None); # takes forever for big file
# tree = etree.parse(article_data); # this as well
print(str(datetime.timedelta(seconds=time.time() - start_time)));
article_nodes = articles_xml.getElementsByTagName('article');
print("{} articles in this file".format(len(article_nodes)));
labels_xml = minidom.parse(ground_truth_data);
article_nodes_with_labels = labels_xml.getElementsByTagName('article');
article_label_map = dict(); # from doc id to binary hyperpartisanship
for article_label in article_nodes_with_labels:
    article_label_map[article_label.attributes['id'].value]= article_label.attributes['hyperpartisan'].value;
articles_json = dict(); # to transform xml file to json file
articles_json["articles"] = list();
for a in article_nodes:
    article_id = a.attributes['id'].value;
    article_title = a.attributes['title'].value;
    article_date = a.attributes['published-at'].value;
    # print("id={}, published-at={}, title={}".format(article_id, article_date, article_title));
    paragraphs = a.getElementsByTagName('p');
    # print("{} paragraphs".format(len(paragraphs)));
    # Flatten all paragraph text into one article string.
    article_text = "";
    for p in paragraphs:
        article_text += get_node_text(p.childNodes);
    # print("Article:\n{}".format(article_text));
    # print("Hyperpartisan:\n{}".format(article_label_map[a.attributes['id'].value]));
    current_article_json = dict();
    current_article_json["id"] = article_id;
    current_article_json["published-at"] = article_date;
    current_article_json["title"] = article_title;
    current_article_json["text"] = article_text;
    current_article_json["hyperpartisan"] = article_label_map[a.attributes['id'].value];
    articles_json["articles"].append(current_article_json);
    # NOTE(review): len(articles_json) counts the top-level keys (always 1),
    # not the articles -- this progress message never fires; it presumably
    # meant len(articles_json["articles"]).
    if len(articles_json)%1000 == 0:
        print("{} articles in dict".format(len(articles_json)));
# print(articles_json);
# NOTE(review): json.dump to a 'wb' handle only works on Python 2; under
# Python 3 the file must be opened in text mode.
with open("/home/konstantina/data/semeval/" +
          article_data.split("/")[len(article_data.split("/"))-1] + ".json", 'wb') as outputf:
    json.dump(articles_json, outputf);
| null | semeval/xml_to_json_slow.py | xml_to_json_slow.py | py | 2,731 | python | en | code | null | code-starcoder2 | 51 |
328948535 | import imaplib, email
import re
def config(user, password, imap_url, box_select):
    """Log in to an IMAP server over SSL and select a mailbox.

    :param user: account name.
    :param password: account password.
    :param imap_url: IMAP host (e.g. 'imap.gmail.com').
    :param box_select: mailbox to select (e.g. 'INBOX').
    :returns: the ready-to-use IMAP4_SSL connection.
    """
    con = imaplib.IMAP4_SSL(imap_url)
    con.login(user, password)
    con.select(box_select)
    return con
def get_body(e_mail):
    """Return the decoded payload of *e_mail*.

    For multipart messages, descends into the first part; for simple
    messages returns the payload decoded from its transfer encoding
    (bytes).
    """
    if e_mail.is_multipart():
        return get_body(e_mail.get_payload(0))
    # BUG FIX: the original computed the payload but never returned it, so
    # get_body() always yielded None for non-multipart messages.
    return e_mail.get_payload(None, True)
def search(key, value, con):
    """Run an IMAP SEARCH with criterion *key*/*value* on *con*.

    Returns the data list from the server response (the status is dropped).
    """
    status, data = con.search(None, key, "{}".format(value))
    return data
def get_emails(result_bytes, con):
    """Fetch and parse every message id listed in *result_bytes*.

    *result_bytes* is the data list from an IMAP SEARCH; its first entry
    is a whitespace-separated byte string of message numbers.
    """
    messages = []
    for msg_num in result_bytes[0].split():
        _typ, payload = con.fetch(msg_num, '(RFC822)')
        messages.append(email.message_from_bytes(payload[0][1]))
    return messages
def get_attechment(e_mail, filename):
    """Save each attachment of *e_mail* into the current directory.

    Every attachment is first written under its original name, then renamed
    to *filename* plus the attachment's real extension.
    """
    import os
    for part in e_mail.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        if part.get('Content-Disposition') is None:
            continue
        file = part.get_filename()
        # BUG FIX: guard before using the name (the original computed the
        # extension first, crashing on parts without a filename or without a
        # dot), and use splitext so "report.final.txt" keeps ".txt" instead
        # of ".final".
        if not file:
            continue
        extension = os.path.splitext(file)[1]
        with open(file, 'wb') as f:
            f.write(part.get_payload(decode=True))
        os.rename(file, filename + extension)
# Driver: connect to Gmail, list every message, and save attachments from
# messages whose subject matches "Date_of_DD-MM-YYYY#<n>".
# NOTE(review): credentials are hard-coded below -- move them to environment
# variables or a config file before sharing or deploying this script.
con = config("kunjvadodariya040798@gmail.com", "Kunjvadodariya040798#", "imap.gmail.com", 'INBOX')
# NOTE(review): this rebinds the name `search`, shadowing the function above.
search = search(None, "ALL",con)
e_mails = get_emails(search,con)
for e_mail in e_mails:
    if re.match("(Date_of_)([0-2][0-9]|(3)[0-1])(-)(((0)[0-9])|((1)[0-2]))(-)\d{4}#(\d)+" , e_mail['Subject']):
        # Build "date<DD-MM-YYYY>_<n>" from the two halves of the subject.
        filename = "date" + str(e_mail['Subject'].split('#')[0][7:]) + "_" + str(e_mail['Subject'].split('#')[1])
        get_attechment(e_mail,filename)
| null | Python/Tasks/Read_Mail/imap.py | imap.py | py | 1,624 | python | en | code | null | code-starcoder2 | 51 |
525764865 |
import sys
import os
import re
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
import seaborn as sns
sns.set()
# Base working directories (server paths; the commented line is a local dev path).
bdir = "/opt/fspdivaprj/xray-1/"
#bdir = "/Users/pankaj.petkar/dev/acc/x-ray-1/"
odir = bdir + "data/visuals/"
fdir = odir + "final/"
idir = "/hotest/dfs01t/work/xray/nm_final/test/"
# Chart/field naming constants used across the analysis helpers.
c_ts_dur_count = "g_c_ts_dur_count"
tags = ['cmp','xfr','pd','def']
i_ext = ".png"
dur_fld = 'call_dur'
label_fld = 'call_label'
ts_fld = 'ts1'
interval1 = 'month'
interval2 = 'dayofweek'
tag_prefix = '_tag_'
# Module-level cache: entity tag -> loaded DataFrame (filled by loadAllDF).
df_dict = {}
entity_tag = None
# Anonymization placeholder tokens.
XPONUM8X = 'XPONUM8X'
XSRNUM4X = 'XSRNUM4X'
XSRNUM5X = 'XSRNUM5X'
# NER label constants ("<entity>-<group>" tag names used as df_dict keys).
adv_prof_na = 'adv_prof-na'
adv_prof_prcs_prcd = 'adv_prof-prcs_prcd'
clnt_prof_na = 'clnt_prof-na'
cmpt_na = 'cmpt-na'
cmpt_cmpt_anly_na = 'cmpt_cmpt_anly-na'
fin_plng_na = 'fin_plng-na'
mrkt_trng_fin_plng = 'mrkt_trng-fin_plng'
mrkt_trng_na = 'mrkt_trng-na'
prcs_prcd_uw_fin_plng = 'prcs_prcd_uw-fin_plng'
prcs_prcd_uw_na = 'prcs_prcd_uw-na'
prcs_prcd_uw_sytm_tngy = 'prcs_prcd_uw-sytm_tngy'
prd_fetr_bnft_cmpt_cmpt_anly = 'prd_fetr_bnft-cmpt_cmpt_anly'
prd_fetr_bnft_fin_plng = 'prd_fetr_bnft-fin_plng'
prd_fetr_bnft_mrkt_trng = 'prd_fetr_bnft-mrkt_trng'
prd_fetr_bnft_na = 'prd_fetr_bnft-na'
prd_fetr_bnft_prcs_prcd_uw = 'prd_fetr_bnft-prcs_prcd_uw'
prd_fetr_bnft_sale_oprt_strg_cros_sell = 'prd_fetr_bnft-sale_oprt_strg_cros_sell'
prd_fetr_bnft_sytm_tngy = 'prd_fetr_bnft-sytm_tngy'
sale_oprt_strg_cros_sell_fin_plng = 'sale_oprt_strg_cros_sell-fin_plng'
sale_oprt_strg_cros_sell_na = 'sale_oprt_strg_cros_sell-na'
sale_oprt_strg_cros_sell_sytm_tngy = 'sale_oprt_strg_cros_sell-sytm_tngy'
sytm_tngy_na = 'sytm_tngy-na'
PAD = 'PAD'
cmpt_cmpt_anly_sytm_tngy = 'cmpt_cmpt_anly-sytm_tngy'
prd_fetr_bnft_adv_prof = 'prd_fetr_bnft-adv_prof'
def loadAllDF():
    """Load every "<entity_tag>.csv" under fdir into the module-level df_dict.

    :returns: df_dict mapping entity tag (filename stem) -> DataFrame.
    """
    print("##Loading all dataframes...")
    #set all variables.
    filelist = os.listdir(fdir)
    for fname in filelist:
        #load required DF
        # Non-matching patterns means the regex fails; non-CSV files are skipped.
        m = re.match("(.*?).csv",fname)
        if m:
            entity_tag = m.group(1)
            print("##Processing entity_tag[{}]".format(entity_tag))
        else:
            print("###file pattern match failed for [{}]".format(fname))
            continue
        df1 = pd.read_csv( fdir + fname)
        df_dict[entity_tag] = df1
        #print(df1.head(2))
    return df_dict
def explore2():
    """Exploratory analysis of final_data.csv: NER-tag counts per call
    and top words per (pred_label, word) group for each call tag.

    Output goes to stdout only; commented lines are earlier experiments.
    """
    print("##explore2 starting")
    #set all variables.
    idir = "/opt/fspdivaprj/xray-1/data/nwm/"
    ifile = "final_data.csv"
    df1 = pd.read_csv( idir + ifile)
    print("#rows in df[{}]".format(df1.conv_id.count()))
    #df1 = df1[(df1.sent_label == df1.call_tag) & (df1.call_tag != '_tag_unk1')]
    #print("#rows in df[{}]".format(df1.conv_id.count()))
    print(df1.head(2))
    #look at call level pred_label's
    print("##----------------------------------------------------------");
    print("##Call level ner tags counts")
    #df2 = df1.groupby(['conv_id','pred_label'])['wv'].count().reset_index()
    df2 = df1[df1.call_tag == '_tag_pd'].groupby(['conv_id','pred_label'])['wv'].count().reset_index()
    df2 = df2.sort_values(['wv'],ascending=[False])
    print(df2.head(15))
    print("Total calls - ",df2.conv_id.nunique())
    print("Calls with wv count > 1 - ",df2[df2.wv > 1].conv_id.nunique())
    print("Unique pred_labels with wv count more then 1 - ",df2[df2.wv > 1].pred_label.nunique())
    df3 = df2.groupby(['pred_label'])['wv'].mean().reset_index()
    print(df3.head(50))
    #look at ner tag type based on call_tags
    print("##----------------------------------------------------------");
    print("##ner tags type based on call tags")
    call_tags = df1.call_tag.unique()
    #call_tags = ['_tag_cmp']
    ner_labels = df1.call_tag.unique()
    #ner_labels = ['mrkt_trng-na']
    # NOTE(review): ner_labels is immediately overwritten with one fixed
    # label, so the unique() computation above is dead -- confirm intent.
    ner_labels = ['cmpt_cmpt_anly-na']
    for call_tag in call_tags:
        print("----call_tag[{}]-------".format(call_tag))
        #df3 = df1.groupby(['call_tag','pred_label'])['wv'].count().reset_index()
        df3 = df1[(df1.call_tag == call_tag) & (df1.pred_label.isin(ner_labels))].groupby(['pred_label','word'])['wv'].count().reset_index()
        df3 = df3.sort_values(['wv'],ascending=[False])
        print(df3.head(3))
        #print(df3[df3.word.str.startswith('sen')].head(50))
def analyzeNERTagData(df_dict,call_tag_filter,entity,grp1,grp2):
    """Join three entity DataFrames on conv_id and write the top word
    combinations per (wv_x, wv_y) group to a result CSV in odir.

    :param df_dict: entity tag -> DataFrame (as produced by loadAllDF).
    :param call_tag_filter: value required in the merged 'call_tag_x' column.
    :param entity: df_dict key of the primary entity frame.
    :param grp1: df_dict key of the first frame joined in.
    :param grp2: df_dict key of the second frame joined in.
    """
    print("##Analzying ner data call[{}] entity[{}] grp1[{}] grp2[{}]...".format(call_tag_filter,entity,grp1,grp2))
    #set all variables.
    cmpt = df_dict[entity]
    cmpt_anly = df_dict[grp1]
    clnt_prof = df_dict[grp2]
    #adv_prof = df_dict[adv_prof_na]
    col_names = ['wv_x','wv_y','wv','freq']
    rec_limit = 3
    ind_key_for_rec_limit = 2
    o_df = pd.DataFrame(columns=col_names)
    df1 = pd.merge(cmpt,cmpt_anly,how='inner',left_on='conv_id',right_on='conv_id')
    #print(df1.head(15))
    #print("rowcount[{}]".format(df1['conv_id'].count()))
    #print(df1.columns)
    df1 = pd.merge(df1,clnt_prof,how='inner',left_on='conv_id',right_on='conv_id')
    #print(df1.head(15))
    #print(df1.columns)
    #print("rowcount[{}]".format(df1['conv_id'].count()))
    # Aggregate frequencies per word triple, then sort so the most frequent
    # rows within each (wv_x, wv_y) group come first.
    df1 = df1[df1['call_tag_x'] == call_tag_filter].groupby(col_names[:-1])[col_names[-1]].sum().reset_index()
    df1 = df1.sort_values([col_names[0],col_names[1],col_names[-1]],ascending=[True,True,False])
    #df1 = df1.sort_values([col_names[-1]],ascending=[False])
    #print(df1.head(15))
    #print(df1.columns)
    # Keep at most rec_limit rows per (wv_x, wv_y) key.
    key = None
    prev_key = None
    grp_rec_cnt = 0
    i = 0
    for ind,rec in df1.iterrows():
        #if i > 30:
        #    break
        grp_rec_cnt += 1
        key = rec[col_names[0]] + '|' + rec[col_names[1]]
        if prev_key == None:
            prev_key = key
        if prev_key != key:
            grp_rec_cnt = 1
        #print(i,prev_key,key,grp_rec_cnt)
        if grp_rec_cnt <= rec_limit:
            #print(rec[col_names[0]],rec[col_names[1]],rec[col_names[2]],rec[col_names[3]])
            #o_df.loc[o_df.shape[0]] = [rec[col_names[0]],rec[col_names[1]],rec[col_names[2]],rec[col_names[-1]]]
            o_df.loc[o_df.shape[0]] = rec[:]
        prev_key = key
        i += 1
    print(o_df.head(20))
    ofname = "res_" + call_tag_filter + "_" + entity + "_" + grp1 + "_df.csv"
    print("##Printing result to [{}]".format(ofname))
    o_df.to_csv( odir + ofname)
    #print(" Before rows[{}]".format(df1['conv_id'].count()))
def processData():
    """Entry point: run the current exploration step (other steps kept
    commented for quick switching between analyses)."""
    print("##Processing data...")
    #df_dict = loadAllDF()
    #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,adv_prof_na)
    #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,prd_fetr_bnftr_na)
    #analyzeNERTagData(df_dict,'_tag_cmp',cmpt_na,cmpt_cmpt_anly_na,prd_fetr_bnft_na)
    explore2()
# Script entry point: run the NER-tag analysis pipeline.
if __name__ == "__main__":
    processData()
| null | src/utils/nwm_util.py | nwm_util.py | py | 6,637 | python | en | code | null | code-starcoder2 | 51 |
51810448 | # -*- coding: utf-8 -*-
from base_object import BaseObject
class User(BaseObject):
    """Weibo user record (mongo db 'weibo', collection 'user').

    keyMapping lists the fields BaseObject maps from raw input; 'wid' is
    initialised here but absent from keyMapping (presumably populated
    elsewhere — confirm against BaseObject).
    """
    table = 'weibo'
    collection = 'user'
    keyMapping = (
        'name', 'sex', 'area', 'cnum',
        'follows', 'fans',
    )

    def __init__(self, data=None):
        # String-valued profile fields default to the empty string.
        for field in ('name', 'sex', 'area'):
            setattr(self, field, '')
        # Numeric counters (and the weibo id) default to zero.
        for field in ('cnum', 'follows', 'fans', 'wid'):
            setattr(self, field, 0)
        BaseObject.__init__(self, data)
| null | crawler/GPCrawler/types/user.py | user.py | py | 469 | python | en | code | null | code-starcoder2 | 51 |
422932979 | # coding=utf-8
import abc
import datetime
import io
import logging
import importlib
from pathlib import Path
from queue import Queue
from string import Template
from typing import Optional, Iterable, Generator
from multiprocessing import Pool
import requests
import arcturus.ArcturusSources.Source as Source
from .import ArcturusSources
from .Blacklist import Blacklist
from .Post import Post
from .Taglist import Query
# Program identity and the minimum interpreter version (enforced elsewhere).
NAME = "Arcturus"
PYTHON_REQUIRED_MAJOR = 3
PYTHON_REQUIRED_MINOR = 6
class ArcturusError(Exception):
    """Base exception class for all Arcturus exceptions; catch this to handle
    any error raised by the arcturus package."""
class ArcturusCore:
    '''Central class of the program: takes configuration information and
    downloads posts from a data source.

    Posts are filtered against an optional md5 cache (already downloaded) and
    an optional tag blacklist before being fetched.
    '''
    def __init__(self,
                 source: Source,
                 taglist: Iterable[Query],
                 download_dir: Path,
                 lastrun: Optional[datetime.date],
                 blacklist: Optional[Blacklist],
                 cache: Optional[io.TextIOBase],
                 **kwargs
                 ):
        # required args
        self._source = source
        self._taglist = taglist
        self._download_dir = download_dir
        # optional args
        self._lastrun = lastrun
        self._blacklist = blacklist
        self._cache = cache
        # NOTE(review): _threads is stored but update() hard-codes Pool(1) —
        # confirm whether multi-threaded downloads were intended.
        self._threads = kwargs.get('download_threads', 4)
        # Template keys come from Post attribute names (see _download_single).
        self._nameformat = kwargs.get('download_nameformat', "${artist}_${md5}.${ext}")
        self._kwargs = kwargs
        self._log = logging.getLogger()
        # attributes
        self._pending_downloads = Queue()
    @classmethod
    def import_arcturus_source(cls, source_name):
        """Dynamically import a source plugin module from ArcturusSources."""
        return importlib.import_module(f'.ArcturusSources.{source_name}', __package__)
    def _get_posts(self) -> Generator[Post, None, None]:
        """Yield posts for every taglist query, skipping cached/blacklisted ones."""
        for line in self._taglist:
            lastrun = self._lastrun
            if line.ignore_lastrun:
                lastrun = None
            for post in self._source.get_posts(query=line.text, alias=line.alias, lastrun=lastrun):
                # these are the individual images / movies / files
                # it has been previously downloaded. don't download it again
                if self._cache and post.md5 in self._cache:
                    continue
                # if we have a blacklist and this shouldn't be downloaded based on it, skip it
                if self._blacklist and self._blacklist.is_blacklisted(post.tags):
                    continue
                yield post
    def _download_single(self, post: Post):
        """Stream one post's file into download_dir, named via _nameformat."""
        filename = Template(self._nameformat).substitute(post.__dict__)
        destination = self._download_dir / Path(filename)
        response = requests.get(post.url, stream=True)
        # NOTE(review): handle is never closed — wrap in `with` to avoid a leak.
        handle = open(destination, "wb")
        for chunk in response.iter_content(chunk_size=512):
            if chunk:  # filter out keep-alive new chunks
                handle.write(chunk)
    def _print_post(self, post: Post):
        """Dry-run alternative to _download_single: just print the URL."""
        print(post.url)
    def update(self, namefmt: Optional[str], download_method=_download_single):
        """Fetch everything matching the taglist.

        NOTE(review): `namefmt` is unused; and `download_method` defaults to the
        *unbound* _download_single, so Pool.map passes each Post as `self` —
        this looks broken and needs confirmation/repair.
        """
        for x in self._get_posts():
            print(x)
        p = Pool(1)
        p.map(download_method, self._get_posts())
| null | arcturus/ArcturusCore.py | ArcturusCore.py | py | 3,206 | python | en | code | null | code-starcoder2 | 51 |
39204783 | # For Alessandro : change acelib path to yours, opti to 4 (because you have a lot of RAM)
############## MODULES ################
from __future__ import division
import subprocess
import signal
import math
import time
import os
import re
import atexit
from time import gmtime, strftime
import numpy
# Import all the user-defined functions
from my_functions import *
########### INPUT ###########
#### Run parameters ####
plotting=False
cleaning=False
## Serpent
# Number of OpenMP threads (Serpent)
num_omp = 20
num_mpi=1
# Population (NPG, NGEN, NSKIP)
pop = (50,300,50)
cpop = (50,300,50,50)
# Number of inactive cycles when using fission source passing (0 to turn it off)
fsp_cycles = 50
## Inner iterations
# Maximum number of coupling iterations
sie_max_iter = 2 # set for sie, put 2 for predictor/corrector like LE/LI
# Optimization
opti=3 # 1: decrease RAM usage but increase time, 4: best performance (more RAM required)
# # Tolerance for T and Q convergence
# epsT=0#1e-3
# epsQ=0#5e-3
## Outer iterations
# List of burnup increments
#burnup=[0, 0.1, 0.5, 1, 2, 3, 4, 5, 7, 10, 12, 15, 17, 20, 25, 30, 35, 40, 45, 50, 55, 60]
#BUincr=numpy.diff(burnup)
BUincr=[ 2 for i in range(55) ]
BUincr=[0.25 for i in range(40)]+[ 2 for i in range(50) ]
#BUincr=[0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1,]
#### File paths ####
# Serpent 2 command
sss_exe = '/global/home/groups/co_nuclear/serpent2.1.29/serpent2.1.29_gcc/sss2'
# OF command
of_exe = 'solidDisplacementFoam' #'offbeat'
# Serpent input path
sss_temp = './input_lattice'
# Cross section directory file path
acelib = '/global/home/groups/co_nuclear/serpent/xsdata_2/endfb7/sss_endfb7u.xsdata'
# Decay data library name (must be inside cs directory)
declib="sss_jeff311.dec" # needs to be specified for burnup calculations
# Neutron induced fission yield library name (must be inside cs directory, why ? don't know)
nfylib="sss_jeff311.nfy" # needs to be specified for burnup calculations
# Serpent input (Created by the script by modifying template)
sss_input = './sss_coupled'
# Interface folder
ifc_path = './ifc'
# Log files
sss_log_path = './sss.log'
of_log_path = './of.log'
# Print information
print('\n\t ===================================================')
print( '\t/ COUPLED OPENFOAM/SERPENT CALCULATION /')
print( '\t=================================================== \n')
print('Start time : ' + strftime("%Y-%m-%d %H:%M:%S", gmtime()))
print('\nSummary of Serpent input:')
print('\tNumber of OpenMP threads used: ' + str(num_omp))
if len(BUincr)>0:
BUlist=[sum(BUincr[0:i]) for i in range(len(BUincr)+1)]
print('\tDepletion calculation, list of burnup (MWd/kg):')
print('\t'+ str(BUlist)[1:-1])
else:
print('\tCalculation without depletion')
print('\tNumber of inactive cycles: ' + str(pop[2]) + ', when using fission source passing: ' + str(fsp_cycles))
print('\tNumber of active cycles: ' + str(pop[1]))
print('\tNumber of neutrons per cycle: ' + str(pop[0]))
print('\nSummary of coupling input:')
# print('\tRelative tolerance for power convergence: %10.3E' %(epsQ))
# print('\tRelative tolerance for temperature convergence: %10.3E' %(epsT))
print('\tMaximum number of coupled iterations: ' + str(sie_max_iter))
#### POSIX SIGNAL HANDLER CREATION #####
# Class to communicate with Serpent
class signal_handler(object):
    """Tracks the POSIX-signal handshake with the Serpent process.

    Serpent signals the coupling script: SIGUSR1 = resume iterating,
    SIGUSR2 = move to the next time point, SIGTERM = end of calculation.
    The main loop reads `iterating` after each signal.pause().
    """
    def __init__(self):
        self.iterating = True
        self.simulating = True  # set but not read anywhere in this script
        signal.signal(signal.SIGUSR1, self.handle)
        signal.signal(signal.SIGUSR2, self.handle)
        signal.signal(signal.SIGTERM, self.handle)
    def handle(self, sig, frame):
        """Signal callback: update the iteration flags according to *sig*."""
        if sig == signal.SIGUSR1: # Got the signal to resume
            self.iterating = True
            print('Received SIGUSR1')
        elif sig == signal.SIGUSR2: # Got the signal to move to next time point
            self.iterating = True
            print('Received SIGUSR2')
        elif sig == signal.SIGTERM: # Got the signal to end the calculation
            self.iterating = False
            print('Received SIGTERM')
        # Should not go here
        else:
            print('Error: Unknown POSIX signal.')
            # NOTE(review): `sys` is not imported at the top of this file; it
            # must come in via `from my_functions import *` or this raises
            # NameError — confirm.
            sys.exit()
####### SERPENT INPUT CREATION #######
# Open files
fi = open(sss_temp, 'r')
fo = open(sss_input,'w')
# Copy contents of the input template
for line in fi:
fo.write(line)
fi.close()
fo.write('\n% Lines added by the coupling script')
# Add cross section and libraries directories file path
fo.write('\nset acelib "{}"'.format(acelib))
#fo.write('\nset declib "{}"'.format(declib))
#fo.write('\nset nfylib "{}"'.format(nfylib))
# Add population to input file
fo.write('\nset pop {0[0]} {0[1]} {0[2]}'.format(pop))
fo.write('\nset cpop {0[0]} {0[1]} {0[2]} {0[3]}'.format(cpop))
# Add maximum number of iterations
#fo.write('\nset sie {0:d}'.format(sie_max_iter)) # Stochastic implicit Euler
fo.write('\nset pcc leli 10 10') # Linear extrap/Linear interp
# Add burnup steps to input file
if len(BUincr)!=0:
fo.write('\nset his 1')
fo.write('\nset inventory "actinides" "lanthanides"')
fo.write('\ndep bustep')
for i in range(len(BUincr)):
fo.write(' '+str(BUincr[i]))
# Add fission source passing
if fsp_cycles > 0:
fo.write('\nset fsp 1 {0:d}'.format(fsp_cycles))
# Add pid for POSIX signalling
fo.write('\nset ppid {0:d}'.format(os.getpid()))
# Add optimization parameter
fo.write('\nset opti "{0:d}"'.format(opti))
#Plot geometry
if plotting==True:
fo.write('\nplot 3 5000 5000')
fo.write('\nplot 1 5000 5000')
fo.write('\nplot 2 5000 5000')
fo.write('\nplot 3 5000 5000 -2 2 -2 2 -0.5 0.5')
fo.write('\nplot 1 5000 5000 0 -2 2 -2 2')
fo.write('\nplot 2 5000 5000 0 -2 2 -2 2')
# Close file
fo.close()
####### PREPARATION FOR THE SCRIPT #######
# Make the uniform values readable by Serpent
no_uniform()
# Burnup calculation parameters
max_bu_steps=len(BUincr)+1
BU=0
# Open log files
sss_log = open(sss_log_path,'w')
of_log = open(of_log_path,'w')
# Symbolic link from OF time folder to Serpent ifc folder
of_time=0
subprocess.check_call(['ln','-sfn',str(of_time),ifc_path], stdout=of_log, stderr=subprocess.STDOUT)
#print('Serpent interface linked to folder ./' + str(of_time) + '/')
# Delete time steps from previous calculations
subprocess.check_call(['foamListTimes','-rm'], stdout=of_log, stderr=subprocess.STDOUT)
print('\nDeleted previous calculation folders')
################# SCRIPT ##################
# Start Serpent calculation and create a signal handler object
s = signal_handler()
print('\n\t\tSTARTING CALCULATION')
if num_mpi>1:
sss_process = subprocess.Popen(['mpirun', '-np', str(num_mpi) ,sss_exe, sss_input, '-omp', str(num_omp)], bufsize=-1, stdout=sss_log, stderr=subprocess.STDOUT)
else:
sss_process = subprocess.Popen([sss_exe, sss_input, '-omp', str(num_omp)], bufsize=-1, stdout=sss_log, stderr=subprocess.STDOUT)
atexit.register(sss_process.terminate)
# Initialize
global_run = time.time() # created to keep track of the script duration
solver_run = time.time() #created to keep track of processes durations
step=1
substep=0
print('\n - - - - - - - - - - - - - - - - - - - - - - - -')
print('\tDepletion step 1 : BU=' + str(BU) + 'MWd/kg')
print(' - - - - - - - - - - - - - - - - - - - - - - - -')
print('T0')
print('First Serpent run')
print('Running Serpent')
# Wait until signal is received
solver_run=time.time()
signal.pause()
print('\t=> Q0')
print('\tSerpent run time : {}'.format(time.time()-solver_run))
# Correct weird Q file naming behavior. Update: understood it, to correct, even if it works as it is (ugly)
for i in range(step-100,step+100):
try:
shutil.copy('./ifc/Q'+str(i),'./ifc/Q')
os.remove('./ifc/Q'+str(i))
print('copied ' +str(i))
break
except:
pass
# Change OF input to adapt boundary conditions (apparently not needed for Serpent 2.1.30)
print('\tChanging boundaries')
change_boundaries()
print('\tAveraging powers for each set')
average_set(of_time)
while s.iterating:
print('\n--------\tCoupling iteration #'+str(substep)+'\t--------')
# Run OpenFOAM
print('Running OFFBEAT')
solver_run=time.time()
subprocess.check_call([of_exe], bufsize=-1, stdout=of_log, stderr=subprocess.STDOUT)
print('\t=> T'+str(of_time+1)+'=T'+str(step)+'_'+str(substep))
print('\tOFFBEAT run time : {}'.format(time.time()-solver_run))
# Update time and symbolic link
subprocess.check_call(['cp',ifc_path+'/Q','./'+str(of_time+1)+'/'], stdout=of_log, stderr=subprocess.STDOUT) # added for the use of displacementfoam
of_time += 1
subprocess.check_call(['ln','-sfn',str(of_time),ifc_path], stdout=of_log, stderr=subprocess.STDOUT)
print('\tSerpent interface linked to folder ./' + str(of_time) + '/')
# Calculate difference in temperature
difT=compare_field_steps('T',of_time)
#print('T : maximum difference : %10.3E' %(difT[0]))
print('\tT : Maximum relative difference : %10.2f%%' %(difT[1]*100))
# Run SERPENT
sss_process.send_signal(signal.SIGUSR1) # Signal Serpent to continue calculation
print('\nRunning Serpent')
solver_run=time.time()
signal.pause() # Wait until signal is received
print('\t=> Q'+str(of_time)+'=Q'+str(step)+'_'+str(substep))
print('\tSerpent run time : {}'.format(time.time()-solver_run))
# Correct weird Q file naming behavior
for i in range(step-100,step+100):
try:
shutil.copy('./ifc/Q'+str(i),'./ifc/Q')
os.remove('./ifc/Q'+str(i))
print('copied ' +str(i))
break
except:
pass
# Change OF input to adapt boundary conditions (apparently not needed for Serpent 2.1.30)
print('\tChanging boundaries')
change_boundaries()
print('\tAveraging powers for each set')
average_set(of_time)
# Calculate difference in power
difQ=compare_field_steps('Q',of_time)
#print('Q : maximum difference : %10.3E' %(difQ[0]))
print('\tQ : Maximum relative difference : %10.2f%%' %(difQ[1]*100))
if substep==sie_max_iter:
# When too many iterations
print('\nMaximum number of iterations reached, next step')
# # Test if it is the end of the simulation
# if step==max_bu_steps:
# # Update burnup of last file (for relevance, not really necessary)
# update_BU(BU,of_time)
# # Clean and update the folders to match the step number
# clean_timesteps(step)
# print('------------------------------------------------')
# print('Last burnup reached')
# break
if cleaning==True:
# Initialize new burn step
substep=0
# Update burnup
BU=sum(BUincr[0:step])
update_BU(BU,of_time)
# Clean and update the folders to match the step number
clean_timesteps(step)
of_time=step
step+=1
# Update time and symbolic link
subprocess.check_call(['ln','-sfn',str(of_time),ifc_path], stdout=of_log, stderr=subprocess.STDOUT)
print('Serpent interface linked to folder ./' + str(of_time))
print('\n - - - - - - - - - - - - - - - - - - - - - - -')
print('\tDepletion step ' + str(step) + ' : BU=' + str(BU) + 'MWd/kg')
print(' - - - - - - - - - - - - - - - - - - - - - - -')
else:
# If no condition met, iterate
substep+=1
# Close logs
sss_log.close()
of_log.close()
print('Total run time: : {}'.format(time.time()-global_run))
| null | Main2.py | Main2.py | py | 11,642 | python | en | code | null | code-starcoder2 | 51 |
593620312 | import os
import pandas as pd
def run(inputs):
    """For each character column of the input block, return the least common
    character (columns are joined into a single message string).

    *inputs* is an os.linesep-separated block of equal-length lines.
    """
    grid = pd.DataFrame([list(line) for line in inputs.split(os.linesep)])
    rarest = [grid[column].value_counts().index[-1] for column in grid.columns]
    return "".join(rarest)
| null | 2016/06/b.py | b.py | py | 244 | python | en | code | null | code-starcoder2 | 51 |
610063663 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division # the result of a division will be a float
import Tweet_Functions
import conf
import pydisque
from pydisque.client import Client
import json
import time
from threading import Thread, RLock
from pymongo import MongoClient
# Shared mongo handle (database 'algo') and a re-entrant lock that serialises
# job processing across Algo worker threads.
client_mongo = MongoClient()
db = client_mongo.algo
verrou = RLock()
class Algo(Thread):
    """Worker thread that scores tweet sentiment.

    Consumes raw tweets from the disque queue given at construction, runs the
    Tweet_Functions sentiment pipeline, republishes the enriched tweet on the
    'toProcess_res' queue, and archives scored tweets in mongo.
    """

    # Class-level default; every instance overwrites this in __init__.
    queue = ''

    def __init__(self, name, client):
        """*name* is the disque queue to consume; *client* is a pydisque Client."""
        Thread.__init__(self)
        self.queue = name
        self.client = client
        self.num_tweets = 0  # tweets successfully scored so far
        self.tot = 0         # running sum of positivity scores
        # Running mean positivity. Previously 'average' was a run()-local bound
        # only after the first scored tweet, so a retweet arriving first raised
        # NameError; initialising it here fixes that.
        self.average = 0.0

    def run(self):
        """Main loop: pull jobs forever, score them, and forward the results."""
        print('>>> new analyse:', self.queue)
        while True:
            jobs = self.client.get_job([self.queue])
            for queue_name, job_id, tweet_struct in jobs:
                with verrou:
                    tweet_brut = json.loads(tweet_struct.decode('utf-8'))
                    tweet_brut['original'] = ''
                    # NOTE(review): if 'retweeted' is a JSON bool, str(True) is
                    # 'True' (capital T) so this test never matches — confirm
                    # the feed really sends the lowercase string 'true'.
                    if (str(tweet_brut['retweeted']) != 'true'):
                        text = str(tweet_brut['text'])
                        lang = str(tweet_brut['lang'])
                        tweet = Tweet_Functions.Tweet(text, lang)
                        result_job = tweet.Process()
                        if result_job != 0:
                            self.num_tweets += 1
                            self.tot += float(result_job['positivity'])
                            self.average = self.tot / self.num_tweets
                            tweet_brut['positivity'] = float(result_job['positivity'])
                            tweet_brut['average'] = self.average
                            tweet_brut['top_word'] = result_job['top_word']
                            self.client.add_job('toProcess_res', json.dumps(tweet_brut), timeout=100)
                            self.client.ack_job(job_id)
                            Algo.stock_ML(job_id.decode('utf-8'), text, result_job['positivity'])
                        # NOTE(review): when Process() returns 0 the job is never
                        # acked and will be redelivered — confirm this is intended.
                    else:
                        # Retweets are forwarded unscored, tagged with the id of
                        # the original tweet and the current running average.
                        tweet_brut['original'] = tweet_brut['retweeted_status']['id_str']
                        tweet_brut['positivity'] = 0.0
                        tweet_brut['average'] = self.average
                        tweet_brut['top_word'] = ''
                        self.client.add_job('toProcess_res', json.dumps(tweet_brut), timeout=100)
                        self.client.ack_job(job_id)

    @staticmethod
    def stock_ML(id, tweet, positivity):
        """Persist a scored tweet into mongo (db.tweetml), upserting on idTweet.

        Declared @staticmethod because it is always called as Algo.stock_ML(...)
        and takes no self; the 'id' parameter name (which shadows the builtin)
        is kept for interface stability.
        """
        # store the tweet in the database
        tweets = db.tweetml
        print('>>> id:', id, '>>> tweet:', tweet, '>>> positivity:', float(positivity))
        if tweets.find({ 'idTweet': id }).count() == 0:
            tweets.insert({
                'idTweet': id,
                'tweet': tweet,
                'value': float(positivity)
            })
        else:
            tweets.update({ 'idTweet': id }, { '$set': { 'value': float(positivity) }})
| null | Sentiment_Analysis.py | Sentiment_Analysis.py | py | 2,243 | python | en | code | null | code-starcoder2 | 51 |
32890507 | """
@author: jpzxshi & zen
"""
import os
import time
import numpy as np
import torch
from .nn import LossNN
from .utils import timing, cross_entropy_loss
class Brain:
    '''Runner based on torch.

    Wraps an Adam training loop plus an optional L-BFGS refinement behind a
    class-level singleton: configure with Brain.Init(...), then drive it via
    the Run / Restore / Output classmethods.
    '''
    # Class-level singleton instance managed by Init().
    brain = None
    @classmethod
    def Init(cls, data, net, criterion, optimizer, lr, iterations, lbfgs_steps, path = None, batch_size=None,
             batch_size_test=None, weight_decay=0, print_every=1000, save=False, callback=None, dtype='float', device='cpu'):
        '''Create (and replace) the singleton runner with the given configuration.'''
        cls.brain = cls(data, net, criterion, optimizer, lr, weight_decay, iterations, lbfgs_steps, path, batch_size,
                        batch_size_test, print_every, save, callback, dtype, device)
    @classmethod
    def Run(cls):
        '''Run the Adam training loop on the singleton.'''
        cls.brain.run()
    @classmethod
    def Run_reservoir(cls):
        # NOTE(review): no run_reservoir method is defined on this class; this
        # only works if a subclass provides one — confirm.
        cls.brain.run_reservoir()
    @classmethod
    def Restore(cls):
        '''Reload the best saved checkpoint (and optionally refine with L-BFGS).'''
        cls.brain.restore()
    @classmethod
    def Output(cls, data=True, best_model=True, loss_history=True, info=None, **kwargs):
        '''Dump datasets / best model / loss history under ./outputs/.'''
        cls.brain.output(data, best_model, loss_history, info, **kwargs)
    @classmethod
    def Loss_history(cls):
        return cls.brain.loss_history
    @classmethod
    def Encounter_nan(cls):
        return cls.brain.encounter_nan
    @classmethod
    def Best_model(cls):
        return cls.brain.best_model
    def __init__(self, data, net, criterion, optimizer, lr, weight_decay, iterations, lbfgs_steps, path, batch_size,
                 batch_size_test, print_every, save, callback, dtype, device):
        self.data = data
        self.net = net
        self.criterion = criterion
        self.optimizer = optimizer
        self.lr = lr
        self.weight_decay = weight_decay
        self.iterations = iterations
        self.lbfgs_steps = lbfgs_steps
        self.path = path
        self.batch_size = batch_size
        self.batch_size_test = batch_size_test
        self.print_every = print_every
        self.save = save
        self.callback = callback
        self.dtype = dtype
        self.device = device
        # Results populated by run()/restore().
        self.loss_history = None
        self.encounter_nan = False
        self.best_model = None
        # Private: concrete optimizer/criterion objects built in __init_brain().
        self.__optimizer = None
        self.__criterion = None
    @timing
    def run(self):
        '''Adam training loop.

        Takes a gradient step every iteration; metrics are recorded (and the
        model optionally checkpointed) only every print_every iterations and at
        the final iteration. Returns the loss-history array, or None if the
        loss goes NaN.
        '''
        self.__init_brain()
        print('Training...', flush=True)
        loss_history = []
        for i in range(self.iterations + 1):
            X_train, y_train = self.data.get_batch(self.batch_size)
            loss = self.__criterion(self.net(X_train), y_train)
            if i % self.print_every == 0 or i == self.iterations:
                X_test, y_test = self.data.get_batch_test(self.batch_size_test)
                loss_test = self.__criterion(self.net(X_test), y_test)
                # print('{:<9}Train loss: %.4e{:<25}Test loss: %.4e{:<25}'.format(i, loss.item(), loss_test.item()), flush=True)
                print(' ADAM || It: %05d, Loss: %.4e, Test: %.4e' %
                      (i, loss.item(), loss_test.item()))
                if torch.any(torch.isnan(loss)):
                    self.encounter_nan = True
                    print('Encountering nan, stop training', flush=True)
                    return None
                if self.save:
                    # Checkpoint this iteration's model so restore() can pick the best.
                    if not os.path.exists('model'): os.mkdir('model')
                    if self.path == None:
                        torch.save(self.net, 'model/model{}.pkl'.format(i))
                    else:
                        if not os.path.isdir('model/'+self.path): os.makedirs('model/'+self.path)
                        torch.save(self.net, 'model/{}/model{}.pkl'.format(self.path, i))
                if self.callback is not None:
                    output = self.callback(self.data, self.net)
                    loss_history.append([i, loss.item(), loss_test.item(), *output])
                else:
                    loss_history.append([i, loss.item(), loss_test.item()])
            # Gradient step on every iteration except the final, evaluation-only one.
            if i < self.iterations:
                self.__optimizer.zero_grad()
                loss.backward()
                self.__optimizer.step()
        self.loss_history = np.array(loss_history)
        # print('Done!', flush=True)
        return self.loss_history
    def restore(self):
        '''Load the checkpoint with the lowest recorded training loss, then
        (when lbfgs_steps > 0) refine it with full-batch L-BFGS.

        Requires a prior run() with save=True; raises RuntimeError otherwise.
        '''
        if self.loss_history is not None and self.save == True:
            best_loss_index = np.argmin(self.loss_history[:, 1])
            iteration = int(self.loss_history[best_loss_index, 0])
            loss_train = self.loss_history[best_loss_index, 1]
            loss_test = self.loss_history[best_loss_index, 2]
            print('BestADAM It: %05d, Loss: %.4e, Test: %.4e' %
                  (iteration, loss_train, loss_test))
            if self.path == None:
                self.best_model = torch.load('model/model{}.pkl'.format(iteration))
            else:
                self.best_model = torch.load('model/{}/model{}.pkl'.format(self.path,iteration))
        else:
            raise RuntimeError('restore before running or without saved models')
        from torch.optim import LBFGS
        optim = LBFGS(self.best_model.parameters(), history_size=100,
                      max_iter=self.lbfgs_steps,
                      tolerance_grad=1e-09, tolerance_change=1e-09,
                      line_search_fn="strong_wolfe")
        self.it = 0
        if self.lbfgs_steps != 0:
            def closure():
                # L-BFGS re-evaluates the loss multiple times per step; uses the
                # model's own criterion and full batches (batch size None).
                if torch.is_grad_enabled():
                    optim.zero_grad()
                X_train, y_train = self.data.get_batch(None)
                X_test, y_test = self.data.get_batch_test(None)
                loss = self.best_model.criterion(self.best_model(X_train), y_train)
                loss_test = self.best_model.criterion(self.best_model(X_test), y_test)
                it = self.it + 1
                if it % self.print_every == 0 or it == self.lbfgs_steps:
                    print('L-BFGS|| It: %05d, Loss: %.4e, Test: %.4e' %
                          (it, loss.item(), loss_test.item()))
                self.it = it
                if loss.requires_grad:
                    loss.backward()
                return loss
            optim.step(closure)
        print('Done!', flush=True)
        return self.best_model
    def output(self, data, best_model, loss_history, info, **kwargs):
        '''Write the selected artifacts under ./outputs/<path-or-timestamp>/.

        Extra kwargs are saved as <key>.txt via np.savetxt.
        '''
        if self.path is None:
            path = './outputs/' + time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(time.time()))
        else:
            path = './outputs/' + self.path
        if not os.path.isdir(path): os.makedirs(path)
        if data:
            def save_data(fname, data):
                # dicts become .npz archives, arrays become plain .npy files
                if isinstance(data, dict):
                    np.savez_compressed(path + '/' + fname, **data)
                else:
                    np.save(path + '/' + fname, data)
            save_data('X_train', self.data.X_train_np)
            save_data('y_train', self.data.y_train_np)
            save_data('X_test', self.data.X_test_np)
            save_data('y_test', self.data.y_test_np)
        if best_model:
            torch.save(self.best_model, path + '/model_best.pkl')
        if loss_history:
            np.savetxt(path + '/loss.txt', self.loss_history)
        if info is not None:
            with open(path + '/info.txt', 'w') as f:
                for key, arg in info.items():
                    f.write('{}: {}\n'.format(key, str(arg)))
        for key, arg in kwargs.items():
            np.savetxt(path + '/' + key + '.txt', arg)
    def __init_brain(self):
        '''Reset run state and build the concrete optimizer/criterion.'''
        self.loss_history = None
        self.encounter_nan = False
        self.best_model = None
        # Propagate device/dtype preferences to the data and network wrappers.
        self.data.device = self.device
        self.data.dtype = self.dtype
        self.net.device = self.device
        self.net.dtype = self.dtype
        self.__init_optimizer()
        self.__init_criterion()
    def __init_optimizer(self):
        # Only Adam is supported for the training phase.
        if self.optimizer == 'adam':
            self.__optimizer = torch.optim.Adam(self.net.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        else:
            raise NotImplementedError
    def __init_criterion(self):
        # A LossNN carries its own loss; an explicit criterion is then ignored.
        if isinstance(self.net, LossNN):
            self.__criterion = self.net.criterion
            if self.criterion is not None:
                import warnings
                warnings.warn('loss-oriented neural network has already implemented its loss function')
        elif self.criterion == 'MSE':
            self.__criterion = torch.nn.MSELoss()
        elif self.criterion == 'CrossEntropy':
            self.__criterion = cross_entropy_loss
        else:
            raise NotImplementedError
| null | learner/brain.py | brain.py | py | 8,618 | python | en | code | null | code-starcoder2 | 51 |
484345255 | import os
from myhttplib import Server, config
import socket
# Pre-forking HTTP server bootstrap: bind one shared listening socket, fork
# one worker per CPU, then run the Server loop in every process.
if __name__ == '__main__':
    port = 80
    _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick restarts without waiting for TIME_WAIT sockets to expire.
    _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    _socket.bind(('0.0.0.0', port))
    _socket.listen(1)  # NOTE(review): backlog of 1 is tiny for a server — confirm intentional
    _socket.setblocking(0)  # non-blocking; accept handling presumably lives in Server
    print('socket ok')
    # CPU_LIMIT: fork CPU_LIMIT-1 children that share the listening socket;
    # a child breaks out immediately so it does not fork grandchildren.
    for _ in range(1, int(config.CPU_LIMIT)):
        pid = os.fork()
        if pid == 0:
            break
    server = Server(port, _socket)
    print('server created')
    server.run()
236011216 | import json
from os import listdir
from os.path import isfile, join
from player import player
import csv
import pdb
def getFiles(folder):
    """List the names of regular files (not directories) directly inside *folder*."""
    return [entry for entry in listdir(folder) if isfile(join(folder, entry))]
def jsonToList(path):
    """Read the file at *path* and return its parsed JSON content."""
    with open(path) as handle:
        return json.load(handle)
def distinctValues(d, value):
    """Return the set of distinct values stored under key *value* across the dicts in *d*."""
    return {entry[value] for entry in d}
def addAdvanced(players):
    """Attach advanced-stats JSON to each player, keyed by the numeric id in the
    filename ('<id>.json'; path[:-5] strips the extension)."""
    directory = 'nbaStats/advancedPlayer'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        players[id].addAdvanced(d)
def addFourFactor(players):
    """Attach four-factor JSON dumps to each player (filenames are '<id>.json')."""
    directory = 'nbaStats/playerFourFactor'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        # NOTE(review): this calls addShotCharts, unlike every sibling loader
        # which calls the matching method — looks like a copy-paste slip;
        # confirm whether player has an addFourFactor method.
        players[id].addShotCharts(d)
def addMisc(players):
    """Attach misc-stats JSON to each player (filenames are '<id>.json')."""
    directory = 'nbaStats/playerMisc'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        players[id].addMisc(d)
def addShotCharts(players):
    """Attach shot-chart JSON to each player (filenames are '<id>.json')."""
    directory = 'nbaStats/playerShot'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        players[id].addShotCharts(d)
def addUsage(players):
    """Attach usage-stats JSON to each player (filenames are '<id>.json')."""
    directory = 'nbaStats/playerUsage'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        players[id].addUsage(d)
def addBoxScore(players):
    """Attach box-score JSON to each player (filenames are '<id>.json')."""
    directory = 'nbaStats/playerBoxScore'
    for path in getFiles(directory):
        id = int(path[:-5])
        d = jsonToList(directory + '/' + path)
        players[id].addBoxScore(d)
def loadPlayers():
    """Build {player_id: Player} from nbaStats/player.txt, then hydrate each
    player with every per-stat JSON dump (box score, advanced, four factor,
    misc, shot charts, usage)."""
    playerDict = jsonToList('nbaStats/player.txt')
    players = {}
    for d in playerDict:
        p = player.Player(d)
        players[p.id] = p
    addBoxScore(players)
    addAdvanced(players)
    addFourFactor(players)
    addMisc(players)
    addShotCharts(players)
    addUsage(players)
    return players
def loadGames():
    """Load nbaStats/game.txt and return a dict mapping game id -> game record.

    Replaces a manual loop that also bound an unused local; behavior unchanged.
    """
    return {game['id']: game for game in jsonToList('nbaStats/game.txt')}
def groupByPosition(players):
    """Group players by position, skipping junk positions ('RP', 'TE', 'D').

    Returns {position: {player_id: player}}; insertion order matches the
    iteration order of *players*, as before.
    """
    junk_positions = {'RP', 'TE', 'D'}  # original list repeated 'RP'; a set suffices
    grouped = {}
    # Loop variable renamed from 'player', which shadowed the imported module.
    for p in players.values():
        pos = p.getPosition()
        if pos in junk_positions:
            continue
        # setdefault collapses the original "create dict if absent" branch.
        grouped.setdefault(pos, {})[p.id] = p
    return grouped
def get_float_minutes(s):
    """Convert a 'MM:SS' time string into fractional minutes.

    Fixes a copy-paste bug: the seconds component previously re-read vals[0]
    (the minutes), so '10:30' came out as 10 + 10/60 instead of 10.5.
    """
    vals = s.split(':')
    minutes = float(vals[0])
    seconds = float(vals[1])
    return minutes + (seconds / 60)
def getAverage(d,key,seasons):
    """Mean of d[i][key] over entries whose 'season' is in *seasons*.

    'min' values are 'MM:SS' strings and go through get_float_minutes; any
    unparsable value counts as 0 but still increments the denominator.
    Returns 0 when no entry matches.
    """
    total = 0.0
    count = 0
    for val in d:
        if val['season'] in seasons:
            try:
                if key == 'min':
                    total += get_float_minutes(val[key])
                else:
                    total += float(val[key])
            except ValueError:
                total += 0  # treat unparsable values as zero
            count += 1
    if count > 0:
        return total / float(count)
    else:
        return 0
# Box-score columns averaged for every player, in fixed csv-column order.
boxScoreCategories = ['min','fgm','fga','fg3m','fg3a','ftm','fta','oreb','dreb',
                      'ast','blk','stl','to','pf','pts','plus_minus',]
def getBoxScoreAverages(player, seasons):
    """Average each box-score category for *player* over *seasons*, preserving order."""
    return [getAverage(player.boxScore, category, seasons)
            for category in boxScoreCategories]
# Advanced-metric columns averaged for every player, in fixed csv-column order.
advancedCategories = ['off_rating','def_rating','ast_pct','ast_tov','ast_ratio',
                      'oreb_pct','dreb_pct','treb_pct','tm_tov_pct','efg_pct',
                      'ts_pct','usg_pct','pace','pie']
def getAdvancedAverages(player, seasons):
    """Average each advanced category for *player* over *seasons*, preserving order."""
    return [getAverage(player.advanced, category, seasons)
            for category in advancedCategories]
shotChartCategories = ['minutes_remaining','seconds_remaining','event_type','action_type','shot_type',
'shot_distance','loc_x','loc_y','shot_attempted_flag','shot_made_flag',
'shot_zone_basic','shot_zone_area','shot_zone_range']
# Module-level accumulator: field name -> set of every distinct value observed.
valuesDict = {}
def getDistinctValues(keys, l):
    """Record, for each key in *keys*, the values seen across the dicts in *l*.

    Results accumulate into the module-level valuesDict (side effect only).
    """
    for v in l:
        for k in keys:
            # setdefault replaces the original four-line "create set if absent" branch.
            valuesDict.setdefault(k, set()).add(v[k])
numericalShotCategories = ['shot_distance','shot_made_flag','loc_x','loc_y']
categoricalShotCategories = {
'shot_zone_area': ['Right Side(R)',
'Center(C)',
'Left Side(L)',
'Left Side Center(LC)',
'Back Court(BC)',
'Right Side Center(RC)'],
'action_type': ['Fadeaway Bank shot',
'Running Layup Shot',
'Jump Bank Hook Shot',
'Driving Floating Jump Shot',
'Step Back Jump shot',
'Slam Dunk Shot',
'Tip Layup Shot',
'Driving Dunk Shot',
'Hook Shot',
'Running Alley Oop Layup Shot',
'Running Tip Shot',
'Running Pull-Up Jump Shot',
'Fadeaway Jump Shot',
'Putback Dunk Shot',
'Tip Dunk Shot',
'Running Reverse Layup Shot',
'Running Finger Roll Layup Shot',
'Putback Layup Shot',
'Step Back Bank Jump Shot',
'No Shot',
'Turnaround Jump Shot',
'Floating Jump shot',
'Jump Shot',
'Layup Shot',
'Reverse Layup Shot',
'Turnaround Fadeaway shot',
'Hook Bank Shot',
'Driving Jump shot',
'Cutting Dunk Shot',
'Driving Hook Shot',
'Running Alley Oop Dunk Shot',
'Alley Oop Layup shot',
'Turnaround Bank Hook Shot',
'Jump Bank Shot',
'Reverse Dunk Shot',
'Driving Floating Bank Jump Shot',
'Turnaround Fadeaway Bank Jump Shot',
'Running Dunk Shot',
'Tip Shot',
'Running Jump Shot',
'Running Hook Shot',
'Putback Slam Dunk Shot',
'Driving Bank Hook Shot',
'Turnaround Hook Shot',
'Cutting Finger Roll Layup Shot',
'Running Bank shot',
'Pullup Jump shot',
'Alley Oop Dunk Shot',
'Reverse Slam Dunk Shot',
'Running Slam Dunk Shot',
'Driving Bank shot',
'Driving Slam Dunk Shot',
'Running Reverse Dunk Shot',
'Pullup Bank shot',
'Jump Hook Shot',
'Driving Reverse Dunk Shot',
'Driving Finger Roll Layup Shot',
'Dunk Shot',
'Turnaround Bank shot',
'Driving Layup Shot',
'Cutting Layup Shot',
'Driving Reverse Layup Shot',
'Running Bank Hook Shot',
'Finger Roll Layup Shot'],
'shot_zone_range': ['16-24 ft.',
'Back Court Shot',
'24+ ft.',
'Less Than 8 ft.',
'8-16 ft.'],
'shot_zone_basic': ['Left Corner 3',
'Right Corner 3',
'Mid-Range',
'In The Paint (Non-RA)',
'Restricted Area',
'Backcourt',
'Above the Break 3'],
'event_type': ['Missed Shot',
'Made Shot'],
'shot_type': ['3PT Field Goal',
'2PT Field Goal']
}
def getCategoricalShotCategories():
    """Flatten the shot-chart taxonomy into 'category-type' header labels, in dict order."""
    return ['{}-{}'.format(category, c_type)
            for category in categoricalShotCategories
            for c_type in categoricalShotCategories[category]]
def getShotChartAverages(player,seasons):
    """Shot-chart feature vector: numeric averages followed by the relative
    frequency of every categorical shot type.

    Note: the categorical frequencies ignore *seasons* — they are computed over
    all of player.shotCharts; only the numeric averages are season-filtered.
    """
    result = []
    for category in numericalShotCategories:
        result.append(getAverage(player.shotCharts,category,seasons))
    # Zero-initialise a counter for every (category, type) pair so missing
    # types still produce a 0.0 frequency column.
    player_category_stats = {}
    for category in categoricalShotCategories:
        category_dict = {}
        for c_type in categoricalShotCategories[category]:
            category_dict[c_type] = 0
        player_category_stats[category] = category_dict
    for shot_chart in player.shotCharts:
        for category in categoricalShotCategories:
            c_type = shot_chart[category]
            player_category_stats[category][c_type] += 1
    shot_chart_count = float(len(player.shotCharts))
    # Emit frequencies in the same dict order getCategoricalShotCategories uses.
    for category in player_category_stats:
        category_dict = player_category_stats[category]
        for c_type in category_dict:
            val = category_dict[c_type]
            if shot_chart_count > 0:
                result.append(float(val) / shot_chart_count)
            else:
                result.append(0)
    #getDistinctValues(shotChartCategories,player.shotCharts)
    return result
def getTotalGamesPlayed(player, seasons):
    """Count the distinct games *player* has box-score rows for within *seasons*."""
    played = {row['game_id'] for row in player.boxScore if row['season'] in seasons}
    return len(played)
def getAveStats(players, games, seasons):
    """Build one stats row per player: id, name, position, games played, then
    all averaged box-score / advanced / shot-chart features.

    *games* is accepted but unused, kept for caller compatibility.
    """
    rows = []
    for p in players.values():
        row = [p.id, p.name, p.getPosition()]
        row.append(getTotalGamesPlayed(p, seasons))
        row.extend(getBoxScoreAverages(p, seasons))
        row.extend(getAdvancedAverages(p, seasons))
        row.extend(getShotChartAverages(p, seasons))
        rows.append(row)
    return rows
def getOutputName(seasons):
    """Build the output csv path, e.g. 'averages/2010.csv' or 'averages/2010-2016.csv'."""
    stem = seasons[0]
    if len(seasons) > 1:
        stem += '-' + seasons[-1]
    return 'averages/' + stem + '.csv'
def clean_row(row):
    """Remove commas from the name column (index 1) so the csv stays well-formed.

    Mutates *row* in place and returns it for convenience.
    """
    row[1] = ''.join(ch for ch in row[1] if ch != ',')
    return row
def outputStatsCsv(allPlayers,games,seasons):
    """Write per-player averaged stats for *seasons* to the csv named by
    getOutputName, filtering out low-sample and junk-position players.

    Row layout: [id, name, position, recorded_games, <box score...>,
    <advanced...>, <shot chart...>].
    """
    outputName = getOutputName(seasons)
    aveStats = getAveStats(allPlayers,games,seasons)
    header = ['id','name','position','recorded_games']
    header.extend(boxScoreCategories)
    header.extend(advancedCategories)
    header.extend(numericalShotCategories)
    header.extend(getCategoricalShotCategories())
    # Print "#<index> : <column>" (offset -3 so the stat columns start near 1)
    # as a lookup table for downstream feature indices.
    for i in range(0,len(header)):
        print('#{} : {}'.format(i-3,header[i]))
    with open(outputName,'w',newline='') as f:
        writer = csv.writer(f,delimiter=',')
        writer.writerow(header)
        for item in aveStats:
            row = clean_row(item)
            # Keep regular players only: >= 20 recorded games (row[3]),
            # >= 10 average minutes (row[4], first box-score column),
            # and a real position.
            if row[3] >= 20 and row[4] >= 10 and row[2] != 'None' and row[2] != 'Junk':
                writer.writerow(row)
def main():
    """Entry point: load raw data, dump shot-chart value counts, then write
    one averages CSV per season plus a combined 2010-2016 CSV."""
    games = loadGames()
    allPlayers = loadPlayers()
    # Pre-pass over every player; the return value is deliberately ignored.
    # Presumably getShotChartAverages fills the module-level valuesDict as a
    # side effect, since it is read right below -- TODO confirm.
    for player in allPlayers.values():
        getShotChartAverages(player,['2010','2011','2012','2013','2014','2015','2016'])
    with open('shotChartValues.txt','w',newline='') as f:
        for v in valuesDict:
            f.write(v + ': ' + str(valuesDict[v]) + '\n')
    print('done')
    outputStatsCsv(allPlayers,games,['2010'])
    outputStatsCsv(allPlayers,games,['2011'])
    outputStatsCsv(allPlayers,games,['2012'])
    outputStatsCsv(allPlayers,games,['2013'])
    outputStatsCsv(allPlayers,games,['2014'])
    outputStatsCsv(allPlayers,games,['2015'])
    outputStatsCsv(allPlayers,games,['2016'])
    outputStatsCsv(allPlayers,games,['2010','2011','2012','2013','2014','2015','2016'])
    #grouped_by_position = groupByPosition(allPlayers)
    #for position in grouped_by_position:
    #    print(position + ': ' + str(len(grouped_by_position[position])))
# Script entry point: rebuilds the per-season average CSVs when run directly.
if __name__ == '__main__':
    main()
| null | DataMiningProject/organizeNBAdata.py | organizeNBAdata.py | py | 12,231 | python | en | code | null | code-starcoder2 | 51 |
141092386 | from django.shortcuts import render
from django.http import HttpResponse, HttpRequest
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import cx_Oracle
import os
from django.views import generic
from django.db import connections
def get_roam_countries(request, msisdn):
    """Render the roaming-countries page for *msisdn*.

    Calls the Oracle stored procedure ``mytcell_lite_pack.get_roam_countries``
    through the pooled ``ppcdb_pooled`` connection, collects the country list
    plus the subscriber type and result/error codes, and renders the
    ``roaming.html`` template.

    Fix: all three cursors are now released in ``finally`` blocks, so an
    exception raised by ``callproc`` (or while reading results) no longer
    leaks pooled Oracle cursors.  The original closed ``cur``/``cursor`` only
    on the success path and never closed ``o_roam_countries`` at all.
    """
    cursor = connections['ppcdb_pooled'].cursor()
    try:
        cur = cursor.connection.cursor()
        try:
            # Fixed IN parameters of the stored procedure.
            i_subs_id = None
            i_int_request_id = 1
            i_client_app_type = "MyTcell_Lite_Web"
            i_client_app_version = "v1"
            # OUT parameters (bind variables plus a REF CURSOR for the list).
            o_st_id = cur.var(cx_Oracle.NUMBER)
            o_roam_countries = cursor.connection.cursor()
            try:
                o_exit_location_id = cur.var(cx_Oracle.STRING)
                o_responce_id = cur.var(cx_Oracle.NUMBER)
                o_result = cur.var(cx_Oracle.NUMBER)
                o_err_msg = cur.var(cx_Oracle.STRING)
                cur.callproc('mytcell_lite_pack.get_roam_countries', (
                    i_subs_id, msisdn, i_int_request_id, i_client_app_type, i_client_app_version, o_st_id,
                    o_roam_countries, o_exit_location_id, o_responce_id, o_result, o_err_msg))
                # Turn each fetched row into a column-name -> value dict.
                columns = [i[0] for i in o_roam_countries.description]
                countries = {}
                subs_type = {}
                countries['list'] = [dict(zip(columns, row)) for row in o_roam_countries]
                subs_type['subs_type'] = o_st_id.getvalue()
                countries['subs_info'] = [subs_type]
                countries['results'] = [{
                    'o_exit_location_id': o_exit_location_id.getvalue(),
                    'o_responce_id': int(o_responce_id.getvalue()),
                    'err_code': int(o_result.getvalue()),
                    'err_msg': o_err_msg.getvalue()
                }]
            finally:
                o_roam_countries.close()
        finally:
            cur.close()
    finally:
        cursor.close()
    return render(request, "mytcell_lite_app/roaming.html", context=countries)
| null | mytcell_lite_app/roam_countries_view_pooled.py | roam_countries_view_pooled.py | py | 1,715 | python | en | code | null | code-starcoder2 | 51 |
346248322 |
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Forward-Euler simulation of exponential decay: p' = -r * p,
    # starting from p = 10000 and plotting p over time.
    p=10000
    r=0.000120008
    dt=21 # time step (original comment: "change in time")
    pt=[]
    t=[]
    for i in range(int(10000/dt)):
        g= p*r # annual decay rate (amount lost per unit time)
        p=p-(g*dt)
        t.append(i*dt)
        pt.append(p)
        print(p)
    plt.plot(t,pt)
    plt.show()
| null | lab2.py | lab2.py | py | 348 | python | en | code | null | code-starcoder2 | 51 |
276959929 | # uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04)
# [GCC 8.4.0]
# Embedded file name: /home/hanzz/releases/odcs/server/odcs/server/events.py
# Compiled at: 2018-06-04 03:42:23
from threading import Lock
from sqlalchemy.orm import attributes
from logging import getLogger
# Root logger; relies on the hosting application's logging configuration.
log = getLogger()
# Serializes access to _cached_composes between the flush hook (which fills
# it) and the commit hook (which drains it).
_cache_lock = Lock()
# compose.id -> list of JSON snapshots taken each time that compose's state
# changed; drained and published after a successful commit.
_cached_composes = {}
def cache_composes_if_state_changed(session, flush_context):
    """Prepare outgoing messages when compose state is changed"""
    # Imported here rather than at module level -- presumably to avoid a
    # circular import with the models module.
    from odcs.server.models import Compose

    def state_changed(compose):
        # get_history reports the pending attribute changes on the ORM object;
        # 'unchanged' is falsy exactly when 'state' was modified.
        return not attributes.get_history(compose, 'state').unchanged

    candidates = [obj for obj in session.new | session.dirty
                  if isinstance(obj, Compose)]
    with _cache_lock:
        for compose in candidates:
            if state_changed(compose):
                _cached_composes.setdefault(compose.id, []).append(compose.json())
        log.debug('Cached composes to be sent due to state changed: %s', _cached_composes.keys())
def start_to_publish_messages(session):
    """Publish messages after data is committed to database successfully"""
    # Local import -- presumably to avoid a circular import at load time.
    import odcs.server.messaging as messaging
    with _cache_lock:
        msgs = []
        # One 'state-changed' message per cached compose snapshot.
        for compose_jsons in _cached_composes.values():
            for compose_json in compose_jsons:
                msgs.append({'event': 'state-changed',
                             'compose': compose_json})
        log.debug('Sending messages: %s', msgs)
        if msgs:
            try:
                messaging.publish(msgs)
            except Exception:
                # Best-effort: a message-bus outage must not fail the commit.
                log.exception('Cannot publish message to bus.')
        # Cache is cleared even if publishing failed (messages are dropped).
        _cached_composes.clear() | null | pycfiles/odcs-0.2.46.tar/events.py | events.py | py | 1,760 | python | en | code | null | code-starcoder2 | 51 |
84620143 | '''
Created on 2018. m�j. 29.
@author: H342541
'''
import geoplotlib
import pandas as pd
csv = pd.read_csv('../resource/summary_GoogleV3.csv', sep=';')
#csv = pd.read_csv('../resource/summary.csv', sep=';')
csvselect = csv[['Story_ID', 'Episode', 'lat', 'lon']]
csvgood = csvselect.dropna().reset_index(drop=True)
geoplotlib.dot(csvgood)
geoplotlib.labels(csvgood, 'Episode', font_size=10, anchor_x='center')
geoplotlib.show()
| null | VizTest/src/geoplotlib_plotting.py | geoplotlib_plotting.py | py | 432 | python | en | code | null | code-starcoder2 | 51 |
263208613 | """
LightStep's implementations of the basictracer Recorder API.
https://github.com/opentracing/basictracer-python
See the API definition for comments.
"""
from socket import error as socket_error
import atexit
import contextlib
import jsonpickle
import logging
import pprint
import ssl
import sys
import threading
import time
import warnings
from thrift import Thrift
from basictracer.recorder import SpanRecorder
from .crouton import ttypes
from . import constants, version as cruntime_version, util, connection as conn
class Recorder(SpanRecorder):
    """Recorder records and reports a BasicSpan to LightStep."""
    def __init__(self, **kwargs):
        # All keyword arguments are forwarded verbatim to Runtime
        # (access_token, service_host, ...); see Runtime below.
        self.runtime = Runtime(**kwargs)
    def record_span(self, span):
        """Per BasicSpan.record_span"""
        # Buffers the finished span; the Runtime's flush loop reports it.
        self.runtime._add_span(span)
    def flush(self):
        """Force a flush of buffered Span data to LightStep"""
        self.runtime.flush()
class LoggingRecorder(SpanRecorder):
    """LoggingRecorder prints all spans to stdout."""
    def __init__(self, *args, **kwargs):
        # Arguments are accepted for drop-in compatibility with Recorder but
        # ignored; only a local runtime guid is generated for display.
        self._runtime_guid = util._generate_guid()
    def record_span(self, span):
        """Per BasicSpan.record_span"""
        logs = []
        for log in span.logs:
            event = ""
            if len(log.key_values["event"]) > 0:
                # Don't allow for arbitrarily long log messages.
                if sys.getsizeof(log.key_values["event"]) > constants.MAX_LOG_MEMORY:
                    event = log.key_values["event"][:constants.MAX_LOG_LEN]
                else:
                    event = log.key_values["event"]
            # NOTE: long() makes this Python 2 only, consistent with the
            # basestring check in Runtime below.
            logs.append(ttypes.LogRecord(
                timestamp_micros=long(util._time_to_micros(log.timestamp)),
                stable_name=event,
                payload_json=log.key_values["payload"]))
        logging.info(
            'Reporting span %s \n with logs %s',
            self._pretty_span(span),
            self._pretty_logs(logs))
    def flush(self):
        """A noop for LoggingRecorder"""
        return
    def _pretty_span(self, span):
        """A helper to format a span for console logging"""
        # Rebinds 'span' to a plain dict of just the fields worth printing.
        span = {
            'trace_guid': span.context.trace_id,
            'span_guid': span.context.span_id,
            'runtime_guid': util._id_to_hex(self._runtime_guid),
            'span_name': span.operation_name,
            'oldest_micros': span.start_time,
            'youngest_micros': util._now_micros(),
        }
        return ''.join(['\n ' + attr + ": " + str(span[attr]) for attr in span])
    def _pretty_logs(self, logs):
        """A helper to format logs for console logging"""
        return ''.join(['\n ' + pprint.pformat(log) for log in logs])
class Runtime(object):
    """Instances of Runtime send spans to the LightStep collector.

    :param str group_name: name identifying the type of service that is being
        tracked
    :param str access_token: project's access token
    :param bool secure: whether HTTP connection is secure
    :param str service_host: Service host name
    :param int service_port: Service port number
    :param int max_span_records: Maximum number of spans records to buffer
    :param bool certificate_verification: if False, will ignore SSL
        certification verification (in ALL HTTPS calls, not just in this
        library) for the lifetime of this process; intended for debugging
        purposes only
    :param int periodic_flush_seconds: seconds between automatic background
        flushes; a value <= 0 disables the background flush thread entirely
    """
    def __init__(self,
                 group_name=None,
                 access_token='',
                 secure=True,
                 service_host="collector.lightstep.com",
                 service_port=443,
                 max_span_records=constants.DEFAULT_MAX_SPAN_RECORDS,
                 certificate_verification=True,
                 periodic_flush_seconds=constants.FLUSH_PERIOD_SECS):
        # Fail fast on a bad access token
        if isinstance(access_token, basestring) == False:
            raise Exception('access_token must be a string')
        if certificate_verification is False:
            warnings.warn('SSL CERTIFICATE VERIFICATION turned off. ALL FUTURE HTTPS calls will be unverified.')
            # Process-wide side effect: disables verification for every
            # HTTPS call in this interpreter, not just this library.
            ssl._create_default_https_context = ssl._create_unverified_context
        if group_name is None:
            group_name = sys.argv[0]
        # Thrift runtime configuration
        self.guid = util._generate_guid()
        timestamp = util._now_micros()
        version = '.'.join(map(str, sys.version_info[0:3]))
        attrs = [
            ttypes.KeyValue("cruntime_platform", "python"),
            ttypes.KeyValue("cruntime_version", cruntime_version.CRUNTIME_VERSION),
            ttypes.KeyValue("python_version", version),
        ]
        # Thrift is picky about the types being correct, so we're explicit here
        self._runtime = ttypes.Runtime(
            util._id_to_hex(self.guid),
            long(timestamp),
            str(group_name),
            attrs)
        self._service_url = util._service_url_from_hostport(secure,
                                                            service_host,
                                                            service_port)
        self._auth = ttypes.Auth(access_token)
        # Guards _span_records, which is shared with the flush thread.
        self._mutex = threading.Lock()
        self._span_records = []
        self._max_span_records = max_span_records
        self._disabled_runtime = False
        # Best-effort final flush when the interpreter exits.
        atexit.register(self.shutdown)
        self._periodic_flush_seconds = periodic_flush_seconds
        if self._periodic_flush_seconds <= 0:
            warnings.warn(
                'Runtime(periodic_flush_seconds={0}) means we will never flush to lightstep unless explicitly requested.'.format(
                    self._periodic_flush_seconds))
            self._flush_connection = None
        else:
            self._flush_connection = conn._Connection(self._service_url)
            self._flush_connection.open()
            # Daemon thread: will not keep the process alive on its own.
            self._flush_thread = threading.Thread(target=self._flush_periodically,
                                                  name=constants.FLUSH_THREAD_NAME)
            self._flush_thread.daemon = True
            self._flush_thread.start()

    def shutdown(self, flush=True):
        """Shutdown the Runtime's connection by (optionally) flushing the
        remaining logs and spans and then disabling the Runtime.

        Note: spans and logs will no longer be reported after shutdown is called.

        Returns whether the data was successfully flushed.
        """
        # Closing connection twice results in an error. Exit early
        # if runtime has already been disabled.
        if self._disabled_runtime:
            return False
        # BUG FIX: 'flushed' was previously unbound when flush=False, so the
        # 'return flushed' below raised NameError. That path is actually taken:
        # _flush_worker calls shutdown(flush=False) on a collector 'disable'
        # command. Default to False.
        flushed = False
        if flush:
            flushed = self.flush()
        if self._flush_connection:
            self._flush_connection.close()
        self._disabled_runtime = True
        return flushed

    def flush(self, connection=None):
        """Immediately send unreported data to the server.

        Calling flush() will ensure that any current unreported data will be
        immediately sent to the host server.

        If connection is not specified, the report will sent to the server
        passed in to __init__. Note that custom connections are currently used
        for unit testing against a mocked connection.

        Returns whether the data was successfully flushed.
        """
        if self._disabled_runtime:
            return False
        if connection is not None:
            return self._flush_worker(connection)
        return self._flush_worker(self._flush_connection)

    def _flush_periodically(self):
        """Periodically send reports to the server.

        Runs in a dedicated daemon thread (self._flush_thread).
        """
        # Open the connection
        while not self._disabled_runtime and not self._flush_connection.ready:
            time.sleep(self._periodic_flush_seconds)
            self._flush_connection.open()
        # Send data until we get disabled
        while not self._disabled_runtime:
            self._flush_worker(self._flush_connection)
            time.sleep(self._periodic_flush_seconds)

    def _flush_worker(self, connection):
        """Use the given connection to transmit the current logs and spans as a
        report request."""
        if connection == None:
            return False
        # If the connection is not ready, try reestablishing it. If that
        # fails just wait until the next flush attempt to try again.
        if not connection.ready:
            connection.open()
        if not connection.ready:
            return False
        report_request = self._construct_report_request()
        try:
            resp = connection.report(self._auth, report_request)
            # The resp may be None on failed reports
            if resp is not None:
                if resp.commands is not None:
                    for command in resp.commands:
                        if command.disable:
                            # Collector told us to stop; skip the final flush.
                            self.shutdown(flush=False)
            # Return whether we sent any span data
            return len(report_request.span_records) > 0
        except Exception:
            # Put the unsent spans back in the buffer for the next attempt.
            self._restore_spans(report_request.span_records)
            return False

    def _construct_report_request(self):
        """Construct a report request.

        Atomically swaps the span buffer out, then JSON-encodes each log
        payload (with a fallback marker when encoding fails).
        """
        report = None
        with self._mutex:
            report = ttypes.ReportRequest(self._runtime, self._span_records,
                                          None)
            self._span_records = []
        for span in report.span_records:
            for log in span.log_records:
                index = span.log_records.index(log)
                if log.payload_json is not None:
                    try:
                        log.payload_json = \
                            jsonpickle.encode(log.payload_json,
                                              unpicklable=False,
                                              make_refs=False,
                                              max_depth=constants.JSON_MAX_DEPTH)
                    except:
                        # Never let a bad payload break reporting.
                        log.payload_json = jsonpickle.encode(constants.JSON_FAIL)
                span.log_records[index] = log
        return report

    def _add_span(self, span):
        """Safely add a span to the buffer.

        Will delete a previously-added span if the limit has been reached.
        """
        if self._disabled_runtime:
            return
        # Checking the len() here *could* result in a span getting dropped that
        # might have fit if a report started before the append(). This would only
        # happen if the client lib was being saturated anyway (and likely
        # dropping spans). But on the plus side, having the check here avoids
        # doing a span conversion when the span will just be dropped while also
        # keeping the lock scope minimized.
        with self._mutex:
            if len(self._span_records) >= self._max_span_records:
                return
        span_record = ttypes.SpanRecord(
            trace_guid=util._id_to_hex(span.context.trace_id),
            span_guid=util._id_to_hex(span.context.span_id),
            runtime_guid=util._id_to_hex(span._tracer.recorder.runtime.guid),
            span_name=str(span.operation_name),
            join_ids=[],
            oldest_micros=long(util._time_to_micros(span.start_time)),
            youngest_micros=long(util._time_to_micros(span.start_time + span.duration)),
            attributes=[],
            log_records=[]
        )
        if span.parent_id != None:
            span_record.attributes.append(
                ttypes.KeyValue(
                    constants.PARENT_SPAN_GUID,
                    util._id_to_hex(span.parent_id)))
        if span.tags:
            for key in span.tags:
                # Tags with the join-id prefix become trace join ids; all
                # other tags become plain attributes.
                if key[:len(constants.JOIN_ID_TAG_PREFIX)] == constants.JOIN_ID_TAG_PREFIX:
                    span_record.join_ids.append(ttypes.TraceJoinId(key, span.tags[key]))
                else:
                    span_record.attributes.append(ttypes.KeyValue(key, span.tags[key]))
        for log in span.logs:
            event = ""
            if len(log.key_values["event"]) > 0:
                # Don't allow for arbitrarily long log messages.
                if sys.getsizeof(log.key_values["event"]) > constants.MAX_LOG_MEMORY:
                    event = log.key_values["event"][:constants.MAX_LOG_LEN]
                else:
                    event = log.key_values["event"]
            span_record.log_records.append(ttypes.LogRecord(
                timestamp_micros=long(util._time_to_micros(log.timestamp)),
                stable_name=event,
                payload_json=log.key_values["payload"]))
        with self._mutex:
            # Re-check the cap: it may have been reached while converting.
            if len(self._span_records) < self._max_span_records:
                self._span_records.append(span_record)

    def _restore_spans(self, span_records):
        """Called after a flush error to move records back into the buffer
        """
        if self._disabled_runtime:
            return
        with self._mutex:
            if len(self._span_records) >= self._max_span_records:
                return
            # Keep at most _max_span_records, preferring the newest records.
            combined = span_records + self._span_records
            self._span_records = combined[-self._max_span_records:]
| null | lightstep/recorder.py | recorder.py | py | 13,381 | python | en | code | null | code-starcoder2 | 51 |
161931935 | # Databricks notebook source
# COMMAND ----------
from datetime import datetime
import pytz
from pytz import timezone
mnt = "/mnt/entadls"
DLLocation = mnt+"/curated/internal/product/rgis/"
DLLocationArchive = mnt+"/curated/internal/product/rgis/archive/"
fileName = "RGISProductDetails.csv"
# Today's date in US/Pacific (UTC "now" converted to Pacific), as YYYY-MM-DD.
todaydate = datetime.now(tz=pytz.utc).astimezone(timezone('US/Pacific')).strftime('%Y-%m-%d')
NewFileName = "RGISProductDetails_"+todaydate+".csv"
# FileLocation = "wasbs://"+STORAGE_CONTAINER+"@"+STORAGE_ACCOUNT_ENV
# containerfiles = dbutils.fs.ls(FileLocation)
# List the Data Lake folder and stop at the first matching file name.
# (dbutils is injected by the Databricks runtime, not imported.)
DLFiles = dbutils.fs.ls(DLLocation)
for file in DLFiles:
    if fileName in file.name:
        print(f'File {fileName} exists')
        break
#Archive the existing file in DL
# dbutils.fs.mv(DLLocation+fileName, DLLocationArchive+NewFileName)
# dbutils.fs.cp(file.path, DLLocation)
# COMMAND ----------
| null | C1-SIT3/mplk_automation/rgis/RGISProductDetails.py | RGISProductDetails.py | py | 872 | python | en | code | null | code-starcoder2 | 50 |
453141117 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\routing\route_events\route_event_context.py
# Compiled at: 2020-10-06 22:03:05
# Size of source mod 2**32: 41496 bytes
from _collections import defaultdict
from random import shuffle, random, uniform
import operator, weakref
from event_testing.resolver import SingleSimResolver, SingleObjectResolver
from routing import PathNodeTransition
from routing.portals.portal_tuning import PortalType
from routing.route_enums import RouteEventType
from routing.route_events.route_event_mixins import RouteEventBase
from routing.route_events.route_event_utils import RouteEventSchedulePreference
import gsi_handlers, objects, sims4.log, sims4.math
logger = sims4.log.Logger('RouteEvents', default_owner='rmccord')
# Scale applied to a duration for a "fake" straight route event --
# NOTE(review): not referenced anywhere in this module chunk; confirm usage.
DURATION_SCALING_FOR_FAKE_STRAIGHT_ROUTE_EVENT = 0.25
class _RouteDurationBucket:
def __init__(self, start_time, end_time):
self.start_time = start_time
self.end_time = end_time
def __repr__(self):
return '<{} - {}>'.format(self.start_time, self.end_time)
def __eq__(self, other):
return self.start_time == other.start_time and self.end_time == other.end_time
def __contains__(self, time):
return self.start_time <= time <= self.end_time
@property
def duration(self):
return self.end_time - self.start_time
class _RouteDuration:
    """Tracks the free (unscheduled) time of a route as an ordered list of
    _RouteDurationBucket windows and carves route events out of them.

    NOTE(review): this module is decompiled (uncompyle6) output; some control
    flow below looks mangled by the decompiler and should be treated with
    suspicion rather than as authoritative original source.
    """
    # Leftover slivers shorter than this are merged away instead of kept as
    # their own bucket.
    MINIMUM_BUCKET_SIZE = 0.1
    def __init__(self, start_time, end_time):
        self.start_time = start_time
        self.end_time = end_time
        # Initially the whole [start_time, end_time] window is free.
        self.buckets = [_RouteDurationBucket(start_time, end_time)]
    def add_event(self, route_event, time):
        # Reserve [time, time + duration] by splitting the bucket containing
        # 'time'; silently does nothing when no bucket contains it.
        end_time = time + route_event.duration
        bucket = None
        for idx, bucket in enumerate(self.buckets):
            if time in bucket:
                break
        else:
            # for/else: no bucket contains 'time'.
            return
        next_bucket = _RouteDurationBucket(end_time, bucket.end_time)
        bucket.end_time = time
        kept = 1
        if bucket.start_time >= bucket.end_time:
            # The leading remainder collapsed to nothing; drop it.
            kept = 0
            self.buckets.remove(bucket)
        if next_bucket.start_time < next_bucket.end_time:
            self.buckets.insert(idx + kept, next_bucket)
    def fill_with_route_events(self, route_event_timing):
        """Reserve time for already-scheduled (event, time) pairs; returns how
        many were applied."""
        num_filled = 0
        if self.buckets:
            for route_event, time in route_event_timing:
                # Events before the earliest free window are ignored.
                if time < self.buckets[0].start_time:
                    continue
                self.add_event(route_event, time)
                num_filled += 1
        return num_filled
    def _get_start_time_for_straight_path_event(self, path, bucket, duration, offset_time, straight_duration, earliest_time, schedule_preference, specific_time=None):
        # Look for a path segment (pair of consecutive nodes) inside 'bucket'
        # long enough to host the straight-line portion of the event.
        adjusted_bucket_start = max(bucket.start_time, earliest_time)
        start_index = max(0, path.node_at_time(bucket.start_time).index - 1)
        end_index = max(start_index, path.node_at_time(bucket.end_time).index)
        indices = list(range(start_index + 1, end_index + 1))
        if schedule_preference == RouteEventSchedulePreference.RANDOM:
            shuffle(indices)
        for index in indices:
            cur_node = path.nodes[index]
            prev_node = path.nodes[(index - 1)]
            start_time = max(adjusted_bucket_start, prev_node.time)
            end_time = min(bucket.end_time, cur_node.time)
            segment_time = end_time - start_time
            if segment_time < straight_duration:
                continue
            else:
                straight_path_earliest_start = start_time
                if start_time - offset_time < adjusted_bucket_start:
                    # NOTE(review): after clamping the earliest start, control
                    # falls through to the next segment without computing a
                    # latest start -- likely decompiler damage; confirm against
                    # the original source.
                    straight_path_earliest_start = adjusted_bucket_start + offset_time
                else:
                    straight_path_latest_start = end_time - straight_duration
                    duration_after_straight_path_start = duration - offset_time
                    if straight_path_latest_start + duration_after_straight_path_start > bucket.end_time:
                        straight_path_latest_start = bucket.end_time - duration_after_straight_path_start
                    if straight_path_latest_start < straight_path_earliest_start:
                        continue
                    if specific_time is not None:
                        # Caller requested an exact time; accept it only when
                        # its straight portion fits in this segment.
                        specific_straight_path_start = specific_time + offset_time
                        if specific_straight_path_start >= straight_path_earliest_start and specific_straight_path_start <= straight_path_latest_start:
                            return specific_time
                        continue
                    elif schedule_preference == RouteEventSchedulePreference.RANDOM:
                        straight_path_start = uniform(straight_path_earliest_start, straight_path_latest_start)
                    else:
                        straight_path_start = straight_path_earliest_start
                    return straight_path_start - offset_time
    def fill_and_get_start_time_for_route_event(self, route_event, path, repeat_event=False, schedule_preference=RouteEventSchedulePreference.BEGINNING):
        """Find a start time for *route_event*, reserve it, and return it
        (None when the event cannot be placed)."""
        if not route_event.event_data.is_valid_for_scheduling(path.sim, path):
            return
        duration = route_event.duration
        time = route_event.time
        earliest_time = route_event.earliest_repeat_time if repeat_event else 0
        if not repeat_event:
            if route_event.scheduling_override is not None:
                schedule_preference = route_event.scheduling_override
        # Order the candidate buckets according to the preference.
        if schedule_preference == RouteEventSchedulePreference.BEGINNING:
            buckets = sorted((self.buckets), key=(operator.attrgetter('start_time')))
        else:
            if schedule_preference == RouteEventSchedulePreference.END:
                buckets = sorted((self.buckets), key=(operator.attrgetter('start_time')), reverse=True)
            else:
                if schedule_preference == RouteEventSchedulePreference.RANDOM:
                    buckets = list(self.buckets)
                    shuffle(buckets)
                else:
                    # NOTE(review): the straight-path setup and the bucket scan
                    # below appear nested under this final 'else' in the
                    # decompiled output, which would skip them for the named
                    # preferences -- almost certainly decompiler damage.
                    straight_path_tuning = route_event.prefer_straight_paths
                    if straight_path_tuning is not None:
                        straight_percentage = straight_path_tuning.straight_path_percentage
                        straight_duration = duration * straight_percentage
                        if straight_path_tuning.straight_path_offset is not None:
                            offset_time = duration * route_event.prefer_straight_paths.straight_path_offset
                        else:
                            offset_time = duration * (0.5 - straight_percentage * 0.5)
                    for idx, bucket in enumerate(buckets):
                        if bucket.duration < duration:
                            continue
                        if bucket.end_time - duration < earliest_time:
                            continue
                        if time is not None:
                            if bucket.start_time > time or bucket.end_time - duration < time:
                                continue
                        if straight_path_tuning is None:
                            if time is not None:
                                break
                            if schedule_preference == RouteEventSchedulePreference.BEGINNING:
                                time = max(bucket.start_time, earliest_time)
                            else:
                                if schedule_preference == RouteEventSchedulePreference.END:
                                    time = bucket.end_time - duration
                                else:
                                    start_time = max(bucket.start_time, earliest_time)
                                    time = uniform(start_time, bucket.end_time - duration)
                            break
                        else:
                            time = self._get_start_time_for_straight_path_event(path, bucket, duration, offset_time, straight_duration, earliest_time, schedule_preference, specific_time=time)
                            if time is not None:
                                break
                    else:
                        # for/else: no bucket could host the event.
                        return
                    # Reserve the chosen window, merging away tiny remainders.
                    end_time = time + duration
                    if time - bucket.start_time < _RouteDuration.MINIMUM_BUCKET_SIZE:
                        bucket.start_time = end_time
                    else:
                        if bucket.end_time - end_time < _RouteDuration.MINIMUM_BUCKET_SIZE:
                            bucket.end_time = time
                        else:
                            next_bucket = _RouteDurationBucket(end_time, bucket.end_time)
                            bucket.end_time = time
                            self.buckets.insert(idx + 1, next_bucket)
                    return time
class RouteEventContext:
ROUTE_TRIM_START = 0.25
ROUTE_TRIM_END = 0
ROUTE_TRIM_DURATION = ROUTE_TRIM_START + ROUTE_TRIM_END
ROUTE_EVENT_SCHEDULED_CAP = 50
ROUTE_EVENT_CAPPED_COOLDOWN_THRESHOLD = 25
class _RouteEventSchedulingData(RouteEventBase):
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self.prefer_straight_paths = False
self.scheduling_override = None
self.earliest_repeat_time = 0
def copy_from(self, other):
super().copy_from(other)
self.scheduling_override = other.scheduling_override
self.prefer_straight_paths = other.prefer_straight_paths
def __init__(self):
self._route_events_to_schedule = defaultdict(list)
self._scheduled_events = []
self._events_already_considered = defaultdict(set)
self._has_hit_cap = False
def has_pending_events_to_process(self):
return any((not route_event.processed for route_event, _ in self._scheduled_events))
def add_route_event(self, route_event_type, route_event):
logger.debug('ADDED: {}', route_event)
self._route_events_to_schedule[route_event_type].append(route_event)
def remove_route_event(self, route_event, time):
self._scheduled_events.remove((route_event, time))
def clear_route_events(self):
self._route_events_to_schedule.clear()
self._scheduled_events.clear()
self._events_already_considered.clear()
self._has_hit_cap = False
def has_scheduled_events(self):
if self._scheduled_events:
return True
return False
def handle_route_event_executed(self, event_id, actor, path=None):
for route_event, time in self._scheduled_events:
if route_event.id == event_id:
route_event.on_executed(actor, path=path)
break
else:
return False
if path is not None:
if gsi_handlers.route_event_handlers.archiver.enabled or gsi_handlers.routing_handlers.sim_route_archiver.enabled:
gsi_handlers.route_event_handlers.gsi_route_event_executed(path, actor, route_event)
if route_event.event_data.should_remove_on_execute():
self.remove_route_event(route_event, time)
return True
def handle_route_event_skipped(self, event_id, actor, path=None):
for route_event, time in self._scheduled_events:
if route_event.id == event_id:
self.remove_route_event(route_event, time)
return True
return False
def remove_route_event_by_data(self, event_data):
for route_event, time in self._scheduled_events:
if route_event.event_data is event_data:
self.remove_route_event(route_event, time)
return
def route_event_already_scheduled(self, route_event_cls, time=None, provider=None, epsilon_override=None):
for route_event, event_time in self._scheduled_events:
if route_event_cls is type(route_event) and route_event.provider is provider:
epsilon = epsilon_override if epsilon_override is not None else route_event.duration
if time is not None:
if not sims4.math.almost_equal(time, event_time, epsilon=epsilon):
continue
return True
return False
def route_event_of_data_type_gen(self, route_event_data_cls):
for route_event, _ in self._scheduled_events:
if route_event_data_cls is type(route_event.event_data):
yield route_event
def route_event_already_fully_considered(self, route_event_cls, provider):
provider_ref = weakref.ref(provider)
if provider_ref not in self._events_already_considered:
return False
return route_event_cls in self._events_already_considered[provider_ref]
def prune_stale_events_and_get_failed_types(self, actor, path, current_time):
self._route_events_to_schedule.clear()
failed_events = []
failed_event_types = set()
for route_event, time in self._scheduled_events:
if not type(route_event) in failed_event_types:
route_event.test((route_event.get_resolver(actor)), from_update=True) or failed_events.append((route_event, time))
failed_event_types.add(type(route_event))
gsi_path_log = None
if gsi_handlers.route_event_handlers.archiver.enabled:
gsi_path_log = gsi_handlers.route_event_handlers.get_path_route_events_log(path)
for route_event, time in failed_events:
if gsi_path_log is not None:
gsi_event_data = {'status': 'Removed'}
gsi_handlers.route_event_handlers.gsi_fill_route_event_data(route_event, gsi_path_log, gsi_event_data)
self.remove_route_event(route_event, time)
return (failed_events, failed_event_types)
def _test_gathered_events_for_chance(self, actor):
if actor.is_sim:
resolver = SingleSimResolver(actor.sim_info)
else:
resolver = SingleObjectResolver(actor)
for route_event_type, route_events in self._route_events_to_schedule.items():
for route_event in tuple(route_events):
self._events_already_considered[route_event.provider_ref].add(type(route_event))
if route_event_type != RouteEventType.LOW_REPEAT and random() > route_event.chance.get_chance(resolver):
route_events.remove(route_event)
def schedule_route_events(self, actor, path, failed_event_types=None, start_time=0):
total_duration = path.duration()
if total_duration <= RouteEventContext.ROUTE_TRIM_DURATION:
return
else:
num_route_events = 0
added_events = []
start_time = start_time + RouteEventContext.ROUTE_TRIM_START
end_time = total_duration - RouteEventContext.ROUTE_TRIM_END
time_buckets = _RouteDuration(start_time, end_time)
num_route_events += time_buckets.fill_with_route_events(self._scheduled_events)
self._test_gathered_events_for_chance(actor)
def _schedule_route_events(route_event_priority, schedule_preference=RouteEventSchedulePreference.BEGINNING):
nonlocal num_route_events
for route_event in self._route_events_to_schedule[route_event_priority]:
if failed_event_types is not None:
if type(route_event) in failed_event_types:
continue
route_event.prepare_route_event(actor)
time = time_buckets.fill_and_get_start_time_for_route_event(route_event, path=path, schedule_preference=schedule_preference)
if time is not None:
route_event.time = time
added_events.append((route_event, time))
num_route_events += 1
def _add_path_based_route_events(route_event_scheduling_type, transition_type):
route_events = self._route_events_to_schedule[route_event_scheduling_type]
if not route_events:
return
if len(route_events) > 1:
logger.warn('Got multiple path-based route events of the same type {}. This is not supported', route_event_scheduling_type)
origin_route_event = route_events[0]
route_event_type = type(origin_route_event)
self._route_events_to_schedule[route_event_scheduling_type].clear()
for node in path.get_transition_tagged_nodes_gen(transition_type):
route_event = route_event_type(time=node.time, **origin_route_event.route_event_parameters)
self._route_events_to_schedule[route_event_scheduling_type].append(route_event)
if actor.is_sim:
resolver = SingleSimResolver(actor.sim_info)
else:
resolver = SingleObjectResolver(actor)
for route_event_type in reversed(RouteEventType):
if route_event_type == RouteEventType.FIRST_INDOOR:
_add_path_based_route_events(route_event_type, PathNodeTransition.PATH_NODE_TRANSITION_FIRST_INDOOR)
_schedule_route_events(route_event_type)
elif route_event_type == RouteEventType.LAST_INDOOR:
_add_path_based_route_events(route_event_type, PathNodeTransition.PATH_NODE_TRANSITION_LAST_INDOOR)
_schedule_route_events(route_event_type)
elif route_event_type == RouteEventType.FIRST_OUTDOOR:
_schedule_route_events(route_event_type)
elif route_event_type == RouteEventType.LAST_OUTDOOR:
_schedule_route_events(route_event_type)
elif route_event_type == RouteEventType.INTERACTION_PRE:
_schedule_route_events(route_event_type, schedule_preference=(RouteEventSchedulePreference.END))
elif route_event_type == RouteEventType.INTERACTION_POST:
_schedule_route_events(route_event_type)
elif route_event_type == RouteEventType.BROADCASTER:
_schedule_route_events(route_event_type)
else:
if route_event_type == RouteEventType.LOW_SINGLE or route_event_type == RouteEventType.HIGH_SINGLE:
shuffle(self._route_events_to_schedule[route_event_type])
_schedule_route_events(route_event_type, schedule_preference=(RouteEventSchedulePreference.RANDOM))
portal_events = []
for route_event, time in added_events:
portal_event = None
if route_event.duration:
if not route_event.allowed_at_animated_portal:
start_index = path.node_at_time(time).index - 1
start_index = 0 if start_index < 0 else start_index
end_index = path.node_at_time(time + route_event.duration).index
end_index = start_index if end_index < start_index else end_index
event_nodes = [path.nodes[index] for index in range(start_index, end_index)]
for node in event_nodes:
if node.portal_id and node.portal_object_id:
portal_object = objects.system.find_object(node.portal_object_id)
if portal_object is not None and portal_object.get_portal_type(node.portal_id) != PortalType.PortalType_Walk:
portal_event = (
route_event, time)
break
if portal_event is not None:
portal_events.append(portal_event)
for portal_event in portal_events:
added_events.remove(portal_event)
self.gsi_update_route_events(path, added_events, start_time)
self._scheduled_events.extend(added_events)
logger.debug('{} scheduled {} of {} route events.', actor, len(self._scheduled_events), len(self._route_events_to_schedule.values()))
    def gsi_update_route_events(self, path, added_events, start_time):
        # Mirror the route-event schedule into the GSI debug log, but only
        # when one of the GSI archivers is actually enabled (cheap early out).
        gsi_path_log = None
        if gsi_handlers.route_event_handlers.archiver.enabled or gsi_handlers.routing_handlers.sim_route_archiver.enabled:
            gsi_path_log = gsi_handlers.route_event_handlers.get_path_route_events_log(path)
        if gsi_path_log is not None:
            # Events scheduled on earlier passes: 'Past' if their time has
            # already elapsed relative to start_time, otherwise 'Persisted'.
            for route_event, time in self._scheduled_events:
                gsi_event_data = {}
                if time <= start_time:
                    gsi_event_data['status'] = 'Past'
                else:
                    gsi_event_data['status'] = 'Persisted'
                gsi_handlers.route_event_handlers.gsi_fill_route_event_data(route_event, gsi_path_log, gsi_event_data)
            # Events added during this scheduling pass are logged as 'Added'
            # and not yet executed.
            for route_event, time in added_events:
                gsi_event_data = {'status':'Added',
                 'executed':False}
                gsi_handlers.route_event_handlers.gsi_fill_route_event_data(route_event, gsi_path_log, gsi_event_data)
def process_route_events(self, actor):
for route_event, time in self._scheduled_events:
route_event.process(actor, time)
def append_route_events_to_route_msg(self, route_msg):
for route_event, time in self._scheduled_events:
route_event.build_route_event_msg(route_msg, time) | null | Scripts/simulation/routing/route_events/route_event_context.py | route_event_context.py | py | 21,562 | python | en | code | null | code-starcoder2 | 50 |
572297101 | # -*- coding:utf-8 -*-
# 题目描述
# 给定一个double类型的浮点数base和int类型的整数exponent。求base的exponent次方。
# @href https://www.nowcoder.com/practice/1a834e5e3e1a4b7ba251417554e07c00
class Solution:
    def Power(self, base, exponent):
        """Return base raised to the integer power *exponent*.

        Uses exponentiation by squaring: O(log |exponent|) multiplications
        instead of the original O(|exponent|) loop.  A negative exponent
        inverts the base first, so base must be non-zero in that case
        (otherwise ZeroDivisionError, matching the original behaviour).
        """
        if exponent < 0:
            base = 1 / base
            exponent = -exponent
        result = 1
        while exponent:
            if exponent & 1:      # current lowest bit set -> include this square
                result *= base
            base *= base          # square for the next bit
            exponent >>= 1
        return result
return res
# Demo call: 2 ** -3 == 0.125
s = Solution()
print(s.Power(2, -3))
| null | src/main/java/Nowcoder/S12.py | S12.py | py | 541 | python | en | code | null | code-starcoder2 | 50 |
520597428 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 24 12:55:49 2014
@author: sblakeley
"""
def is_multiple(a, b):
    """Print and return whether *b* is a multiple of *a*.

    Printing 'True'/'False' preserves the original script behaviour; the
    boolean is additionally returned so callers can use the result
    programmatically.  Treats 0 as a multiple only of 0, which avoids the
    ZeroDivisionError the original raised for a == 0.
    """
    if a == 0:
        result = (b == 0)
    else:
        result = (b % a == 0)
    print(result)
    return result
is_multiple(3,9)  # demo: prints True
| null | exercices/203/solution.py | solution.py | py | 198 | python | en | code | null | code-starcoder2 | 50 |
391987982 | ###############################################################################
#SpartaHack V:
#Project Personal Assistan Maker
#class Assistant()
# age_type function
# emotional_support function
# support sounds different depending on age
# schedule function
#end 7-day trial
###############################################################################
import random
# Weekday lookup tables used by the scheduler in main():
# ordered names, 1-based day number -> name, and name -> day number.
day_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"\
, "Sunday"]
num_days = {1:"Monday", 2:"Tuesday", 3:"Wednesday", 4:"Thursday", 5: "Friday",\
            6:"Saturday", 7:"Sunday"}
day_num = {"Monday":1, "Tuesday":2, "Wednesday":3, "Thursday":4, "Friday":5,\
           "Saturday":6, "Sunday":7}
class Assistant(object):
    """A toy personal assistant with a name, gender and age (kept as a string).

    The age maps to an "age type" bucket (child/adolescent/new/adult/senior)
    which changes the tone of the canned emotional-support quotes.
    """
    def __init__(self,name = "Pam",gender = "female", age = "18"):
        self.name = name        # display name used in all printed dialogue
        self.gender = gender    # 'female' or 'male' (validated by the caller)
        self.age = age          # numeric string; converted with int() in age_type()
    def __str__(self):
        return self.name
    def age_type(self):
        """Classify self.age into one of: child, adolescent, new, adult, senior."""
        age_num = int(self.age)
        if age_num <= 10:
            return 'child'
        elif age_num >= 60:
            return 'senior'
        elif age_num >=11 and age_num <= 18:
            return 'adolescent'
        elif age_num >= 19 and age_num <= 26:
            return 'new'
        else:
            return 'adult'
    def emotional_support(self, mood, age_type):
        """Return one of three canned quotes matching *mood*, toned by *age_type*.

        The default (adult) quote table below is replaced wholesale when
        age_type is 'new', 'child', 'senior' or 'adolescent'; any mood other
        than 'happy'/'sad' falls through to the 'content' quotes.
        """
        ##dictionary
        selection = random.randint(0,2)  # pick one of the three quotes per mood
        mood_dict = {"happy":["Sanity and happiness are an impossible combination.",\
        "No medicine cures what happiness cannot.", \
        "Happiness is only real when shared " ],"sad": \
        ["Sadness flies away on the wings of time.", "Tears are words that need to be written."\
        , "Any fool can be happy. It takes a man with real heart to make beauty out of the stuff that makes us weep."],\
        "content":["nobody got anywhere in this world by just being content",\
        "Only I can change my life for me. No one else can do that for me",\
        "With the new day comes new strength and new thoughts"]}
        if age_type == 'new':
            mood_dict = {"happy":["Be happy for this moment. This moment is your life.","The secret to happiness is freedom. And the secre to freedom is courage.","Family and friendships are two of the greatest facilitators of happiness."],"sad":["Friends show thier love in time of trouble not in happiness.","The good times of today, are the sad thoughts of tomorrow.","Tears come from the heart and not from the brain."],"content":["Develop a passion for learning. If you do, you will never cease to grow.", "I have no special talent. I am only passionately curious.", "You have to stay in school. You have to. You have to go to college. You have to get your degree. Because that's the one thing people can't take away from you is your education. And it is worth the investment. - Michelle Obama"]}
        if age_type == 'child':
            mood_dict = {"happy":["WOOOO I am always happy too!", "My mom says to always smile :)","A smile a day keeps me happy!"],"sad":["Mommy says Santa will be mad if you are sad.", "Dad doesn't like to see people sad, he always says you should be mad rather than sad atleast.", "Elmo loves you too!"],"content":["Yeah its boring.","At least we aren't grounded.", "Momma says to always be happy."]}
        if age_type == 'senior':
            mood_dict = {"happy":["Cherish all your happy moments; they make a fine cushion for old age", "If you ever wanted to be a kid again wait til your old. You don't have to do anything anymore!","I will never giv in to old age until I become old. And I'm not old yet!"],"sad":["Old age is the most unexpected of all things that happen to a man.", "Youth is a blunder; Manhood a struggle, Old Age a regret","Old age is not a mteer for sorrow. It is matter for thanks if we have left our work done behind us."],"content":["Be eccentric now. Don't wait for old age to wear purple.","Old age has its pleasures, which, though different, are notless than the pleasures of youth.", "Naps are great no matter old or young!"]}
        if age_type == 'adolescent':
            mood_dict = {"happy":["Keep tue to the dream of your youth.", "Snow days are a sign god is real.", "That glow up tho!"],"sad":["Telling a teenager the facts of life is like giving a fish a bath.", "Adolescence is just one big walking pimple.", "Homework sucks, I just want to sleep."],"content":["We never really group up, we only learn how to act in public.", "It takes courage to grow up and become who you really are.", "HALF DAY!"]}
        if mood.lower() == 'happy':
            return mood_dict["happy"][selection]
        elif mood.lower() == 'sad':
            return mood_dict['sad'][selection]
        else :
            return mood_dict['content'][selection]
    def schedule(self):
        # Placeholder: scheduling is implemented procedurally in main(), not here.
        return "LOL"
# def morning_meme(self):
# return image
def main():
    """Interactive 7-day "personal assistant" trial.

    Flow: collect user name -> configure the assistant (gender, name, age)
    -> loop over 7 days, each day possibly skipped by a random age-dependent
    mishap, otherwise offering a mood quote, today's reminders, and a
    scheduling dialogue for later days of the week.
    """
    print("Welcome to your one week trial to the personal assistant maker. Where we will create both a secretary and friend.")
    your_assistant = Assistant()
    name = input("What is your name: ").title()
    print("\nHello "+ name)
    ##Creating Assistant
    print("Now answer the next few questions to start creating your personal assistant")
    assi_gender = input("Select a prefered gender. Female or Male?: ").lower()
    while assi_gender not in ['female','male']:
        assi_gender = input("We are working to expand to a more progressive line up.\n However we have just two options at the moment.\n Female or Male?: ")
    if assi_gender == "male":
        print("Your assistant is male.")
    else:
        print("Your assistant is female.")
    assi_name = input("What would you like to name your assistant?:" ).title()
    assi_age = input("You can also choose the age for your assistant, there may be some unexpected responses if you choose an age closer to a child or a senior citizen. Input format example: 26 \nPlease enter an age: ")
    # Re-prompt until the age is a positive integer no greater than 122.
    while assi_age.isdigit() != True or int(assi_age) <= 0 or int(assi_age) > 122:
        if assi_age.isdigit() != True:
            print("Error, you did not enter a valid age.")
            assi_age = input("Please enter a numeric age. ")
        elif int(assi_age) <= 0:
            print("Sorry your desired assistant has not been bored yet.")
            assi_age = input("Please enter an age that would be at least alive. ")
        elif int(assi_age) > 122:
            print( "There has been no one that has lived longer than your inputted age. ")
            assi_age = input("Please enter a valid age. ")
        else:
            assi_age = input("Please enter a valid age. ")
    your_assistant = Assistant(assi_name, assi_gender, assi_age)
    start = input("Do you want to start your trial? (Y/N): ")
    if start.upper() == "N" :
        print("Too late its free!! Lets start!")
    age_type = your_assistant.age_type() #Tracks the age_type
    print("Oh, by the way, your assistant is under the age-type " + age_type + ". We do not guarantee the actions or speach of your assistan depending on its age.\nGood Luck!")
    event_list = {}  # day number (1-7) -> list of reminder strings
    for day in range(1,8):
        # 0-100 roll deciding whether an age-specific mishap skips the day.
        rediculous = random.randint(0,100)
        #morning_meme = your_assistant.meme()
        #print(morning_meme)
        print("\nToday is " + num_days[day])
        #Rediculous factors
        if age_type == "child":
            if rediculous >= 50:
                print(your_assistant.name + " has soiled thier bed this morning and can no longer be here today.")
                print(your_assistant.name + ": 'Sorry " + name + " I'll do my best tomorrow though!'")
                continue
        if age_type == "new":
            if rediculous <= 10:
                print(your_assistant.name + " has called in sick.")
                print(your_assistant.name + ": 'Sorry " + name + " I should be better tomorrow, but I won't be able to be there for you today. See you tomorrow!'")
                continue
            elif rediculous >=90:
                print(your_assistant.name + ": 'Hey, " + name + ", I just got super liked on tinder so like, I going out today. Hope you can understand.")
                continue
        if age_type == "senior":
            if rediculous <= 50:
                print(your_assistant.name + ": 'Help! I've fallen and I can't get up!'")
                print(name + "Runs over and helps + "+ your_assistant.name + "." + your_assistant.name + " is too traumatized to work today.")
                continue
            if rediculous >= 90:
                print(your_assistant.name + " was run over by a reindeer!")
                print(your_assistant.name + " goes to the hospital and you lose a day of the trial!")
                continue
        if age_type == "adolescent":
            if rediculous <= 20:
                print(your_assistant.name + " didn't show up. However you call his mom and " +your_assistant.name + " won't get paid for the rest of the demo.")
                continue
            if rediculous >=40 and rediculous <= 50:
                print(your_assistant.name + "'s Mom called and she said that '" + your_assistant.name + " jumped out the window and sprained his ankle.")
                print(your_assistant.name + " will be back tomorrow!")
                continue
        mood = input(your_assistant.name +": 'Lets start with a quote to match your mood!'\n'How are you feeling today?' (happy, sad or content) : ")
        print(your_assistant.emotional_support(mood, age_type)) #Prints a random quote of the day
        # Show any reminders previously scheduled for today.
        if day in event_list:
            print("\n" + your_assistant.name +": 'You have these events for today:'")
            for i in event_list[day]:
                print("'" + i + "'")
        #Daily scheduling
        want_sched = input("\n" + your_assistant.name +": 'Would you like to schedule something for the week?' (Y/N): ").upper()
        while want_sched != "Y" and want_sched != "N":
            if day == 7:
                print(your_assistant.name +": 'Today is the last day of the trial. You are unable to schedule anything.'")
                want_sched = "N"
                continue
            print("Please enter a valid input")
            want_sched = input(your_assistant.name +": 'Would you like to schedule something for the week?' (Y/N): ").upper()
        #Continue if want to schedule
        while want_sched == "Y":
            week_day = input(your_assistant.name +": 'Which day would you like to schedule something.' (Monday, etc.): ").title()
            while week_day not in day_names:
                week_day = input("Please enter a valid weekday or weekend").title()
            #If week day is not valid
            # Only days strictly after today can be scheduled.
            while day_num[week_day]<= day:
                #Response depends on day
                # NOTE(review): both branches below test the same condition, so
                # the elif is unreachable; the second message ("trial does not
                # last till ...") presumably belongs to a different check.
                if day_num[week_day] == day:
                    week_day = input(your_assistant.name +": 'Unfortunately today is already "+ week_day + ".'\n'Please choose another day': ")
                elif day_num[week_day] == day:
                    week_day = input(your_assistant.name +": 'Sorry, your trial does not last till "+ week_day + "'\n'Please choose another day': ")
                while week_day not in day_names:
                    week_day = input("Please enter a valid weekday or weekend").title()
            event = input(your_assistant.name +": 'What event would you like to be reminded of?'")
            print(your_assistant.name +": 'I will remind " + name + " on " + week_day + " of the event " + event + "'." )
            if day_num[week_day] not in event_list:
                event_list[day_num[week_day]] = []
            event_list[day_num[week_day]].append(event)
            want_sched = input(your_assistant.name +": 'Would you like to schedule anything else?' (Y/N) ").title()
            while want_sched != "Y" and want_sched != "N":
                print("Please enter a valid input")
                want_sched = input(your_assistant.name +": 'Would you like to schedule something for the week?' (Y/N): ").upper()
    #End of Scheduling
    print("Thus ends your week long trial.")
    print(your_assistant.name +": 'I hope you have had a great experience!'")
if __name__ == "__main__":
    main()  # run the interactive demo only when executed as a script
# | null | P_A_M.py | P_A_M.py | py | 12,754 | python | en | code | null | code-starcoder2 | 50 |
376827097 | # -*- coding: utf-8 -*-
"""
Created on Sun May 16 23:19:06 2021
@author: galan
"""
import string
import hashlib
import merklelib
from merklelib import MerkleTree
def hashfunc(value):
return hashlib.sha256(value).hexdigest()
data = list(string.ascii_letters)
tree = MerkleTree(data, hashfunc)
proof = tree.get_proof('A')
if tree.verify_leaf_inclusion('A', proof):
print('A is in the tree')
else:
print('A is not in the tree')
MR = tree.merkle_root
kappa = merklelib.verify_leaf_inclusion('A', proof, hashfunc, MR)
from merklelib import utils
keepo = hashfunc(b'\x00' + utils.to_string('a')) | null | middle-daemon/script/Merkle_Tree.py | Merkle_Tree.py | py | 658 | python | en | code | null | code-starcoder2 | 50 |
625082202 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 15:11:17 2019
@author: rain
"""
# Minimal Keras MLP on MNIST: 784 -> 512 ReLU -> 10 softmax.
import pandas as pd  # NOTE(review): imported but unused in this script
#### hand writing example
from keras.datasets import mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
############################## network building
import keras
from keras import models
from keras import layers
network = models.Sequential()
network.add(layers.Dense(512,activation='relu',input_shape=(28*28,)))
network.add(layers.Dense(10,activation='softmax'))
rms=keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06) # define the optimizer first, then pass it in
# NOTE(review): compile() below is given the string 'rmsprop' (default
# settings), so the custom `rms` object above is never used; pass
# optimizer=rms to apply the custom learning rate -- confirm intent.
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy']
                )
############################# put data into model
train_images=train_images.reshape((60000,28*28))  # flatten 28x28 images to 784-vectors
train_images=train_images.astype('float32')/255 # scale pixel values from [0, 255] into [0, 1]
test_images=test_images.reshape((10000,28*28))
test_images=test_images.astype('float32')/255
############################# preparing for labels
from keras.utils import to_categorical
train_labels=to_categorical(train_labels) # one-hot encode labels to match the 10-way softmax output
test_labels=to_categorical(test_labels)
network.fit(train_images,train_labels, epochs=7, batch_size=518)
########################### evaluate on the held-out test set
test_loss, test_acc= network.evaluate(test_images, test_labels)
########################### show the image
digit = train_images[5]
# NOTE(review): train_images was flattened above, so `digit` is a 784-vector;
# plt.imshow likely needs digit.reshape(28, 28) -- confirm intended behaviour.
import matplotlib.pyplot as plt
from ipykernel.kernelapp import IPKernelApp
plt.imshow(digit)
| null | 各种神经网络模板/ex_deeplearning.py | ex_deeplearning.py | py | 1,589 | python | en | code | null | code-starcoder2 | 50 |
490009659 | # Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routing utilities library."""
from resources.libraries.python.VatExecutor import VatTerminal
from resources.libraries.python.topology import Topology
from resources.libraries.python.ssh import exec_cmd_no_error
class Routing(object):
    """Routing utilities."""
    @staticmethod
    def vpp_route_add(node, network, prefix_len, gateway=None,
                      interface=None, use_sw_index=True, resolve_attempts=10,
                      count=1, vrf=None, lookup_vrf=None, multipath=False,
                      weight=None, local=False):
        """Add route to the VPP node.

        Each optional argument is rendered into a fragment of the VAT
        ``add_route.vat`` template; a falsy value renders as an empty string
        and is therefore omitted from the command.

        :param node: Node to add route on.
        :param network: Route destination network address.
        :param prefix_len: Route destination network prefix length.
        :param gateway: Route gateway address.
        :param interface: Route interface.
        :param use_sw_index: Use sw_if_index in VAT command.
        :param resolve_attempts: Resolve attempts IP route add parameter.
        :param count: Number of IP addresses to add starting from network IP
            with the same prefix (increment is 1). If None, then not used.
        :param vrf: VRF table ID (Optional).
        :param lookup_vrf: VRF table ID for lookup.
        :param multipath: Enable multipath routing.
        :param weight: Weight value for unequal cost multipath routing.
        :param local: The route is local.
        :type node: dict
        :type network: str
        :type prefix_len: int
        :type gateway: str
        :type interface: str
        :type use_sw_index: bool
        :type resolve_attempts: int
        :type count: int
        :type vrf: int
        :type lookup_vrf: int
        :type multipath: bool
        :type weight: int
        :type local: bool
        """
        if interface:
            if use_sw_index:
                # Translate the interface name into its VPP sw_if_index.
                int_cmd = ('sw_if_index {}'.
                           format(Topology.get_interface_sw_index(node,
                                                                  interface)))
            else:
                int_cmd = interface
        else:
            int_cmd = ''
        rap = 'resolve-attempts {}'.format(resolve_attempts) \
            if resolve_attempts else ''
        via = 'via {}'.format(gateway) if gateway else ''
        cnt = 'count {}'.format(count) \
            if count else ''
        vrf = 'vrf {}'.format(vrf) if vrf else ''
        lookup_vrf = 'lookup-in-vrf {}'.format(lookup_vrf) if lookup_vrf else ''
        multipath = 'multipath' if multipath else ''
        weight = 'weight {}'.format(weight) if weight else ''
        local = 'local' if local else ''
        with VatTerminal(node, json_param=False) as vat:
            vat.vat_terminal_exec_cmd_from_template('add_route.vat',
                                                    network=network,
                                                    prefix_length=prefix_len,
                                                    via=via,
                                                    vrf=vrf,
                                                    interface=int_cmd,
                                                    resolve_attempts=rap,
                                                    count=cnt,
                                                    lookup_vrf=lookup_vrf,
                                                    multipath=multipath,
                                                    weight=weight,
                                                    local=local)
    @staticmethod
    def add_fib_table(node, table_id, ipv6=False):
        """Create new FIB table according to ID.

        :param node: Node to add FIB on.
        :param table_id: FIB table ID.
        :param ipv6: Is this an IPv6 table
        :type node: dict
        :type table_id: int
        :type ipv6: bool
        """
        with VatTerminal(node) as vat:
            vat.vat_terminal_exec_cmd_from_template('add_fib_table.vat',
                                                    table_id=table_id,
                                                    ipv6="ipv6" if ipv6 else "")
    @staticmethod
    def add_route(node, ip_addr, prefix, gateway, namespace=None):
        """Add route in namespace via the Linux ``ip route`` command.

        :param node: Node where to execute command.
        :param ip_addr: Route destination IP address.
        :param prefix: IP prefix.
        :param namespace: Execute command in namespace. Optional.
        :param gateway: Gateway address.
        :type node: dict
        :type ip_addr: str
        :type prefix: int
        :type gateway: str
        :type namespace: str
        """
        if namespace is not None:
            cmd = 'ip netns exec {} ip route add {}/{} via {}'.format(
                namespace, ip_addr, prefix, gateway)
        else:
            cmd = 'ip route add {}/{} via {}'.format(ip_addr, prefix, gateway)
        # Requires root; raises on non-zero exit status.
        exec_cmd_no_error(node, cmd, sudo=True)
| null | resources/libraries/python/Routing.py | Routing.py | py | 5,536 | python | en | code | null | code-starcoder2 | 50 |
91463215 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exporters import CsvItemExporter
import pymysql
# class SpidersPipeline:
# def process_item(self, item, spider):
class MysqlPipeline(object):
    """Persist scraped movie items into a local MySQL table.

    NOTE(review): the connection credentials are hard-coded; they should be
    moved into Scrapy settings in a follow-up change.
    """

    def __init__(self):
        # Open the connection once for the lifetime of the spider.
        self.conn = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='123',
            db='test1',
            charset='utf8mb4'
        )
        # Create the cursor used for all inserts.
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Fix: `release` is a reserved word in MySQL, so the column names are
        # backtick-quoted -- the original unquoted INSERT failed with a
        # syntax error on every item.
        insert_sql = "insert into movies(`release`,`name`,`tag`) VALUES(%s,%s,%s)"
        # Parameterized execute: values are escaped by the driver.
        self.cursor.execute(insert_sql, (item['release'], item['name'], item['tag']))
        # Commit so the row is visible even if the spider later crashes.
        self.conn.commit()
        # Fix: Scrapy pipelines must return the item (or raise DropItem);
        # the original returned None, starving any downstream pipeline.
        return item

    def close_spider(self, spider):
        # Release DB resources when the spider finishes.
        self.cursor.close()
        self.conn.close()
| null | Week02/spiders/spiders/pipelines.py | pipelines.py | py | 1,237 | python | en | code | null | code-starcoder2 | 50 |
378849862 | # -*- coding: utf-8 -*-
# 材料报表 模块
import os,datetime, xlrd, json
from web.models import Materialreport
from myAPI.excelAPI import get_date, list_to_xlsx
from django.shortcuts import render, redirect
from myAPI.pageAPI import djangoPage, PAGE_NUM
from myAPI.downfileAPI import down_file
from web.forms.materialreport import MaterialreportForm
from myAPI.modelAPI import get_model_first_id, get_model_last_id, \
get_model_up_id, get_model_name_up_id, get_model_data, get_post_data
from myAPI.listAPI import pinyin
from django.http.response import StreamingHttpResponse
from myAPI.listAPI import pinyin
def model_id_update(request, model, id):
    """Recompute the derived money columns of one Materialreport row.

    Each *_money field is quantity * unit price, rounded to 2 decimals:
    last-month balance, income, weighted, production expenditure and
    this-month balance.  Also stamps the logged-in user as the operator.

    :param model: a Materialreport manager (e.g. ``Materialreport.objects``)
    :param id: primary key of the row to update
    :return: '' (kept for compatibility with the original callers)
    """
    id = int(id)
    # Single fetch + single UPDATE instead of the original five
    # get()/update() round-trips; the computed values are identical because
    # each product only reads the pre-existing *_number/*_univalence fields.
    obj = model.get(id=id)
    model.filter(id=id).update(
        operator=request.user.username,
        lastmonth_money=round(obj.lastmonth_number * obj.lastmonth_univalence, 2),
        income_money=round(obj.income_number * obj.income_univalence, 2),
        weighting_money=round(obj.weighting_number * obj.weighting_univalence, 2),
        production_expenditure_money=round(obj.production_expenditure_number * obj.production_expenditure_univalence, 2),
        thismonth_money=round(obj.thismonth_number * obj.thismonth_univalence, 2),
    )
    return ''
def post_excel_model(request, post_file_excel, model, k):
    '''Import every worksheet of an uploaded Excel file into the database.

    post_file_excel: name of the uploaded-file field on the request
    model: Django model class to load rows into (its table is cleared first!)
    k: model field names, positionally matched to the worksheet columns
    Returns an 'err: ...' string on failure, otherwise a Chinese summary of
    how many sheets were imported / skipped.
    '''
    file_excel = request.FILES.get(post_file_excel)
    ext = os.path.splitext(file_excel.name)[1]
    # NOTE(review): substring test accepts any extension containing 'xls'.
    if 'xls' not in ext and 'xlsx' not in ext:
        return 'err: 文件格式错误,请上传Excel文件。'
    model.objects.all().delete() # wipe the table before re-importing
    workbook = xlrd.open_workbook(file_contents=file_excel.file.read())
    sheet_sum = len(workbook.sheet_names()) # total number of worksheets
    filename_no = ''
    n, n1 = 0, 0 # n: sheet counter; n1: sheets that yielded no rows
    for index in range(0, sheet_sum):
        ret = workbook_model(workbook, 0, index, model, k) # start at row 0
        if ret[0:3] == 'err':
            return ret
        else:
            n += 1
            if ret:
                n1 += 1 # non-empty return means the sheet produced no rows
                filename_no += str(n)+ '. ' + ret + '、' # record its number
    return "导入了%s工作张表。未被导入数据的工作表总数:%s; 表名:%s"%(str(sheet_sum), str(n1), filename_no)
def workbook_model(workbook, x, index, model, k):
    """Write one worksheet of the open workbook into the database.

    workbook: result of ``xlrd.open_workbook(...)``
    x: first row to read (0-based)
    index: worksheet index
    model: Django model class; k: field names matched to columns
    Loop stop condition: the first fully empty row ends the scan.
    Returns '' on success, the 1-based sheet number when the sheet produced
    no rows, or an 'err: ...' string on any exception.
    """
    sheet = workbook.sheet_by_index(index)
    try:
        # 1. Convert the worksheet to a list of row lists; stop at an empty row.
        mylist = []
        for row_num in range(x, sheet.nrows): # start at row x (x = 0, 1, 2, ...)
            row = sheet.row(row_num) #row -- [empty:'', empty:'', text:'HZ', number:10.0]
            v = []
            for r in row: # one worksheet row at a time
                v.append(r.value)
            mylist.append(v)
            if not any(v): # empty row v=['','','','','','',''] ends the loop
                break
        # 2. Capture the unit name from the header row, then inject the
        #    customer name and the operator into every data row.
        mlist = []
        name = ''
        filename_no = '' # sheet number string when no rows were imported
        for (n,v) in enumerate(mylist):
            v1 = v[1:] # header row looks like ['<name>', '', '', ...]
            if n == 0 and not any(v1):
                name = v[0] # remember the unit/customer name
            else:
                v.insert(1, name) # insert customer name
                v.insert(22, '陈会计') # insert operator
                if n >= 3:
                    mlist.append(v) # skip the heading rows
        # 3. Normalize each row to the model's field types and bulk-insert.
        object_list = []
        for (n,v) in enumerate(mlist):
            if isinstance(v[0], int) or isinstance(v[0], float):
                v[0] = get_date(int(v[0])) # Excel serial date -> date string
            else:
                v[0] = '1900-01-01'
            for r in range(3,22):
                if not v[r] or isinstance(v[r], str):
                    v[r] = 0 # empty or textual numeric cell -> 0
                else:
                    v[r] = round(v[r], 2)
            # Derived amounts below are taken from the sheet as-is instead of
            # being recomputed here (qty * unit price, 2 decimals):
            # v[5] last-month balance, v[8] income, v[11] weighted,
            # v[14] production expenditure, v[21] this-month balance.
            d = dict(zip(k,v))
            object_list.append(model(**d))
        if object_list:
            model.objects.bulk_create(object_list, batch_size=20)
        else:
            filename_no += '%s' %(index+1)
        return filename_no
    except Exception as e:
        print(e)
        return 'err: %s. 错误工作表:%s'%(e, index+1)
def materialreport_import(request):
    """Bulk-import material reports from an uploaded Excel workbook."""
    down_tpl = '/web/materialreport/tpl/' # URL for downloading the template
    if request.method == 'GET':
        return render(request, 'web/import.html',context=locals())
    # Field order must match the template's column order exactly.
    k = ["date","name","material_name",\
        "lastmonth_number","lastmonth_univalence","lastmonth_money",\
        "income_number","income_univalence","income_money",\
        "weighting_number","weighting_univalence","weighting_money",\
        "production_expenditure_number","production_expenditure_univalence","production_expenditure_money",\
        "material_expenditure_number","material_expenditure_money",\
        "sale_number","sale_money",\
        "thismonth_number","thismonth_univalence","thismonth_money","operator"]
    ret = post_excel_model(request, 'post_file_excel', Materialreport, k)
    context = {'status': False, 'msg': '导入失败! %s' %ret} if ret[0:3] == 'err' \
        else {'status': True, 'msg': '导入成功! %s. ' %ret}
    return render(request, 'web/import.html',context)
def materialreport_tpl(request):
    """Serve the Excel template used for the bulk material-report import."""
    template_path = 'web/files/%s' % '批量导入材料报表模板.xlsx'
    return down_file(template_path, 'excel_tpl.xlsx')
def materialreport_list(request, page):
    """Material-report list view: filter, total the money columns, paginate."""
    cleanData = request.GET.dict()
    model = Materialreport.objects
    if request.method == 'POST':
        page = 1  # a new filter always restarts at the first page
        cleanData = request.POST.dict()
        dict.pop(cleanData,'csrfmiddlewaretoken')
    name, name_list, queryString, datas = get_model_data(model, cleanData)
    # Column totals over the filtered queryset (each .filter() is a no-op).
    lastmonth_money = round(sum(datas.filter().values_list('lastmonth_money', flat=True)), 2) # sum of last-month balance
    income_money = round(sum(datas.filter().values_list('income_money', flat=True)),2) # sum of income
    weighting_money = round(sum(datas.filter().values_list('weighting_money', flat=True)),2) # sum of weighted amount
    production_expenditure_money = round(sum(datas.filter().values_list('production_expenditure_money', flat=True)),2) # sum of production expenditure
    material_expenditure_money = round(sum(datas.filter().values_list('material_expenditure_money', flat=True)),2) # sum of material expenditure
    sale_money = round(sum(datas.filter().values_list('sale_money', flat=True)),2) # sum of sales
    thismonth_money = round(sum(datas.filter().values_list('thismonth_money', flat=True)),2) # sum of this-month balance
    data_list, pageList, num_pages, page = djangoPage(datas,page,PAGE_NUM) # paginate
    offset = PAGE_NUM * (page - 1)
    # NOTE(review): overwrites the name_list already returned by
    # get_model_data above -- presumably to cover all names, not just the
    # filtered ones; confirm and drop one of the two computations.
    name_list = pinyin(list(set(Materialreport.objects.values_list('name', flat=True))))
    name_list.insert(0, '')
    return render(request, 'web/materialreport/materialreport_list.html', context=locals())
def materialreport_add(request):
    """Add a material-report row; recompute its derived money fields on save."""
    if request.method == 'GET':
        form = MaterialreportForm()
        return render(request, 'web/form_submit.html', context=locals())
    form = MaterialreportForm(data=request.POST)
    if form.is_valid():
        form.save()
        # Fix: the manager must come from the Materialreport class -- the
        # original referenced the undefined lowercase name `materialreport`,
        # raising NameError after every successful save.
        model = Materialreport.objects
        model_id_update(request, model, get_model_last_id(model))
        return redirect('/web/materialreport/list/(.+)')
    return render(request, 'web/form_submit.html', context=locals())
def materialreport_edit(request, cid):
    """Edit one material-report row, then recompute its derived money fields."""
    obj = Materialreport.objects.get(id=cid)
    if request.method == 'GET':
        form = MaterialreportForm(instance=obj)
        return render(request, 'web/form_submit.html', context=locals())
    form = MaterialreportForm(data=request.POST, instance=obj)
    if form.is_valid():
        form.save()
        # Refresh operator and the qty * unit-price money columns.
        model = Materialreport.objects
        model_id_update(request, model, cid)
        return redirect('/web/materialreport/list/(.+)')
    return render(request, 'web/form_submit.html', context=locals())
def materialreport_del(request, cid):
    """Delete one material-report row, then return to the material-report list."""
    Materialreport.objects.filter(id=cid).delete()
    # Fix: the original redirected to the receivable list (copy-paste from
    # another module); keep the same '(.+)' path convention used by the
    # sibling views in this file.
    return redirect('/web/materialreport/list/(.+)')
def convertxlsx(data_list, filePath, ids):
    """Render model rows into an Excel file at filePath; True on success.

    NOTE(review): the headings and attributes below (receipt, abstract,
    number, collection, ...) match a *receivable* model, not Materialreport
    (whose fields are material_name, lastmonth_number, ...).  Calling this
    with Materialreport rows will raise AttributeError -- this looks like a
    copy-paste from the receivable module; confirm and realign the fields.
    """
    ret = True
    try:
        # Heading row; element order must stay aligned with `k` and `data`.
        headings = ["序号", "采购日期", "客户名称", "送货单号码","摘要", \
            "送货数量", "单价", "金额", "收款", "余额", "备注", \
            "日期1", "发票号码", "金额1", "欠客户票","经办人"]
        k = ["date","name","receipt","abstract","number",\
            "univalence","money","collection","balance","note",\
            "date1","Invoice_number","money1","owe_ticket","operator"]
        date = [str(i.date + datetime.timedelta(hours=8)).split('+')[0] for i in data_list] # date + UTC+8 offset
        name = [i.name for i in data_list]
        receipt = [i.receipt for i in data_list]
        abstract = [i.abstract for i in data_list ]
        number = [i.number for i in data_list ]
        univalence = [i.univalence for i in data_list ]
        money = [i.money for i in data_list ]
        collection = [i.collection for i in data_list ]
        balance = [i.balance for i in data_list ]
        note = [i.note for i in data_list ]
        date1 = [str(i.date1 + datetime.timedelta(hours=8)).split('+')[0] for i in data_list] # date + UTC+8 offset
        Invoice_number = [i.Invoice_number for i in data_list ]
        money1 = [i.money1 for i in data_list ]
        owe_ticket = [i.owe_ticket for i in data_list ]
        operator = [i.operator for i in data_list ]
        data = [ids, date,name,receipt,abstract,number,univalence,\
            money,collection,balance,note,date1,Invoice_number,money1,owe_ticket,operator ]
        if not list_to_xlsx(data, headings, filePath): # write the spreadsheet
            ret = False
    except Exception as _e:
        print('err: %s' %_e)
        ret = False
    return ret
#单页保存Excel down_file
def materialreport_makexlsx_page(request, page):
    """Export the current page of the filtered rows as an Excel download.

    Falls back to the list view when the spreadsheet cannot be written.
    """
    datas, tempFilePath, fileName = get_post_data(request, Materialreport.objects)
    datas, pageList, num_pages, page = djangoPage(datas, page, PAGE_NUM)  # paginate
    ids = [i+PAGE_NUM * (page - 1) for i in range(1,PAGE_NUM+1) ]  # running row numbers for this page
    if convertxlsx(datas, tempFilePath, ids):
        return down_file(tempFilePath, fileName)
    # Fix: HttpResponseRedirect is never imported in this module (NameError
    # on the fallback path); use the `redirect` shortcut imported at the top.
    return redirect(r'/web/materialreport/list/%s' % (page))
#全部保存Excel
def materialreport_makexlsx_all(request, page):
    """Export ALL filtered rows (not just one page) as an Excel download."""
    datas, tempFilePath, fileName = get_post_data(request, Materialreport.objects)
    ids = [i for i in range(1,len(datas)+1) ]  # running row numbers
    if convertxlsx(datas, tempFilePath, ids):
        return down_file(tempFilePath, fileName)
    # Fix: HttpResponseRedirect is never imported in this module (NameError
    # on the fallback path); use the `redirect` shortcut imported at the top.
    return redirect(r'/web/materialreport/list/%s' % (page))
| null | mysite/web/views/materialreport.py | materialreport.py | py | 14,440 | python | en | code | null | code-starcoder2 | 50 |
448999079 | """
remember that once in a state, the model may either move onto the next state or remain in its
current state.
~P(X=l)= 1/p = mean, 1/mean = p of staying in a state
state's are geometrically distributed so you'll want to know the number of times you stayed in
that state before moving on.
You've been given the probabilities for if the state is moving on however you'll need to sample
for the number of times you've been in the current state.
"""
import random
import math
################################# Some of the helper code.########################################
def randexp(x):
    """Sample an Exponential(rate=x) variate by inverse-transform sampling."""
    # Fix: the original called bare `log`, but this module only does
    # `import math`, so every call raised NameError.  Use math.log.
    return -math.log(random.random()) / x
def randpoiss(x):
    """Sample a Poisson(x) variate by counting exponential arrivals in [0, 1)."""
    arrivals = 0
    elapsed = randexp(x)
    while elapsed < 1:
        arrivals += 1
        elapsed += randexp(x)
    return arrivals
def baseGen():
    """Draw one nucleotide uniformly at random from A/C/G/T."""
    u = random.random()
    if u < 0.25:
        return "A"
    if u < 0.5:
        return "C"
    if u < 0.75:
        return "G"
    return "T"
def randSeqGen(length):
    """Build a uniformly random DNA string of the given length."""
    return "".join(baseGen() for _ in range(length))
def mutate(X, t, mu):
    """Mutate sequence X in place over time t at per-site rate mu; return X.

    The total number of mutations is Poisson with rate len(X)*mu*t; each
    mutation hits a uniformly chosen site and resamples its base.

    Fixes over the original:
    - ``range(numMutations)`` instead of ``range(1, numMutations)``, which
      silently dropped one mutation (and all of them when the count was 1);
    - the site is drawn with ``random.randrange(l)`` instead of
      ``math.ceil(random.random()*l)``, which could yield l (IndexError)
      and could never yield site 0.
    NOTE(review): X must be a mutable sequence (e.g. a list of single-char
    strings); strings from randSeqGen need ``list(...)`` first -- confirm
    the intended call site.
    """
    l = len(X)
    numMutations = randpoiss(l * mu * t)
    for _ in range(numMutations):
        site = random.randrange(l)
        X[site] = baseGen()
    return X
##################################################################################################
def initialStateGen():
num = random.random()
if num < 1/3:
return "H"
elif num < 2/3:
return "S"
else:
return "T"
def stateGen(currentState):
num = random.random()
if currentState == "H":
if num < 211/225:
return "H"
elif num < 215.2/225:
return "S"
else:
return "T"
elif currentState == "S":
if num < 57/64:
return "S"
elif num < 59.8/64:
return "H"
else:
return "T"
else:
if num < 31/36:
return "T"
elif num < 33.5/36:
return "H"
else:
return "S"
def emite(state):
num = random.random()
if state == "H":
if num < .1:
return "N"
elif num < .7:
return "I"
else:
return "B"
elif state == "S":
if num < .3:
return "N"
elif num < .45:
return "I"
else:
return "B"
elif state == "T":
if num < .7:
return "N"
elif num < .9:
return "I"
else:
return "B"
else:
print("Unknown State.")
def HMMsimulator(length):
stateSeq = initialStateGen()
while len(stateSeq) < length:
stateSeq += stateGen(stateSeq[-1:])
emissions = ""
for s in stateSeq:
emissions += emite(s)
return [stateSeq, emissions]
x = HMMsimulator(150)
"""
print("states:\n" + x[0])
print()
print("emissions:\n" + x[1])
"""
##################################################################################################
import math as m
def jointProbability(obs, states):
sw = {"H":0, "S":1, "T":2, "B":0, "I":1, "N":2}
tMatrix = [[m.log(211/225), m.log(4.2/225), m.log(9.8/255)],
[m.log(2.8/64), m.log(57/64), m.log(4.2/64)],
[m.log(2.5/36), m.log(2.5/36), m.log(31/36)]]
eMatrix = [[m.log(.3), m.log(.55), m.log(.1)],
[m.log(.6), m.log(.15), m.log(.2)],
[m.log(.1), m.log(.3), m.log(.7)]]
prob = m.log(1/3)
for i in range(len(states)):
s = sw[states[i]]
o = sw[obs[i]]
p1 = eMatrix[o][s]
try:
s2 = sw[states[i+1]]
p2 = tMatrix[s][s2]
prob = prob + p1 + p2
except:
prob = prob + p1
return prob
stateSeq = "SSHHHTTSSSHHHHHHSSSSSS"
eSeq = "BINBNINBNINBINBIINBBNB"
"""
px = jointProbability(x[1], x[0])
print("Joint probability of:\n" + "The state sequence: " + x[0] +
"\nand the emission sequence: " + x[1] + "\nis", px, "in log units")
print()
px = jointProbability(eSeq, stateSeq)
print("Joint probability of:\n" + "The given state sequence: " + stateSeq +
"\nand the emission sequence: " + eSeq + "\nis", px, "in log units")
"""
def forward(obs):
sw = {"H":0, "S":1, "T":2, "B":0, "I":1, "N":2}
#states H S T
tMatrix = [[m.log(211/225), m.log(4.2/225), m.log(9.8/255)], # H
[m.log(2.8/64), m.log(57/64), m.log(4.2/64)], # S
[m.log(2.5/36), m.log(2.5/36), m.log(31/36)]] # T
#states H S T
eMatrix = [[m.log(.3), m.log(.55), m.log(.1)], # B
[m.log(.6), m.log(.15), m.log(.2)], # I
[m.log(.1), m.log(.3), m.log(.7)]] # N
fMatrix = [[],[],[]]
for i in range(len(obs)):
o = sw[obs[i]]
for j in range(3):
if i == 0:
e1 = eMatrix[o][j]
a1 = m.log(1/3)
fMatrix[j] += [e1 + m.log(m.exp(a1) + m.exp(a1) + m.exp(a1))]
else:
e1 = eMatrix[o][j]
e2 = fMatrix[0][i-1]
a2 = tMatrix[j][0]
e3 = fMatrix[1][i-1]
a3 = tMatrix[j][1]
e4 = fMatrix[2][i-1]
a4 = tMatrix[j][2]
fMatrix[j] += [e1 + m.log(m.exp(e2+a2) + m.exp(e3+a3) + m.exp(e4+a4))]
#for i in fMatrix:
#print(i)
return m.log(m.exp(fMatrix[0][-1]) + m.exp(fMatrix[1][-1]) + m.exp(fMatrix[2][-1]))
fx = forward(eSeq)
print("The log probability of the forward algorithm applied to the given emission sequence:\n"
+ eSeq, "is", fx)
fx = forward(x[1])
print("The log probability of the forward algorithm applied to the generated emission sequence:\n"
+ x[1], "is", fx) | null | Uni/Compsci369/Assignment3/Question1.py | Question1.py | py | 5,150 | python | en | code | null | code-starcoder2 | 50 |
60179069 | # Default shell for a Python 3.x program
#
__author__ = 'Nathaniel Smith'
# CIS-125-82A
# Distance
#
# This program promts the user for a distance measured in kilometers,
# converts it to miles and prints out the results.
def main ():
K = eval(input("Please enter a distance in kilometers: "))
M = K * 0.62
print("The distance ", K, " in kilometers is equal to ", M, " miles.")
main () | null | distance.py | distance.py | py | 399 | python | en | code | null | code-starcoder2 | 50 |
107265824 | from django import forms
from .models import Lead, TempUser
class NewLeadForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.Meta.fields:
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
class Meta:
model = Lead
fields = ['company_name', 'number_locations', 'contact_name', 'contact_email', 'contact_phone']
class LoginForm(forms.ModelForm):
class Meta:
model = TempUser
fields = ['email']
class NewLeadFormSpiffy(NewLeadForm):
class Meta(NewLeadForm.Meta):
fields = NewLeadForm.Meta.fields + ['status'] | null | leads/lead/forms.py | forms.py | py | 706 | python | en | code | null | code-starcoder2 | 51 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.