hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
77c4b823daa25924e42b9613fee9df7dab7601e4 | 6,301 | py | Python | model/train/AdvancedEAST.py | JinGyeSetBirdsFree/FudanOCR | e6b18b0eefaf832b2eb7198f5df79e00bd4cee36 | [
"MIT"
] | 25 | 2020-02-29T12:14:10.000Z | 2020-04-24T07:56:06.000Z | model/train/AdvancedEAST.py | dun933/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b | [
"MIT"
] | 33 | 2020-12-10T19:15:39.000Z | 2022-03-12T00:17:30.000Z | model/train/AdvancedEAST.py | dun933/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b | [
"MIT"
] | 4 | 2020-02-29T12:14:18.000Z | 2020-04-12T12:26:50.000Z | # -*- coding: utf-8 -*-
def train_AEAST(config_file):
import sys
sys.path.append('./detection_model/AdvancedEAST')
import os
import argparse
import time
    import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
import config as cfg
from utils.data_utils import custom_dset, collate_fn
from network.AEast import East
from network.loss import LossFunc
from utils.utils import AverageMeter, save_log
from utils.earlystop import EarlyStopping
os.environ["CUDA_VISIBLE_DEVICES"] = "0,3"
from yacs.config import CfgNode as CN
    def read_config_file(config_file):
        with open(config_file) as f:
            opt = CN.load_cfg(f)
        return opt
opt = read_config_file(config_file)
class Wrapped:
def __init__(self, train_loader, val_loader, model, criterion, optimizer, scheduler, start_epoch, val_loss_min):
self.train_loader = train_loader
self.val_loader = val_loader
self.model = model
self.criterion = criterion
self.optimizer = optimizer
            self.scheduler = scheduler  # learning-rate scheduler
            self.start_epoch = start_epoch  # epoch to resume training from
self.tick = time.strftime("%Y%m%d-%H-%M-%S", time.localtime(time.time()))
self.earlystopping = EarlyStopping(opt.patience, val_loss_min)
def __call__(self):
for epoch in tqdm(range(self.start_epoch + 1, opt.max_epoch + 1), desc='Epoch'):
if epoch == 1:
tqdm.write("Validating pretrained model.")
self.validate(0)
if epoch > 1 and epoch % opt.decay_step == 0:
tqdm.write("Learning rate - Epoch: [{0}]: {1}".format(epoch - 1,self.optimizer.param_groups[0]['lr']))
self.train(epoch)
if self.validate(epoch): # if earlystop
print('Earlystopping activates. Training stopped.')
break
def validate(self, epoch):
losses = AverageMeter()
            self.model.eval()
            with torch.no_grad():  # disable autograd during validation to save memory
                for i, (img, gt) in tqdm(enumerate(self.val_loader), desc='Val', total=len(self.val_loader)):
                    img = img.cuda()
                    gt = gt.cuda()
                    east_detect = self.model(img)
                    loss = self.criterion(gt, east_detect)
                    losses.update(loss.item(), img.size(0))
            tqdm.write('Validate Loss - Epoch: [{0}] Avg Loss {1}'.format(epoch, losses.avg))
save_log(losses, epoch, i + 1, len(self.val_loader), self.tick, split='Validation')
earlystop, save = self.earlystopping(losses.avg)
if not earlystop and save:
state = {
'epoch': epoch,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'val_loss_min': losses.avg
}
self.earlystopping.save_checkpoint(state, losses.avg)
return earlystop
def train(self, epoch):
losses = AverageMeter()
self.model.train()
for i, (img, gt) in tqdm(enumerate(self.train_loader), desc='Train', total=len(self.train_loader)):
img = img.cuda()
gt = gt.cuda()
east_detect = self.model(img)
loss = self.criterion(gt, east_detect)
losses.update(loss.item(), img.size(0))
                # backward propagation: clear gradients, backpropagate, then step
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.scheduler.step()  # step the scheduler after the optimizer (PyTorch >= 1.1)
if (i + 1) % opt.print_step == 0:
tqdm.write(
'Training loss - Epoch: [{0}][{1}/{2}] Loss {loss.val:.4f} Avg Loss {loss.avg:.4f}'.format(
epoch, i + 1, len(self.train_loader), loss=losses))
save_log(losses, epoch, i + 1, len(self.train_loader), self.tick, split='Training')
class LRPolicy:
def __init__(self, rate, step):
self.rate = rate
self.step = step
def __call__(self, it):
return self.rate ** (it // self.step)
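    # Illustration (not part of the original script): LRPolicy implements a
    # stepwise exponential decay. Assuming rate=0.1 and step=100, LambdaLR
    # multiplies the base learning rate by 0.1 ** (it // 100): a factor of 1.0
    # for iterations 0-99, 0.1 for 100-199, 0.01 for 200-299, and so on.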
print('=== AdvancedEAST ===')
print('Task id: {0}'.format(opt.task_id))
    print('=== Initializing DataLoader ===')
print('Multi-processing on {0} cores'.format(opt.num_process))
batch_size = opt.batch_size_per_gpu
trainset = custom_dset(split='train')
valset = custom_dset(split='val')
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn,
num_workers=opt.num_workers, drop_last=False)
val_loader = DataLoader(valset, batch_size=1, collate_fn=collate_fn, num_workers=opt.num_workers)
print('=== Building Network ===')
model = East()
model = model.cuda()
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"
model = nn.DataParallel(model, device_ids=opt.gpu_ids) # 数据并行
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Total parameters: {0}'.format(params))
cudnn.benchmark = True
criterion = LossFunc()
optimizer = Adam(model.parameters(), lr=opt.lr_rate)
    # decay every opt.decay_step epochs, i.e. every decay_step iterations
decay_step = len(train_loader) * opt.decay_step
scheduler = LambdaLR(optimizer, lr_lambda=LRPolicy(rate=opt.decay_rate, step=decay_step))
print('Batch size: {0}'.format(batch_size))
print('Initial learning rate: {0}\nDecay step: {1}\nDecay rate: {2}\nPatience: {3}'.format(
opt.lr_rate, opt.decay_step, opt.decay_rate, opt.patience))
start_epoch = 0
val_loss_min = None
print('=== Training ===')
wrap = Wrapped(train_loader, val_loader, model, criterion, optimizer, scheduler, start_epoch, val_loss_min)
wrap()
| 40.651613 | 123 | 0.577845 | 749 | 6,301 | 4.699599 | 0.23498 | 0.03125 | 0.025568 | 0.008523 | 0.213636 | 0.184659 | 0.164205 | 0.153977 | 0.122727 | 0.099432 | 0 | 0.008947 | 0.308205 | 6,301 | 154 | 124 | 40.915584 | 0.798578 | 0.018568 | 0 | 0.096 | 0 | 0.016 | 0.107125 | 0.004983 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064 | false | 0 | 0.152 | 0.008 | 0.256 | 0.088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77c66ea30fa9ef6d5f9730bf0414d7cdb56d8682 | 3,579 | py | Python | jax_influence/selection.py | google-research/jax-influence | 74bd321156b5445bb35b9594568e4eaaec1a76a3 | [
"Apache-2.0"
] | 8 | 2022-02-17T10:19:27.000Z | 2022-03-28T12:33:57.000Z | jax_influence/selection.py | google-research/jax-influence | 74bd321156b5445bb35b9594568e4eaaec1a76a3 | [
"Apache-2.0"
] | null | null | null | jax_influence/selection.py | google-research/jax-influence | 74bd321156b5445bb35b9594568e4eaaec1a76a3 | [
"Apache-2.0"
] | 1 | 2022-03-02T14:32:56.000Z | 2022-03-02T14:32:56.000Z | # Copyright 2021 The Jax Influence Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for selecting subsets of parameters."""
from typing import Mapping, Union, Tuple
import flax
from flax import traverse_util
import jax
import jax.numpy as jnp
from jax_influence.types import PyTree
from jax_influence.types import SelectionFn
def split_params(params: PyTree,
select_path_fn: SelectionFn) -> Tuple[PyTree, PyTree]:
"""Decomposes parameters in two pieces using a selection function.
Args:
params: Frozen dict of parameters.
select_path_fn: Evaluates to True for the parameter paths to be selected.
Returns:
    A tuple (selected, unselected), where the first contains the parameters
    taken by the selection function and the second contains the
    remaining parameters.
"""
flattened = traverse_util.flatten_dict(flax.core.unfreeze(params))
selected, unselected = dict(), dict()
for k, v in flattened.items():
if select_path_fn(k):
selected[k] = v
else:
unselected[k] = v
selected = traverse_util.unflatten_dict(selected)
unselected = traverse_util.unflatten_dict(unselected)
return flax.core.FrozenDict(selected), flax.core.FrozenDict(unselected)
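# A minimal usage sketch for split_params (hypothetical parameter names, kept
# as comments so the module's import-time behavior is unchanged):
#
#   params = flax.core.FrozenDict(
#       {'Dense_0': {'kernel': jnp.ones((2, 2))},
#        'Dense_1': {'kernel': jnp.ones((2, 2))}})
#   sel, unsel = split_params(params, lambda path: path[0] == 'Dense_0')
#   # sel contains only the Dense_0 subtree; unsel contains only Dense_1.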
def merge_params(params_left: PyTree, params_right: PyTree) -> PyTree:
"""Merges two dictionaries of parameters.
Args:
params_left: First PyTree.
params_right: Second PyTree.
Returns:
The merge of the two parameter PyTrees.
"""
out = traverse_util.flatten_dict(flax.core.unfreeze(params_left))
params_right = traverse_util.flatten_dict(flax.core.unfreeze(params_right))
for k, v in params_right.items():
    assert k not in out  # key paths of the two trees must not overlap
out[k] = v
out = traverse_util.unflatten_dict(out)
return flax.core.FrozenDict(out)
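# Note: merge_params inverts split_params. For any selection function fn,
# merge_params(*split_params(params, fn)) rebuilds a FrozenDict with the same
# leaves as params, because the two halves have disjoint key paths.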
def param_size(params: PyTree) -> int:
"""Computes the total size of parameters."""
sizes = jax.tree_map(jnp.size, params)
return sum(jax.tree_leaves(sizes))
def summarize_split_effect(
params: PyTree,
select_fn: SelectionFn) -> Mapping[str, Union[str, int, float]]:
"""Summarizes the effect of splitting parameters."""
total_size = param_size(params)
sel, _ = split_params(params, select_fn)
selected_size = param_size(sel)
out = {}
out['total_size'] = total_size
out['pretty_total_size'] = f'{total_size:.3e}'
out['selected_size'] = selected_size
out['pretty_selected_size'] = f'{selected_size:.3e}'
out['selected%'] = selected_size / total_size
return out
| 33.448598 | 79 | 0.740151 | 512 | 3,579 | 5.0625 | 0.298828 | 0.046296 | 0.020062 | 0.024691 | 0.393904 | 0.373071 | 0.373071 | 0.373071 | 0.320988 | 0.320988 | 0 | 0.006063 | 0.170439 | 3,579 | 106 | 80 | 33.764151 | 0.866959 | 0.492316 | 0 | 0 | 0 | 0 | 0.059942 | 0 | 0 | 0 | 0 | 0 | 0.023256 | 1 | 0.093023 | false | 0 | 0.162791 | 0 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77c6869548be7b262a6b24e861485690a9a1e3b0 | 20,327 | py | Python | CureIAM/processors/gcpcloudiam.py | gojek/CureIAM | 83cdf6ef4d61b563ae8ac69fbf008f8338f6361f | [
"Apache-2.0"
] | 17 | 2021-11-10T08:32:31.000Z | 2022-03-03T12:20:38.000Z | CureIAM/processors/gcpcloudiam.py | gojekfarm/CureIAM | 83cdf6ef4d61b563ae8ac69fbf008f8338f6361f | [
"Apache-2.0"
] | null | null | null | CureIAM/processors/gcpcloudiam.py | gojekfarm/CureIAM | 83cdf6ef4d61b563ae8ac69fbf008f8338f6361f | [
"Apache-2.0"
] | 3 | 2021-11-11T17:20:16.000Z | 2021-12-02T20:00:34.000Z | """Plugin to process the data retrieved from `gcpcloud.CureIAM` plugin
"""
import json
import logging
import datetime
from CureIAM.models.iamriskscore import IAMRiskScoreModel
from CureIAM.models.applyrecommendationmodel import IAMApplyRecommendationModel
from CureIAM import util
_log = logging.getLogger(__name__)
class GCPIAMRecommendationProcessor:
"""SimpleProcessor plugin to perform processing on
gcpcloud.CureIAM IAMRecommendation_record."""
def __init__(self, enable_enforcer=False, enforcer=None):
"""Create an instance of :class:`GCPIAMRecommendationProcessor` plugin.
"""
self._recommendation_applied = 0
self._recommendation_applied_today = 0
self._enforcer = enforcer
self._enable_enforcer = enable_enforcer
if self._enforcer:
self._apply_recommendation_allowlist_projects = enforcer.get('allowlist_projects', None)
# Don't perform operations on these projects
self._apply_recommendation_blocklist_projects = enforcer.get('blocklist_projects', None)
# Don't perform operations on these accounts_ids
self._apply_recommendation_blocklist_accounts = enforcer.get('blocklist_accounts', None)
self._apply_recommendation_allowlist_account_types = enforcer.get('allowlist_account_types', ['user', 'group'])
self._apply_recommendation_blocklist_account_types = enforcer.get('blocklist_account_types', ['serviceAccount'])
# Min recommendation apply score is 60 to default for user
self._apply_recommendation_min_score_user = enforcer.get('min_safe_to_apply_score_user', 60)
# Min recommendation apply score is 60 to default for groups
self._apply_recommendation_min_score_group = enforcer.get('min_safe_to_apply_score_group', 60)
# Min recommendation apply score is 60 to default for SA
self._apply_recommendation_min_score_SA = enforcer.get('min_safe_to_apply_score_SA', 60)
self._apply_recommendations_svc_acc_key_file = enforcer.get('key_file_path', None)
self._cloud_resource = util.build_resource(
service_name='cloudresourcemanager',
key_file_path=self._apply_recommendations_svc_acc_key_file
)
self._recommender_resource = util.build_resource(
service_name='recommender',
key_file_path=self._apply_recommendations_svc_acc_key_file
)
def eval(self, record):
"""Function to perform data processing.
Arguments:
record (dict): Record to evaluate.
{
'raw': {
"name": "projects/{project-id}/locations/{location}/recommenders/google.iam.policy.Recommender/recommendations/{recommendation-id}",
"description": "Replace the current role with a smaller role to cover the permissions needed.",
"lastRefreshTime": "2021-01-18T08:00:00Z",
"primaryImpact": {
"category": "SECURITY"
},
"content": {
"operationGroups": [
{
"operations": [
{
"action": "add",
"resourceType": "cloudresourcemanager.googleapis.com/Project",
"resource": "//cloudresourcemanager.googleapis.com/projects/565961175665",
"path": "/iamPolicy/bindings/*/members/-",
"value": "user:foo@bar.com",
"pathFilters": {
"/iamPolicy/bindings/*/condition/expression": "",
"/iamPolicy/bindings/*/role": "roles/storage.objectCreator"
}
},
{
"action": "remove",
"resourceType": "cloudresourcemanager.googleapis.com/Project",
"resource": "//cloudresourcemanager.googleapis.com/projects/565961175665",
"path": "/iamPolicy/bindings/*/members/*",
"pathFilters": {
"/iamPolicy/bindings/*/condition/expression": "",
"/iamPolicy/bindings/*/members/*": "user:<user-name>@<doamin.com>",
"/iamPolicy/bindings/*/role": "roles/storage.objectAdmin"
}
}
                        ]
                    }
                ]
            },
"stateInfo": {
"state": "ACTIVE"
},
"etag": "\"ef625ab631b20e49\"",
"recommenderSubtype": "REPLACE_ROLE",
"associatedInsights": [
{
"insight": "projects/{project-id}/locations/{location}/recommenders/google.iam.policy.Recommender/recommendations/{recommendation-id}"
}
]
}
}
Yields:
dict: Processed record.
{
'GCPIAMProcessor': {
'record_type': 'iam_recommendation'
'recommendation_name' : name,
'project': project,
'recommendation_description' : description,
'recommendation_action': content.operationGroups.operations[i],
                    'recommendation_recommender_subtype': recommenderSubtype,
'recommendation_insights': associatedInsights
}
}
"""
# Extract the different `CureIAM_record.recommendation_action.value`
# from the gcpcloud.GCPCloudIAMRecommendations
iam_raw_record = record.get('raw', {})
recommendation_dict = dict()
if iam_raw_record is not None:
recommendation_dict.update(
{
'project' : iam_raw_record['project'],
'recommendation_id': iam_raw_record['name'],
'recommendation_description': iam_raw_record['description'],
'recommendation_actions' : iam_raw_record['content']['operationGroups'][0]['operations'],
                    'recommendation_recommender_subtype': iam_raw_record['recommenderSubtype'],
'recommendation_insights': [ i.get('insights') for i in iam_raw_record['associatedInsights']]
}
)
            # Identify the account the recommendation acts on. The member info
            # appears in the 'remove' operations inside
            # iam_raw_record['content']['operationGroups'][i]['operations'].
_actor = ''
_actor_total_permissions = 0
_actor_exercised_permissions = 0
_actor_exercised_permissions_category = ''
for op_grp in iam_raw_record['content']['operationGroups']:
for op in op_grp['operations']:
if op['action'] == 'remove':
_actor = op['pathFilters']['/iamPolicy/bindings/*/members/*']
# After above parsing _actor would contain something like
# <account_type>:<account_id>
_actor_type, _actor = _actor.split(':')
recommendation_dict.update(
{
'account_type': _actor_type,
'account_id': _actor
}
)
# Get all the Permissions the current actor have
# insights is a list, in case of multiple insights
# all insights will have same `currentTotalPermissionsCount`
# So we are good to include the results from the first one
# only.
insights = iam_raw_record.get('insights', None)
if insights:
_content = insights[0].get('content', None)
if _content:
_actor_exercised_permissions = len(_content.get(
'exercisedPermissions',
[]
)) + len(
_content.get(
'inferredPermissions',
[]
)
)
_actor_total_permissions = _content.get(
'currentTotalPermissionsCount',
'0'
)
_actor_exercised_permissions_category = insights[0].get(
'category',
''
)
recommendation_dict.update(
{
'account_total_permissions': int(_actor_total_permissions),
'account_used_permissions': _actor_exercised_permissions,
'account_permission_insights_category': _actor_exercised_permissions_category
}
)
_res = {
'raw': iam_raw_record,
'processor': recommendation_dict ,
'score': IAMRiskScoreModel(recommendation_dict).score(),
'apply_recommendation': IAMApplyRecommendationModel(recommendation_dict).model()
}
_res['apply_recommendation'].update(
{
'safe_to_apply_score': _res['score']['safe_to_apply_recommendation_score']
}
)
# If recommendation was applied in past
# update the risk score and safe_to_apply_
# _score to 0
if _res['raw']['stateInfo']['state']=='SUCCEEDED':
_res['score'].update(
{
'risk_score': 0,
'over_privilege_score': 0
}
)
self._recommendation_applied += 1
_log.info('Recommendation %s applied in past, setting score to 0', recommendation_dict['recommendation_id'])
            # Enforce the recommendation before saving it to the DB.
            # Also don't re-apply the recommendation if it is already applied.
if self._enforcer and _res['raw']['stateInfo']['state']=='ACTIVE':
_log.info('Enforcing recommendation %s ...', recommendation_dict['recommendation_id'])
                _recommendation_applied = self._enforce_recommendation(_res)
                if _recommendation_applied:
_res['raw']['stateInfo']['state'] = 'SUCCEEDED'
_res['apply_recommendation'].update(
{
'recommendation_state': 'Applied',
'recommendation_applied_time': str(datetime.datetime.utcnow().isoformat())
}
)
_res['score'].update(
{
'risk_score': 0,
'over_privilege_score': 0
}
)
self._recommendation_applied_today += 1
_log.info('Applied Recommendation %s', recommendation_dict['recommendation_id'])
else:
                    _log.warning('Recommendation %s not applied', recommendation_dict['recommendation_id'])
yield _res
def _enforce_recommendation(self, record):
"""Method to perform Recommendation enforcement
IAM recommendation doesn't have API to apply the recommendation
directly rather we will have to create IAM resource which will
perform the policy enforcement. This method does the same.
Arguments:
record(dict): dict record contaning raw + processor record
Returns:
            bool: Whether the recommendation was applied successfully.
"""
"""
Flow:
Apply IAM policy from recommender
- success
- mark recommendation as succeeded
- return True
- no
- dont change the recommendation status
- return False
"""
        if not self._enable_enforcer:
            return False
cloud_resource = self._cloud_resource
recommender_resource = self._recommender_resource
_processor_record = record.get('processor', None)
_score_record = record.get('score', None)
if _processor_record and _score_record:
_project = _processor_record.get('project', None)
_recommendation_actions = _processor_record.get('recommendation_actions', None)
_recommendation_id = _processor_record.get('recommendation_id', None)
_account_id = _processor_record.get('account_id')
_account_type = _processor_record.get('account_type')
_safety_score = _score_record.get('safe_to_apply_recommendation_score', None)
_we_want_to_apply_recommendation = False
_log.info('Testing recommendation for project %s; account %s; safety_score %d',
_project,
_account_id,
_safety_score)
if (
(
self._apply_recommendation_allowlist_projects is None
or (_project not in self._apply_recommendation_blocklist_projects
and _project in self._apply_recommendation_allowlist_projects)
)
and
(
_account_type not in self._apply_recommendation_blocklist_account_types
and _account_type in self._apply_recommendation_allowlist_account_types
)
and
(
_account_id not in self._apply_recommendation_blocklist_accounts
)
):
# If Recommendation is for SA, apply only for ['REMOVE_ROLE', 'REPLACE_ROLE']
if (
_account_type == 'serviceAccount'
                    and _processor_record.get('recommendation_recommender_subtype') in ['REMOVE_ROLE', 'REPLACE_ROLE']
):
_we_want_to_apply_recommendation = True
else:
if _account_type != 'serviceAccount':
                        # If the member is an owner of any project, don't apply the recommendation.
                        # TODO: string-matching on the raw bindings is a poor way to detect owners; find a better way.
                        if 'owner' not in str(record['raw']['content']['operationGroups']):
                            _we_want_to_apply_recommendation = True
                if _account_type == 'user' and _safety_score < self._apply_recommendation_min_score_user:
                    _we_want_to_apply_recommendation = False
                elif _account_type == 'group' and _safety_score < self._apply_recommendation_min_score_group:
                    _we_want_to_apply_recommendation = False
                elif _account_type == 'serviceAccount' and _safety_score < self._apply_recommendation_min_score_SA:
                    _we_want_to_apply_recommendation = False
if _we_want_to_apply_recommendation:
_log.info('Applying recommendation for project %s; account %s; account_type %s ; safety_score %d',
_project,
_account_id,
_account_type,
_safety_score)
_policies = (
cloud_resource.projects()
.getIamPolicy(
resource=_project,
body={"options": {"requestedPolicyVersion": "1"}}
).execute()
)
_updated_policies = _policies
for _recommendation_action in _recommendation_actions:
if _recommendation_action.get('action') == 'remove':
member = (
_recommendation_action.get('pathFilters')
.get('/iamPolicy/bindings/*/members/*')
)
role = (
_recommendation_action.get('pathFilters')
.get('/iamPolicy/bindings/*/role')
)
_updated_policies = self.modify_policy_remove_member(
_updated_policies,
role,
member
)
elif _recommendation_action.get('action') == 'add':
member = _recommendation_action.get('value')
role = (
_recommendation_action.get('pathFilters')
.get('/iamPolicy/bindings/*/role')
)
_updated_policies = self.modify_policy_add_member(
_updated_policies,
role,
member
)
#Apply the policies present in recommendations
policy = (
cloud_resource.projects()
.setIamPolicy(resource=_project, body={'policy': _updated_policies})
.execute()
)
# Update the recommendation status.
_status = (
recommender_resource
.projects()
.locations()
.recommenders()
.recommendations()
.markSucceeded(
body={
'etag': record.get('raw').get('etag'),
'stateMetadata': {
'reviewed-by': 'cureiam',
'owned-by': 'security'
}
},
name=_recommendation_id)
.execute()
)
# So we have applied recommendation and we are good.
return True
return False
def modify_policy_remove_member(self, policy, role, member):
"""Removes a member from a role binding."""
try:
binding = next(b for b in policy["bindings"] if b["role"] == role)
if "members" in binding and member in binding["members"]:
binding["members"].remove(member)
except StopIteration:
            # The binding was already removed in a previous iteration.
pass
return policy
def modify_policy_add_member(self, policy, role, member):
"""Adds a new role binding to a policy."""
binding = {"role": role, "members": [member]}
policy["bindings"].append(binding)
return policy
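    # Illustration (sample data, not from a real recommender response): how the
    # two policy helpers above rewrite a getIamPolicy() result before it is
    # passed back to setIamPolicy().
    #
    #   policy = {'bindings': [{'role': 'roles/storage.objectAdmin',
    #                           'members': ['user:foo@bar.com']}]}
    #   policy = self.modify_policy_remove_member(
    #       policy, 'roles/storage.objectAdmin', 'user:foo@bar.com')
    #   policy = self.modify_policy_add_member(
    #       policy, 'roles/storage.objectCreator', 'user:foo@bar.com')
    #   # foo@bar.com now holds objectCreator instead of objectAdmin.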
def done(self):
"""Perform cleanup work.
Since this is a mock plugin, this method does nothing. However,
a typical event plugin may or may not need to perform cleanup
work in this method depending on its nature of work.
"""
_log.info('Recommendation applied: %s; Recommendations applied today: %s',
self._recommendation_applied, self._recommendation_applied_today) | 46.197727 | 166 | 0.509421 | 1,615 | 20,327 | 6.095975 | 0.199381 | 0.059827 | 0.039716 | 0.009243 | 0.326663 | 0.254241 | 0.173794 | 0.136516 | 0.114678 | 0.101168 | 0 | 0.006461 | 0.413735 | 20,327 | 440 | 167 | 46.197727 | 0.819669 | 0.276775 | 0 | 0.208494 | 0 | 0 | 0.149329 | 0.04625 | 0 | 0 | 0 | 0.002273 | 0 | 1 | 0.023166 | false | 0.003861 | 0.023166 | 0 | 0.069498 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77c83499cfbfd5ad6ff4654b86b77b358a42fede | 7,864 | py | Python | models/abstract_model.py | kkangshen/bayesian-deep-rul | 449038571097cfccee5e128623a16a963a4dca63 | [
"MIT"
] | 6 | 2020-02-28T14:56:46.000Z | 2022-03-24T02:44:56.000Z | models/abstract_model.py | kkangshen/bayesian-deep-rul | 449038571097cfccee5e128623a16a963a4dca63 | [
"MIT"
] | null | null | null | models/abstract_model.py | kkangshen/bayesian-deep-rul | 449038571097cfccee5e128623a16a963a4dca63 | [
"MIT"
] | 4 | 2020-01-02T15:03:02.000Z | 2022-03-10T12:46:35.000Z | # -*- coding: utf-8 -*-
"""Abstract model definition."""
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
class AbstractModel(nn.Module):
def __init__(self, input_size):
"""
Parameters
----------
input_size : (int, int, int)
Input size.
"""
super(AbstractModel, self).__init__()
self.input_size = input_size
self.criterion = nn.MSELoss(reduction="sum")
self.layers = None
def forward(self, x):
"""Forward pass.
Parameters
----------
x : Tensor
Input sample.
Returns
-------
Tensor
Output label.
"""
if not self.layers:
raise NotImplementedError
self.kl = 0
for layer in self.layers:
out = layer(x)
if len(out) == 2: # TODO: improve
# Bayesian
x, _kl = out
self.kl += _kl
else:
# frequentist
x = out
return x.view(-1)
def loss(self, pred, label, beta=0):
"""Compute loss.
Parameters
----------
pred : Tensor
Predicted label.
label : Tensor
True label.
beta : float, optional
Beta factor.
Returns
-------
Tensor
Loss.
"""
if not self.layers:
raise NotImplementedError
likelihood = -0.5 * self.criterion(pred, label)
complexity = beta * self.kl if beta != 0 else 0
return complexity - likelihood
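    # The value returned by loss() is the negative evidence lower bound (ELBO)
    # up to additive constants: under a Gaussian likelihood,
    # -0.5 * sum((pred - label)^2) is the log-likelihood term, and beta * KL is
    # the complexity penalty, so loss = beta * KL - log_likelihood.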
def get_weight_statistics(self):
"""Extract weight statistics for later visualization (bias not used).
Returns
-------
([str], [ndarray], [ndarray])
List of layer names,
list of 1D array of `float` representing layer weight means,
list of 1D array of `float` representing layer weight standard deviations.
"""
if not self.layers:
raise NotImplementedError
names = []
qmeans = []
qstds = []
for idx, layer in enumerate(self.layers):
if hasattr(layer, "qw_mean") and hasattr(layer, "log_alpha"):
names.append(str(layer.__class__).split(".")[-1].split("'")[0] + "-" + str(idx + 1))
qmeans.append(layer.qw_mean.detach().cpu().numpy())
qstds.append(np.sqrt(np.exp(layer.log_alpha.detach().cpu().numpy()) * (layer.qw_mean.detach().cpu().numpy() ** 2)))
else:
trainable_params = [param.detach().cpu().numpy() for param in layer.parameters() if param.requires_grad]
if len(trainable_params) > 0:
weights = np.asarray(trainable_params[0])
names.append(str(layer.__class__).split(".")[-1].split("'")[0] + "-" + str(idx + 1))
qmeans.append(weights)
qstds.append(np.zeros(weights.shape))
return names, qmeans, qstds
# Source code modified from:
# Title: sksq96/pytorch-summary
# Author: Shubham Chandel (sksq96)
# Date: 2018
# Availability: https://github.com/sksq96/pytorch-summary/tree/b50f213f38544ac337beeeda93b03c7e48e69c78
def summary(self, log_fn, batch_size=-1, device="cuda"):
"""Log model summary.
Parameters
----------
log_fn : callable
Logging function.
batch_size : int, optional
Batch size.
device : string, optional
Device.
Returns
-------
int
Number of trainable parameters.
"""
if not self.layers:
raise NotImplementedError
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
summary[m_key]["input_shape"] = list(input[0].size())
summary[m_key]["input_shape"][0] = batch_size
if isinstance(output, (list, tuple)):
summary[m_key]["output_shape"] = [[-1] + list(o.size())[1 :] for o in output][0]
else:
summary[m_key]["output_shape"] = list(output.size())
summary[m_key]["output_shape"][0] = batch_size
params = 0
if hasattr(module, "params_count") and callable(module.params_count):
params += module.params_count()
summary[m_key]["trainable"] = True
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += torch.prod(torch.LongTensor(list(module.weight.size())))
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += torch.prod(torch.LongTensor(list(module.bias.size())))
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
and not (module == self)
):
hooks.append(module.register_forward_hook(hook))
device = device.lower()
assert device in [
"cuda",
"cpu",
], "Input device is not valid, please specify 'cuda' or 'cpu'."
if device == "cuda" and torch.cuda.is_available():
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
        # create a dummy input batch for a tracing forward pass
x = torch.rand(1, *self.input_size).type(dtype)
# create properties
summary = OrderedDict()
hooks = []
# register hook
self.apply(register_hook)
# make a forward pass
self(x)
# remove these hooks
for h in hooks:
h.remove()
log_fn("________________________________________________________________")
line_new = "{:>20} {:>25} {:>15}".format("Layer (type)", "Output Shape", "Param #")
log_fn(line_new)
log_fn("================================================================")
total_params = 0
trainable_params = 0
line_count = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
log_fn(line_new)
line_count += 1
log_fn("================================================================")
log_fn("Total params: {0:,}".format(total_params))
log_fn("Trainable params: {0:,}".format(trainable_params))
log_fn("Non-trainable params: {0:,}".format(total_params - trainable_params))
log_fn("________________________________________________________________")
return trainable_params
| 33.042017 | 131 | 0.517294 | 798 | 7,864 | 4.798246 | 0.245614 | 0.014364 | 0.028728 | 0.01567 | 0.20632 | 0.140507 | 0.086707 | 0.074171 | 0.051188 | 0.028728 | 0 | 0.017071 | 0.344481 | 7,864 | 237 | 132 | 33.181435 | 0.725703 | 0.192014 | 0 | 0.166667 | 0 | 0 | 0.115764 | 0.043582 | 0 | 0 | 0 | 0.004219 | 0.008333 | 1 | 0.058333 | false | 0 | 0.033333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77c9573c06e85833053d03d6dbce3da891ac8dbe | 4,316 | py | Python | ezql.py | Dimwest/MyEzQL | 866c7e853cf605d475e8204a7465f6e596bcf2d8 | [
"MIT"
] | 10 | 2019-01-18T15:54:35.000Z | 2019-02-21T16:18:41.000Z | ezql.py | Dimwest/MyEzQL | 866c7e853cf605d475e8204a7465f6e596bcf2d8 | [
"MIT"
] | 1 | 2020-08-08T20:50:00.000Z | 2020-08-08T20:50:00.000Z | ezql.py | Dimwest/MyEzQL | 866c7e853cf605d475e8204a7465f6e596bcf2d8 | [
"MIT"
] | null | null | null | import fire
from configparser import ConfigParser
from utils.processing import str_to_sql_dict
from utils.validation import *
from utils.logging import *
from parse.worker import Worker
from output.cmd import beautify
from output.mermaid import Mermaid
from output.json import to_json
from typing import Optional, List
from pathlib import Path
class MyEzQl(object):
def parse(self, i: str, ds: Optional[str]=None, dl: Optional[str]=None,
pmode: Optional[str]=None, chart: Optional[str]=None,
json: Optional[str]=None, tables: Optional[List[str]]=None,
procedures: Optional[List[str]]=None,
fmode: Optional[str]=None, v: Optional[str]=None) -> None:
"""
Core function parsing input file or directory and pretty-printing results
in the terminal.
Provides various parsing and output options to tweak according to needs.
:param i: path to input .sql file or directory containing .sql files
        :param ds: default schema, can be set in config.ini for convenience
:param dl: delimiter, defaults to ;;
:param pmode: parsing mode, can be 'procedure' or 'ddl'
        :param chart: path to output .html flowchart, defaults to None, in which
        case no output file is created
        :param json: path to output .json file, defaults to None, in which case
        no output file is created
:param tables: list of table names to filter on, only the parents
and children of these table(s) will be kept in the outputs.
Procedures filtering has precedence over tables filtering.
:param procedures: list of procedure names to filter on, only the
statements located inside the selected procedure(s) will be kept
in outputs. Procedures filtering has precedence over tables filtering.
:param fmode: filtering mode, can be 'simple' or 'rec'
:param v: verbosity level, which will ultimately set the DEBUG output level.
Must be one of ('v', 'vv', 'vvv', 'vvvv'), defaults to None, resulting in
logging.INFO logger level
"""
# Read config
cfg = ConfigParser()
cfg.read(f'{Path(__file__).parent}/config.ini')
# Set default schema to config value if not provided
ds = cfg['parser_config']['default_schema'] if not ds else ds
# Set delimiter to config value if not provided
dl = cfg['parser_config']['delimiter'] if not dl else dl
# Set parsing mode to config value if not provided
pmode = cfg['parser_config']['default_parsing_mode'] if not pmode else pmode
fmode = cfg['parser_config']['default_filter_mode'] if not fmode else fmode
v = cfg['parser_config']['default_verbosity'] if not v else v
validate_args(i, chart, json, tables, procedures, pmode, fmode, v)
set_verbosity(v)
logger.warning(f'\nStart parsing with parameters:'
f'\n\n default schema --> {ds}'
f'\n delimiter --> {dl}'
f'\n parsing mode --> {pmode}'
f"\n filter mode --> {fmode if tables or procedures else 'off'} "
f"\n{' -> on procedure(s) ' + str(procedures) if procedures else ''}"
f"\n{' -> on table(s) ' + str(tables) if tables else ''}")
# Configure and run parser
worker = Worker(default_schema=ds, delimiter=dl, pmode=pmode, fmode=fmode)
worker.run(i)
# If procedure filter defined, apply filtering to results
if procedures:
procedures = str_to_sql_dict(procedures)
worker.procedures_filter(procedures)
# If tables filter defined, apply filtering to results
if tables:
tables = str_to_sql_dict(tables)
worker.tables_filter(tables)
# Pretty print results in terminal
beautify(worker.results)
        # Report any files that failed to parse
worker.execution_warnings()
# If .html flowchart output required, create it
if chart:
m = Mermaid(worker.results)
m.tables_chart(chart)
if json:
to_json(worker.results, json)
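# A hypothetical command-line invocation (argument values are examples only):
#
#   python ezql.py parse --i ./queries --ds my_schema --pmode ddl \
#       --chart ./flowchart.html --json ./results.json --v vv
#
# Fire exposes the parse() method above as a CLI command and maps its keyword
# arguments onto --flags.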
if __name__ == '__main__':
fire.Fire(MyEzQl)
| 37.530435 | 95 | 0.630908 | 560 | 4,316 | 4.789286 | 0.273214 | 0.02349 | 0.03915 | 0.032811 | 0.165548 | 0.155854 | 0.110365 | 0.082028 | 0.082028 | 0.035048 | 0 | 0 | 0.284291 | 4,316 | 114 | 96 | 37.859649 | 0.868242 | 0.377896 | 0 | 0 | 0 | 0 | 0.199037 | 0.013644 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0 | 0.22 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ce28391d9e87410ba9c6fe9f7e9101b1ee66d1 | 16,521 | py | Python | 04-data-lake/etl.py | Ceridan/data-engineering-projects | c608ea76e6db0069f1b8dc24b16c367cf243f657 | [
"MIT"
] | null | null | null | 04-data-lake/etl.py | Ceridan/data-engineering-projects | c608ea76e6db0069f1b8dc24b16c367cf243f657 | [
"MIT"
] | null | null | null | 04-data-lake/etl.py | Ceridan/data-engineering-projects | c608ea76e6db0069f1b8dc24b16c367cf243f657 | [
"MIT"
] | null | null | null | import configparser
import os
from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import col, asc, desc
from pyspark.sql.functions import date_format, row_number, monotonically_increasing_id
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType, DoubleType, TimestampType
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID'] = config.get('S3', 'AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY'] = config.get('S3', 'AWS_SECRET_ACCESS_KEY')
def create_spark_session():
"""Create session on the AWS EMR Spark cluster. Required to processing data using Spark"""
spark = SparkSession \
.builder \
.appName('Sparkify Data Lake') \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
def process_song_data(spark, input_data, output_data):
"""Process raw songs dataset using Spark and create Songs and Artists dimensional tables stored in S3"""
print('Start processing song data...')
# Read song data file
song_data_path = input_data + 'song_data/*/*/*/*'
df = spark.read.json(song_data_path)
# Process Data Frame with raw songs data and create Songs dimensional table stored in S3
process_songs(spark, df, output_data)
# Process Data Frame with raw songs data and create Artists dimensional table stored in S3
process_artists(spark, df, output_data)
print('Finish processing song data.')
def process_log_data(spark, input_data, output_data):
"""
1. Process raw logs dataset using Spark and create Users and Time dimensional tables stored in S3.
2. Process both raw logs and songs dataset and create Songplays fact table stored in S3.
"""
print('Start processing log data...')
# Read log data file
log_data_path = input_data + 'log_data/*'
log_df = spark.read.json(log_data_path)
# Process Data Frame with raw logs data and create Users dimensional table stored in S3
process_users(spark, log_df, output_data)
# Process Data Frame with raw logs data and create Time dimensional table stored in S3
process_time(spark, log_df, output_data)
# Read song data file
song_data_path = input_data + 'song_data/*/*/*/*'
song_df = spark.read.json(song_data_path)
# Process both Data Frames with raw logs and songs data and create Songplays fact table stored in S3
process_songplays(spark, song_df, log_df, output_data)
print('Finish processing log data.')
def process_songs(spark, df, output_data):
"""Process Data Frame with raw songs data using Spark and create Songs dimensional table stored in S3"""
print('Processing songs...')
# Define schema for the Songs table. Schema also could be inferred implicitly
# but defining it manually protects us from wrong type conversions
songs_schema = StructType([
StructField('song_id', StringType(), nullable=False),
StructField('title', StringType(), nullable=False),
StructField('artist_id', StringType(), nullable=True),
StructField('year', LongType(), nullable=True),
StructField('duration', DoubleType(), nullable=True)
])
# Cleanup data. Remove rows with empty song_id or title and select required fields for Songs table.
# We also use dropDuplicates by song_id here to avoid the same song row appears twice in the table.
songs_rdd = df \
.filter(col('song_id').isNotNull()) \
.filter(col('title').isNotNull()) \
.dropDuplicates(['song_id']) \
.select('song_id', 'title', 'artist_id', 'year', 'duration') \
.rdd
# Create Songs table using clean data and schema.
songs_table = spark.createDataFrame(songs_rdd, songs_schema)
print('Writing songs_table data frame to parquet to S3')
# Write Songs table to parquet files partitioned by year and artist to S3
songs_table_path = output_data + 'tables/songs/songs.parquet'
songs_table \
.write \
.partitionBy('year', 'artist_id') \
.mode('overwrite') \
.parquet(songs_table_path)
print('Songs table has been created.')
def process_artists(spark, df, output_data):
"""Process Data Frame with raw songs data using Spark and create Artists dimensional table stored in S3"""
print('Processing artists...')
# Define schema for the Artists table. Schema also could be inferred implicitly
# but defining it manually protects us from wrong type conversions
artists_schema = StructType([
StructField('artist_id', StringType(), nullable=False),
StructField('name', StringType(), nullable=False),
StructField('location', StringType(), nullable=True),
StructField('latitude', DoubleType(), nullable=True),
StructField('longitude', DoubleType(), nullable=True)
])
# Cleanup data. Remove rows with empty artist_id or artist_name and select required fields for Artists table.
# We also use dropDuplicates by artist_id here to avoid the same artist row appears twice in the table.
artists_rdd = df \
.filter(col('artist_id').isNotNull()) \
.filter(col('artist_name').isNotNull()) \
.dropDuplicates(['artist_id']) \
.select('artist_id', 'artist_name', 'artist_location', 'artist_latitude', 'artist_longitude') \
.rdd
# Create Artists table using clean data and schema.
artists_table = spark.createDataFrame(artists_rdd, artists_schema)
print('Writing artists_table data frame to parquet to S3')
# Write Artists table to parquet files to S3
artists_table_path = output_data + 'tables/artists/artists.parquet'
artists_table \
.write \
.mode('overwrite') \
.parquet(artists_table_path)
print('Artists table has been created.')
def process_users(spark, df, output_data):
"""
Process Data Frame with raw logs data using Spark and create Users dimensional table stored in S3.
To process Users data properly we need to make two decisions:
1. Log file have different actions, ex. NextSong, Home, Login etc. Should we filter logs data by action or not?
Because we want to store information about all of our users thus we do not want to filter data by action
and we will write all users to the Users table even if them never perform NextSong action.
2. The same user can occurs multiple times in the log file. There are two approaches to deal with it:
- We can create historical Users dimension table where each row will have extra fields
EffectiveDateFrom and EffectiveDateTo. It allows us to analyze all changes that was made by the user,
ex. he/she may change name, switch from free to paid subscription and vice versa.
- Also we may store only the latest state of our users. It means that we will write to the Users dimension
table only latest occurrence in the log file for each user (ordered by timestamp).
For the current processing task we will use the second approach: write only the latest state of our users.
"""
print('Processing users...')
# Define schema for the Users table. Schema also could be inferred implicitly
# but defining it manually protects us from wrong type conversions
users_schema = StructType([
StructField('user_id', LongType(), nullable=False),
StructField('first_name', StringType(), nullable=True),
StructField('last_name', StringType(), nullable=True),
StructField('gender', StringType(), nullable=True),
StructField('level', StringType(), nullable=True)
])
# Use Window function to enumerate all occurrences of the single user in the log file.
# When it is done, we just can select each row with the value 1 for the number of occurrences (also see next
# code statement).
users_window = Window \
.partitionBy('userId') \
.orderBy(col('ts').desc()) \
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
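    # Illustration (made-up rows): for userId=7 with events at ts=30, 20 and 10,
    # row_number() over this window assigns num=1, 2, 3 in descending ts order,
    # so keeping only num == 1 retains the latest (ts=30) row for that user.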
    # Cleanup data. Remove rows with empty userId, apply the Window function to keep
    # only the latest occurrence for each user and select required fields for the Users table.
    # dropDuplicates by userId is applied after the latest-row filter; the data is already
    # unique at that point, but the call makes the solution more robust.
    users_rdd = df \
        .filter(col('userId').isNotNull()) \
        .withColumn('num', row_number().over(users_window)) \
        .filter(col('num') == 1) \
        .dropDuplicates(['userId']) \
        .withColumn('user_id', col('userId').cast(LongType())) \
        .select('user_id', 'firstName', 'lastName', 'gender', 'level') \
        .rdd
# Create Users table using clean data and schema.
users_table = spark.createDataFrame(users_rdd, users_schema)
print('Writing users_table data frame to parquet to S3')
# Write Users table to parquet files to S3
users_table_path = output_data + 'tables/users/users.parquet'
users_table \
.write \
.mode('overwrite') \
.parquet(users_table_path)
print('Users table has been created.')
def process_time(spark, df, output_data):
"""
Process Data Frame with raw logs data using Spark and create Time dimensional table stored in S3.
To properly create Time table we need to convert timestamp field in the logs. There are two approaches
how to deal with it:
- Use Spark udf() function and write processing code as normal Python code.
- Use power of Spark and its predefined functions to work with timestamp.
For the current processing task we will use the second approach: rely on Spark predefined functions.
"""
print('Processing time...')
# Define schema for the Time table. Schema also could be inferred implicitly
# but defining it manually protects us from wrong type conversions
time_schema = StructType([
StructField('start_time', TimestampType(), nullable=False),
StructField('hour', IntegerType(), nullable=False),
StructField('day', IntegerType(), nullable=False),
StructField('week', IntegerType(), nullable=False),
StructField('month', IntegerType(), nullable=False),
StructField('year', IntegerType(), nullable=False),
StructField('weekday', IntegerType(), nullable=False)
])
# Take unique timestamps from the log data and apply various functions to extract different parts of datetime
# on the select stage to get all required fields for the Time table.
# We also use dropDuplicates by timestamp here to avoid the same timestamp row appears twice in the table.
time_rdd = df \
.select('ts') \
.withColumn('timestamp', (col('ts') / 1000).cast(TimestampType())) \
.dropDuplicates(['timestamp']) \
.select(
col('timestamp').alias('start_time'),
hour('timestamp').alias('hour'),
dayofmonth('timestamp').alias('day'),
weekofyear('timestamp').alias('week'),
month('timestamp').alias('month'),
year('timestamp').alias('year'),
            # dayofweek() gives 1 (Sunday) .. 7 (Saturday); the original
            # date_format(..., 'F') pattern returned day-of-week-in-month,
            # which is not a weekday.
            dayofweek('timestamp').alias('weekday')
) \
.rdd
# Create Time table using clean data and schema.
time_table = spark.createDataFrame(time_rdd, time_schema)
print('Writing time_table data frame to parquet to S3')
# Write Time table to parquet files partitioned by year and month to S3
time_table_path = output_data + 'tables/time/time.parquet'
time_table \
.write \
.partitionBy('year', 'month') \
.mode('overwrite') \
.parquet(time_table_path)
print('Time table has been created.')
def process_songplays(spark, song_df, log_df, output_data):
"""
Process Data Frame with raw logs and songs data using Spark and create Songplays fact table stored in S3.
To create Songplays table we need raw data from both logs and songs files. Here we will join both tables
and the tricky part is to choose proper key for the joining. Joining also helps us to cleanup data, because
we do not want to include rows to the Songplays table where logs data do not match songs data,
ex. some song name appears in the log but it doesn't exist in the song data.
Thus for current processing task we will choose joining by several conditions:
- Songs data `title` should match logs data `song`.
- Songs data `artist_name` should match logs data `artist`.
- Songs data `duration` should match logs data `length`.
"""
print('Processing songplays...')
# Define schema for the Songplays table. Schema also could be inferred implicitly
# but defining it manually protects us from wrong type conversions.
# Songplays schema contains two additional columns: "year" and "month" for partitioning.
songplays_schema = StructType([
StructField('songplay_id', LongType(), nullable=False),
StructField('start_time', TimestampType(), nullable=False),
StructField('user_id', LongType(), nullable=False),
StructField('level', StringType(), nullable=True),
StructField('song_id', StringType(), nullable=False),
StructField('artist_id', StringType(), nullable=False),
StructField('session_id', LongType(), nullable=True),
StructField('location', StringType(), nullable=True),
StructField('user_agent', StringType(), nullable=True),
StructField('year', IntegerType(), nullable=False),
StructField('month', IntegerType(), nullable=False)
])
# Cleanup data. Remove rows with empty song_id or artist_id from Songs data.
clean_song_df = song_df \
.filter(col('song_id').isNotNull()) \
.filter(col('artist_id').isNotNull())
# Cleanup data. Choose only NextSong actions from Log data.
clean_log_df = log_df \
.filter(col('page') == 'NextSong')
# Join songs and logs data frames, enrich with missing columns and select required columns
# to create Songplays table.
# Also we use Spark function `monotonically_increasing_id` to create unique identifiers for Songplays table rows.
songplays_rdd = clean_song_df \
.join(clean_log_df,
(clean_song_df.title == clean_log_df.song)
& (clean_song_df.artist_name == clean_log_df.artist)
& (clean_song_df.duration == clean_log_df.length)
, 'inner') \
.withColumn('id', monotonically_increasing_id() + 1) \
.withColumn('start_time', (col('ts') / 1000).cast(TimestampType())) \
.withColumn('user_id', col('userId').cast(LongType())) \
.withColumn('year', year('start_time')) \
.withColumn('month', month('start_time')) \
.select('id', 'start_time', 'user_id', 'level', 'song_id', 'artist_id', 'sessionId', 'location',
'userAgent', 'year', 'month') \
.repartition('year', 'month') \
.rdd
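    # Note on monotonically_increasing_id(): the generated 64-bit IDs are
    # unique and increasing within the job but not consecutive (the partition
    # ID is encoded in the upper bits), which is sufficient for a surrogate
    # songplay_id key.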
# Create Songplays table using clean data and schema.
songplays_table = spark.createDataFrame(songplays_rdd, songplays_schema)
print('Writing songplays_table data frame to parquet to S3')
# Write Songplays table to parquet files partitioned by year and month to S3
songplays_table_path = output_data + 'tables/songplays/songplays.parquet'
songplays_table \
.write \
.partitionBy('year', 'month') \
.mode('overwrite') \
.parquet(songplays_table_path)
print('Songplays table has been created.')
def main():
"""Create Spark session and call functions to process raw logs and songs datasets"""
# Create Spark session for application "Sparkify Data Lake"
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3n://ceri-sparkify"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
# Stops Spark session for the job
spark.stop()
# Entrypoint for the Python program
if __name__ == "__main__":
main()
| 44.056 | 120 | 0.685794 | 2,149 | 16,521 | 5.164262 | 0.161005 | 0.018021 | 0.036763 | 0.014868 | 0.460443 | 0.380699 | 0.301496 | 0.255902 | 0.156064 | 0.119121 | 0 | 0.00349 | 0.219539 | 16,521 | 374 | 121 | 44.173797 | 0.85722 | 0.409721 | 0 | 0.20202 | 0 | 0 | 0.192913 | 0.022984 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.035354 | 0 | 0.085859 | 0.09596 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d27a48425b7756b0ea323bb46686f3b1ceed5d | 1,196 | py | Python | DataStructure and algorithms/Overallfibnocci.py | Rajatkhatri7/Project-Milap | 0bb5dfc05064a8727760755fa55e53fd7bb8d8d3 | [
"Apache-2.0"
] | null | null | null | DataStructure and algorithms/Overallfibnocci.py | Rajatkhatri7/Project-Milap | 0bb5dfc05064a8727760755fa55e53fd7bb8d8d3 | [
"Apache-2.0"
] | null | null | null | DataStructure and algorithms/Overallfibnocci.py | Rajatkhatri7/Project-Milap | 0bb5dfc05064a8727760755fa55e53fd7bb8d8d3 | [
"Apache-2.0"
] | null | null | null | # grows via 2^n/2
# fn= { 0 ; n=0
# 1 ; n=1
# f(n-1)+f(n-2) ; n>1}
def fib(n):
if n<=1:
return n
else:
return fib(n-1)+fib(n-2)
# for large n it will take a very long time to compute
# bad algorithm
"""
F(n)
/ \
f(n-1) f(n-2)
/ \ / \
f(n-2) f(n-3) f(n-3) f(n-4)
/ \ / \ / \ / \
/ \ / \ / \ f(n-5) f(n-6)
/ \ / \ f(n-4) f(n-5)
f(n-3) f(n-4) f(n-4) f(n-5)
Above we can see that we are computing same thing again(look on (f-3)) which is not required
"""
def Fastfib(n):
    # bottom-up version: each value is computed once, so it runs in O(n)
    f = [0, 1]
    for i in range(2, n + 1):
        f.append(f[i - 1] + f[i - 2])
    return f[n]
"""it takes less time it is more powerful"""
if __name__ == "__main__":
n=int(input("enter the no: "))
res=fib(n)
res2=Fastfib(n)
print(f"sum of first {n} fibnocci no is with recursion : ",res)
print(f"sum of first {n} fibnocci no is with fastalgo : ",res2) | 23.92 | 93 | 0.405518 | 197 | 1,196 | 2.42132 | 0.35533 | 0.079665 | 0.025157 | 0.033543 | 0.236897 | 0.224319 | 0.190776 | 0.190776 | 0.138365 | 0.138365 | 0 | 0.053779 | 0.424749 | 1,196 | 50 | 94 | 23.92 | 0.639535 | 0.129599 | 0 | 0.095238 | 0 | 0 | 0.228407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0 | 0 | 0.238095 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d4c896f4135bf9076e4a4fc25159dd59f2bc8a | 5,765 | py | Python | unit-2/tower.py | GalvinGao/2019-ProgrammingCourse | b668bc9bab902959a574aa3db73ae481131c0c27 | [
"MIT"
] | null | null | null | unit-2/tower.py | GalvinGao/2019-ProgrammingCourse | b668bc9bab902959a574aa3db73ae481131c0c27 | [
"MIT"
] | null | null | null | unit-2/tower.py | GalvinGao/2019-ProgrammingCourse | b668bc9bab902959a574aa3db73ae481131c0c27 | [
"MIT"
] | null | null | null | import time
import turtle as t
t.mode('standard')
t.speed(8)
DISTANCE = 8
RESIZE_RATIO = 6
t.pensize(RESIZE_RATIO)
class Restorer:
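    """Remember the turtle's position at construction time and jump back to it on restore()."""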
def __init__(self):
self.last_pos = t.pos()
def restore(self):
t.goto(self.last_pos[0], self.last_pos[1])
class Draw:
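    """Drawing helpers for the exercises; all coordinates are scaled by RESIZE_RATIO."""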
@staticmethod
def goto(x, y, heading=0):
t.penup()
t.goto(x * RESIZE_RATIO, y * RESIZE_RATIO)
t.setheading(heading)
t.pendown()
@staticmethod
def line(first_line=(), second_line=()):
restorer = Restorer()
        assert isinstance(first_line, tuple) and len(first_line) == 2, \
            "'first_line' must be a tuple with 2 positional parameters."
        assert isinstance(second_line, tuple) and len(second_line) == 2, \
            "'second_line' must be a tuple with 2 positional parameters."
t.penup()
t.goto(first_line[0] * RESIZE_RATIO, first_line[1] * RESIZE_RATIO)
t.pendown()
t.goto(second_line[0] * RESIZE_RATIO, second_line[1] * RESIZE_RATIO)
t.penup()
restorer.restore()
def rectangle_absolute(self, top_left_corner: tuple, bottom_right_corner: tuple, fill_color: str = "black"):
t.fillcolor(fill_color)
self.goto(top_left_corner[0], top_left_corner[1])
t.begin_fill()
for _ in range(2):
t.forward((bottom_right_corner[0] - top_left_corner[0]) * RESIZE_RATIO)
t.left(90)
t.forward((bottom_right_corner[1] - top_left_corner[1]) * RESIZE_RATIO)
t.left(90)
t.end_fill()
@staticmethod
def circle(distance: float = DISTANCE):
t.circle(RESIZE_RATIO * distance)
    def function(self, _function, trace_size: float = 0.1, x_range: tuple = (), y_range: tuple = ()):
        # Trace y = _function(x) over x_range in steps of trace_size.
        restorer = Restorer()
        if x_range:
            x = x_range[0]
            while x <= x_range[1]:
                t.goto(x * RESIZE_RATIO, _function(x) * RESIZE_RATIO)
                x += trace_size
        restorer.restore()
def square(self, stroke="black"):
t.color(stroke)
for _ in range(4):
t.forward(DISTANCE * RESIZE_RATIO)
self.turn_left()
def rectangle_relative(self, x_side, y_side):
for _ in range(2):
t.forward(x_side * RESIZE_RATIO)
self.turn_left()
t.forward(y_side * RESIZE_RATIO)
self.turn_left()
def triangle(self):
self.polygon(sides=3, fill="white", stroke="black")
def polygon(self, sides=5, fill="red", stroke="black"):
assert sides >= 3, "Side amount of a polygon should be greater or equals to 3."
t.color(stroke, fill)
        turn_angle = 360 / sides
        t.begin_fill()
        for _ in range(sides):
            t.forward(RESIZE_RATIO * DISTANCE / sides * 5)
            t.left(turn_angle)
t.end_fill()
def car(self):
self.goto(-4, -3)
self.rectangle_relative(8, 3)
t.fillcolor("black")
self.goto(-2, -4)
t.begin_fill()
self.circle(1)
t.end_fill()
self.goto(2, -4)
t.begin_fill()
self.circle(1)
t.end_fill()
self.goto(-4, -2.5)
t.begin_fill()
self.circle(0.5)
t.end_fill()
def house(self):
self.rectangle_relative(8, 6)
self.goto(8, 6, heading=-150)
triangle_sides = 4.6
t.forward(triangle_sides * RESIZE_RATIO)
t.left(60)
t.forward(triangle_sides * RESIZE_RATIO)
def tower(self):
self.line((0, 0), (24, 0))
self.line((5, 10), (0, 0))
self.line((19, 10), (24, 0))
self.rectangle_absolute((5, 10), (19, 12), "black")
self.line((7, 12), (10, 22))
self.line((17, 12), (14, 22))
self.rectangle_absolute((8, 22), (16, 23), "black")
self.line((10, 23), (11, 35))
self.line((13, 35), (14, 23))
self.rectangle_absolute((10, 35), (14, 36), "black")
self.line((11, 36), (11, 48))
self.line((13, 36), (13, 48))
self.line((19, 10), (0, 0))
self.line((5, 10), (24, 0))
self.line((14, 21), (7, 13))
self.line((17, 13), (10, 21))
self.line((13, 34), (10, 24))
self.line((11, 34), (14, 24))
self.line((13, 47), (11, 37))
self.line((11, 47), (13, 37))
self.goto(12, 48)
t.fillcolor("black")
t.begin_fill()
self.circle(2)
t.end_fill()
def turn_left(self):
t.left(90)
def turn_right(self):
t.right(90)
def turn_around(self):
t.left(180)
def start_section(self, text):
t.penup()
self.turn_right()
t.forward(DISTANCE * RESIZE_RATIO)
t.pendown()
t.write(text)
t.penup()
self.goto(0, 0, 0)
t.pendown()
t.pensize(2)
def end_section(self):
time.sleep(.5)
t.reset()
draw = Draw()
# 1. Square
draw.start_section("1. Square")
draw.square()
draw.end_section()
# 2. Rectangle
draw.start_section("2. Rectangle")
draw.rectangle_relative(8, 4)
draw.end_section()
# 3. Triangle
draw.start_section("3. Triangle")
draw.triangle()
draw.end_section()
# 4. Circle
draw.start_section("4. Circle")
draw.circle()
draw.end_section()
# 5. Blue Square
draw.start_section("5. Blue Square")
draw.square("blue")
draw.end_section()
# 6. Hexagon with red background and yellow border
draw.start_section("6. Hexagon with red background and yellow border")
draw.polygon(sides=6, fill="red", stroke="yellow")
draw.end_section()
# 7. Car
draw.start_section("7. Car")
draw.car()
draw.end_section()
# 8. House
draw.start_section("8. House")
draw.house()
draw.end_section()
# 10. Tower
draw.start_section("10. Tower")
draw.tower()
draw.end_section()
| 27.716346 | 112 | 0.580399 | 815 | 5,765 | 3.958282 | 0.164417 | 0.064786 | 0.044637 | 0.017359 | 0.269374 | 0.179479 | 0.110353 | 0.087415 | 0.087415 | 0.087415 | 0 | 0.058879 | 0.269384 | 5,765 | 207 | 113 | 27.850242 | 0.707028 | 0.023244 | 0 | 0.323529 | 0 | 0 | 0.069205 | 0 | 0 | 0 | 0 | 0 | 0.017647 | 1 | 0.117647 | false | 0 | 0.011765 | 0 | 0.141176 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d58ca19b534449cc89976ecbaf5bc5474e985e | 14,960 | py | Python | Project_03_Books/src/game.py | moniqklimek/training | 51504ab839ed4b5ccc5731662a5077d5db334b93 | [
"MIT"
] | null | null | null | Project_03_Books/src/game.py | moniqklimek/training | 51504ab839ed4b5ccc5731662a5077d5db334b93 | [
"MIT"
] | null | null | null | Project_03_Books/src/game.py | moniqklimek/training | 51504ab839ed4b5ccc5731662a5077d5db334b93 | [
"MIT"
] | null | null | null | import json
import pprint
"""
TITLE: imagine buy in bookshoop - interaktive fun with User :)
ISSUE : help you choose the right item, get to know the User's preferences, i.e. - the thematic category that interests him, the results improved for him, a detailed description of the selected item
assumptions:
no method has been developed to protect the program against entering incorrect answers by the User
established:
- that the categories will be written as displayed on the console with uppercase letters (no spaces, etc.)
- that the user will copy the entire title of the book as it is displayed on the console
logic
100. Ask the user what category of prince interests him(show him the sorted results)
101. Enter the selected category and ask if User wants to sort them by:
- increasing price,
- decreasing price,
- the highest number of stars,
- the lowest number of stars,
- availability,
and present the results
102.The user has chosen a given book - show him a short description and product description
logika - PL
100. spytaj Kupujacego jaka kategoria ksiazego go intresuje (pokaz mu posortowane wyniki)
101. wejdz do wybranej kategori i spytaj czy Kupujacy chce posortowac je po:
- cenie rosnacej,
- cenie malejacej,
- najwyzszej ilosci gwiazdek,
- najnizszej ilosci gwiazdek,
- dostepnosci,
i zaprezentuj wyniki do dalszego wyboru w postaci listy
102. user wybral dana ksiazke - pokaz mu do niej szczegolowy opis i opis produktu
"""
# open and read the content of the files from part 01 of this project (the scraping results)
f1 = open('resources/01_category_first_link.json')
scrap1 = json.load(f1)
f1.close()
f2 = open('resources/02_single_books.json')
scrap2 = json.load(f2)
f2.close()
f3 = open('resources/03_details_single_books.json')
scrap3 = json.load(f3)
f3.close()
class Game:
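    """Interactive console assistant: asks for a book category, a sort order, and a title,
    then prints the selected book's details."""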
def __init__(self):
pass
# I am using a file called --> "01_category_first_link.json"
# important because each file has different keys to access the content of the dictionaries
def sorted_thematica_category(self,s1):
category_list = [letter['Book_Category'] for letter in s1]
sorted_category_list = sorted(category_list)
return sorted_category_list
# I am using a file called --> "02_single_books.json"
def show_all_books_ctagory(self, s2, choosen_category):
list_all_books_this_cat=[]
for el in s2:
if el['Book_Category'] == choosen_category:
list_all_books_this_cat.append(el['Book_Title'])
how_many_books = len(list_all_books_this_cat)
return how_many_books, list_all_books_this_cat
def printing_long_questions(self):
print('--------')
print('Please tell me how to sort the results for YOU. Write 1 or 2 or 3 or 4 or 5.')
print(' \t\t 1 - sort by price - DESC.')
print(' \t\t 2 - sort by price - ASC.')
print(' \t\t 3 - sort by popularity ranking - DESC.')
print(' \t\t 4 - sort by popularity ranking - ASC.')
print(' \t\t 5 - sort by Title alphabetically. ')
def user_choose_filter_method(self, nr, list_title):
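        # Dispatch to the sort method (1-5) chosen by the user and return the sorted result.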
if nr==1 or nr==2:
list_dict_title_and_price=self.generate_tab_title_price(scrap2, list_title)
if nr == 1:
result_method = self.sort_method_1(list_dict_title_and_price)
else:
#nr 2
result_method = self.sort_method_2(list_dict_title_and_price)
if nr == 3:
            # build dicts containing only the 'stars' and 'title' keys
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_3(list_dict_title_and_stars)
if nr == 4:
            # build dicts containing only the 'stars' and 'title' keys
list_dict_title_and_stars = self.generate_tab_title_stars(scrap2, list_title)
# sorted by stars
result_method = self.sort_method_4(list_dict_title_and_stars)
if nr == 5:
result_method = self.sort_method_5(list_title)
return result_method
# building a new DICTIONARY - cutting the content from existing DICTIONARIES
# idea from https://stackoverflow.com/questions/3420122/filter-dict-to-contain-only-certain-keys
def remove_key_from_existing_dict(self, existing_dict, *key_to_delete_from_existing_dict):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_delete_from_existing_dict='Book_Stars'
output--> {'Book_Price': 10.97,'Book_Title': 'The Long Shadow', , 'Book_total_category_amouth': 1}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key not in key_to_delete_from_existing_dict)
return new_dict
def leave_only_selected_keys_in_existing_dict(self,existing_dict, *key_to_stay):
"""
input -{'Book_Price': 10.97, 'Book_Stars': 1, 'Book_Title': 'The Long Shadow', 'Book_total_category_amouth': 1}
key_to_stay='Book_Stars', 'Book_Title'
output--> {'Book_Stars': 1, 'Book_Title': 'The Long Shadow'}
"""
new_dict = dict((key, value) for key, value in existing_dict.items() if key in key_to_stay)
return new_dict
    # building a new list of dictionaries - cutting the content from scraping 2 (a list of dictionaries)
def generate_tab_title_price(self, scrap2, list_title):
        # scrap2 = big list of dicts
        # we want to filter and keep only the titles of interest (list_title)
        # and return only the keys 'Book_Price' and 'Book_Title'
list_dict_only_title_price=[]
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(small_dict, 'Book_Price', 'Book_Title')
list_dict_only_title_price.append(new_short_dict)
return list_dict_only_title_price
def generate_tab_title_stars(self, scrap2, list_title):
        # scrap2 = big list of dicts
        # we want to filter and keep only the titles of interest (list_title)
        # and return only the keys 'Book_Title' and 'Book_Stars'
list_dict_only_title_stars = []
for small_dict in scrap2:
for title in list_title:
if small_dict['Book_Title'] in title:
new_short_dict = self.leave_only_selected_keys_in_existing_dict(
small_dict, 'Book_Title', 'Book_Stars')
list_dict_only_title_stars.append(new_short_dict)
return list_dict_only_title_stars
    def sort_method_1(self, list_dict_title_and_price):
        # Press 1 - sort by price, descending
        # returns a list of dicts with price and title
        # inspiration --> https://stackoverflow.com/questions/1143671/how-to-sort-objects-by-multiple-keys-in-python
sorted_by_price_DESC= sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
return sorted_by_price_DESC
    def sort_method_2(self, list_dict_title_and_price):
        # Press 2 - sort by price, ascending
        # returns a list of dicts with price and title
sorted_by_price_DESC = sorted(list_dict_title_and_price, key=lambda d: (-d['Book_Price'], d['Book_Title']))
sorted_by_price_ASC = sorted_by_price_DESC[::-1]
return sorted_by_price_ASC
def sort_method_3(self, list_dict_only_title_AND_stars):
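        # Press 3 - sort by popularity ranking, max stars to min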
sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title']))
return sorted_by_stars_DESC
def sort_method_4(self, list_dict_only_title_AND_stars):
        # take the list of dicts with stars and title and return it sorted by stars
        # Press 4 - sort by popularity ranking, min stars to max
sorted_by_stars_DESC = sorted(list_dict_only_title_AND_stars, key=lambda d: (-d['Book_Stars'], d['Book_Title']))
sorted_by_stars_ASC = sorted_by_stars_DESC[::-1]
return sorted_by_stars_ASC
def sort_method_5(self, list_title):
# Press 5 - sort by title alphabetically
"""
["It's Only the Himalayas", 'Full Moon over Noah’s Ark: An Odyssey to Mount Ararat and Beyond', 'See America: A Celebration of Our National Parks & Treasured Sites', 'Vagabonding: An Uncommon Guide to the Art of Long-Term World Travel', 'Under the Tuscan Sun',
'A Summer In Europe', 'The Great Railway Bazaar', 'A Year in Provence (Provence #1)', 'The Road to Little Dribbling: Adventures of an American in Britain (Notes From a Small Island #2)', 'Neither Here nor There: Travels in Europe', '1,000 Places to See Before You Die']
"""
        # we have a chosen category and a list of books - sort the titles alphabetically
sorted_title = sorted(list_title)
return sorted_title
# choose inf detail from scrap 3
# I am using a file called --> "03_details_single_books.json"
def catch_index_if_have_title(self,title_choosen, scrap3):
        # input: a list of dicts
        # purpose: find the index of the dict whose 'title_book' equals title_choosen,
        # which links us to the other fields of that dict (price, description, ...)
counter_index_in_list_dicts = 0
for el in scrap3:
if el['title_book'] == title_choosen:
break
else:
counter_index_in_list_dicts += 1
return counter_index_in_list_dicts
def return_details(self,title_choosen, scrap3):
        # we need the index linked to this title
index_list_with_dicts = self.catch_index_if_have_title(title_choosen, scrap3)
tab_details=[]
title_book = scrap3[index_list_with_dicts]["title_book"]
tab_details.append(title_book)
category = scrap3[index_list_with_dicts]["category"]
tab_details.append(category)
price = scrap3[index_list_with_dicts]["price"]
tab_details.append(price)
productDescription = scrap3[index_list_with_dicts]["productDescription"]
tab_details.append(productDescription)
how_many = scrap3[index_list_with_dicts]["in_stock_how_many_available"]
tab_details.append(how_many)
about = scrap3[index_list_with_dicts]['detals_link_to_book']
tab_details.append(about)
upc = scrap3[index_list_with_dicts]["productInformation_UPC"]
tab_details.append(upc)
return tab_details
def printing_final_result(self, tab_details):
title_book = tab_details[0]
category = tab_details[1]
price = tab_details[2]
productDescription = tab_details[3]
in_stock_how_many_available = tab_details[4]
detals_link_to_book = tab_details[5]
productInformation_UPC = tab_details[6]
print('\n\t The book has a title: {}.Category is {}'.format(title_book, category))
print('\n\t Book Price:', price)
print('\n\t Content Description:', productDescription)
print('\n\t We still have {} item/s in stock'.format(in_stock_how_many_available))
print('\n\t If you want to know more about the book, please open the link:', detals_link_to_book)
print('\n\t UPC number:', productInformation_UPC)
# logic for conversation with User through Terminal
def logic(self):
        answer1_user_if_play = input("Do you want to buy an interesting book? :) Choose (n/y) \n")
if answer1_user_if_play == 'y':
print('--------')
print("\t Lets game :) ..... \n\t Below thematical book's Category for Your choose. \n")
#step one - choose category
sorted_category = self.sorted_thematica_category(scrap1)
print(sorted_category)
print('--------')
            customer_choose_category_book = input(
                '\t Please choose one and copy Your choice here ...\n\t (EXAMPLE:... Academic)\n\t (EXAMPLE:... Add a comment)\n\t YOUR TURN - Choose one Category from the list : ...')
"""
while customer_choose_category_book not in sorted_category_list:
print('Please once again choose category. This one not exist in own base and list at top')
"""
if customer_choose_category_book in sorted_category:
how_books, title_books_this_choosen_category = self.show_all_books_ctagory(scrap2, customer_choose_category_book)
                print('We have {} book/books in the shop for the category {}'.format(how_books, customer_choose_category_book))
print(title_books_this_choosen_category)
else:
                print('Please choose a category again. This one does not exist in our base (see the list at the top).')
# step two - choose how user want to sort results and what want to see
self.printing_long_questions()
nr_choosen_method=int(input())
print(title_books_this_choosen_category)
print('--------')
lista_books_filter_by_user_mean=self.user_choose_filter_method(nr_choosen_method, title_books_this_choosen_category)
if len(lista_books_filter_by_user_mean)==1:
                print('\t There is exactly one book in this category')
print('--------')
                # no sense in asking the user to choose a book if only one exists
                # for example, for the category Crime - [{'Book_Stars': 1, 'Book_Title': 'The Long Shadow of Small Ghosts: Murder and Memory in an American City'}]
user_choose_single_title = lista_books_filter_by_user_mean[0]['Book_Title']
tab_inf = self.return_details(user_choose_single_title, scrap3)
#print(tab_inf)
self.printing_final_result(tab_inf)
else:
                print('\t Here is also the list for You')
print(lista_books_filter_by_user_mean)
                # the User chooses a single book title - purpose --> show all details for this book
user_choose_single_title = input('\t\n Now please, copy and paste the entire Title of the book here:...(EXAMPLE:... Feathers: Displays of Brilliant Plumage) ')
# use the scrap nr 3 with details
tab_inf=self.return_details(user_choose_single_title,scrap3)
print(tab_inf)
self.printing_final_result(tab_inf)
        if answer1_user_if_play.strip().lower() in ('n', 'no', 'nie'):
            print('Have a nice day anyway.')
if __name__ == "__main__":
game = Game()
game.logic()
| 43.74269 | 281 | 0.657754 | 2,070 | 14,960 | 4.481159 | 0.204348 | 0.018974 | 0.015416 | 0.018974 | 0.407288 | 0.313389 | 0.268327 | 0.212807 | 0.205908 | 0.181544 | 0 | 0.014616 | 0.259091 | 14,960 | 341 | 282 | 43.870968 | 0.822266 | 0.215642 | 0 | 0.183432 | 0 | 0.023669 | 0.161118 | 0.015537 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106509 | false | 0.005917 | 0.011834 | 0 | 0.207101 | 0.201183 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d754abc0dd2b42226712db869734718758f2ec | 1,592 | py | Python | books_and_chapters/urls.py | assulthoni/Django-Bookworm | d816c099b1eaceff05958ed3bf9e7dd611e9b5fd | [
"MIT"
] | null | null | null | books_and_chapters/urls.py | assulthoni/Django-Bookworm | d816c099b1eaceff05958ed3bf9e7dd611e9b5fd | [
"MIT"
] | null | null | null | books_and_chapters/urls.py | assulthoni/Django-Bookworm | d816c099b1eaceff05958ed3bf9e7dd611e9b5fd | [
"MIT"
] | null | null | null | """django_bookworm.books_and_chapters URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
from django.views.generic.base import RedirectView
from django.contrib.auth.decorators import login_required
urlpatterns = [
path('books/', login_required(views.homepage), name='books'), # for adding a new book
path('books/search/', views.search_book, name='search_book'),
path('books/<slug:slug>/', login_required(views.get_book_details), name='book_detail'),
path('books/<int:pk>/delete/', login_required(views.delete_book), name='delete_single_book'),
path('books/<int:pk>/edit/', login_required(views.edit_book_details), name='book_details_edit'),
path('chapters/add/', views.add_chapter, name='add_chapter'),
path('chapters/<int:pk>/delete/', views.delete_chapter, name='delete_chapter'),
path('chapters/<int:pk>/edit/', views.edit_chapter, name='edit_chapter'),
path('', RedirectView.as_view(url='/accounts/login/', permanent=False))
]
| 49.75 | 100 | 0.723618 | 230 | 1,592 | 4.878261 | 0.330435 | 0.057932 | 0.064171 | 0.02139 | 0.147059 | 0.104278 | 0.066845 | 0 | 0 | 0 | 0 | 0.005776 | 0.130025 | 1,592 | 31 | 101 | 51.354839 | 0.804332 | 0.422739 | 0 | 0 | 0 | 0 | 0.279605 | 0.076754 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d8b870eb29ec01acb07c7f782c1ca8e13356c3 | 573 | py | Python | manage.py | mwaiyusuf/news_highlight | dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb | [
"MIT"
] | null | null | null | manage.py | mwaiyusuf/news_highlight | dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb | [
"MIT"
] | null | null | null | manage.py | mwaiyusuf/news_highlight | dfae21f67c3bbe20521a5c3c96a0a6a759fbd8fb | [
"MIT"
] | null | null | null | from app import create_app
from flask_script import Manager, Server  # import the Manager and Server classes that help launch our app server
# Creating app instance
app = create_app('development')
manager = Manager(app)
manager.add_command('server',Server) #launch app server
@manager.command
def test():
"""Run the ubit tests"""
import unittest
tests = unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if __name__ == '__main__': #checks if the script is run directly
manager.run()
| 30.157895 | 119 | 0.743455 | 77 | 573 | 5.376623 | 0.545455 | 0.094203 | 0.101449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002079 | 0.160558 | 573 | 18 | 120 | 31.833333 | 0.858628 | 0.300175 | 0 | 0 | 0 | 0 | 0.076531 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77d998bd3b17f3da023e7ce99e510488aaf3a564 | 9,384 | py | Python | src/robotrunner.py | bbokser/hopper-mpc-simple | 51fd3c0cb515d32b2a9cce93a0db45dddf95174c | [
"MIT"
] | null | null | null | src/robotrunner.py | bbokser/hopper-mpc-simple | 51fd3c0cb515d32b2a9cce93a0db45dddf95174c | [
"MIT"
] | null | null | null | src/robotrunner.py | bbokser/hopper-mpc-simple | 51fd3c0cb515d32b2a9cce93a0db45dddf95174c | [
"MIT"
] | null | null | null | """
Copyright (C) 2020-2022 Benjamin Bokser
"""
import plots
import mpc_cvx
# import time
# import sys
import numpy as np
import copy
from scipy.linalg import expm
import itertools
np.set_printoptions(suppress=True, linewidth=np.nan)
def projection(p0, v):
# find point p projected onto ground plane from point p0 by vector v
z = 0
t = (z - p0[2]) / v[2]
x = p0[0] + t * v[0]
y = p0[1] + t * v[1]
p = np.array([x, y, z])
return p
class Runner:
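    """Simulates a point-mass hopper: plans a reference trajectory, queries an MPC
    for reaction forces on a fixed gait schedule, and integrates the linear dynamics
    with an RK4 integrator."""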
def __init__(self, dims=2, ctrl='mpc', dt=1e-3):
self.dims = dims
self.ctrl = ctrl
self.dt = dt
self.total_run = 5000
self.tol = 1e-3 # desired mpc tolerance
self.m = 7.5 # mass of the robot, kg
self.N = 10 # mpc horizon length
self.g = 9.81 # gravitational acceleration, m/s2
self.t_p = 1 # gait period, seconds
self.phi_switch = 0.5 # switching phase, must be between 0 and 1. Percentage of gait spent in contact.
        # for now, the mpc sampling time is equal to the stance time (t_p * phi_switch)
self.mpc_dt = self.t_p * self.phi_switch # mpc sampling time
self.N_time = self.N*self.mpc_dt # mpc horizon time
if dims == 2:
self.n_x = 5 # number of states
self.n_u = 2 # number of controls
self.A = np.array([[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, -1],
[0, 0, 0, 0, 0]])
self.B = np.array([[0, 0],
[0, 0],
[1 / self.m, 0],
[0, 1 / self.m],
[0, 0]])
self.X_0 = np.zeros(self.n_x)
self.X_0[1] = 0.7
self.X_0[-1] = self.g # initial conditions
self.X_f = np.array([2, 0.5, 0, 0, self.g])
elif dims == 3:
self.n_x = 7 # number of states
self.n_u = 3 # number of controls
self.A = np.array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1],
[0, 0, 0, 0, 0, 0, 0]])
self.B = np.array([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1 / self.m, 0, 0],
[0, 1 / self.m, 0],
[0, 0, 1 / self.m],
[0, 0, 0]])
self.X_0 = np.zeros(self.n_x)
self.X_0[2] = 0.7
self.X_0[-1] = self.g # initial conditions
self.X_f = np.hstack([2, 2, 0.5, 0, 0, 0, self.g]).T # desired final state
mu = 0.3 # coeff of friction
self.mpc = mpc_cvx.Mpc(t=self.mpc_dt, A=self.A, B=self.B, N=self.N, m=self.m, g=self.g, mu=mu)
        self.mpc_factor = self.mpc_dt * 2 / self.dt  # number of sim timesteps between mpc replans
def run(self):
total = self.total_run + 1 # number of timesteps to plot
t = 0 # time
t0 = t # starting time
        mpc_factor = self.mpc_factor  # number of sim timesteps between mpc replans
mpc_counter = copy.copy(mpc_factor)
X_traj = np.zeros((total, self.n_x))
X_traj[0, :] = self.X_0 # initial conditions
f_hist = np.zeros((total, self.n_u))
s_hist = np.zeros(total)
U_pred = np.zeros((self.N, self.n_u))
X_pred = np.zeros((self.N, self.n_x))
pf_ref = np.zeros(self.n_u)
j = int(self.mpc_factor)
X_pred_hist = np.zeros((self.N+1, self.n_u))
f_pred_hist = np.zeros((total, self.n_u))
p_pred_hist = np.zeros((total, self.n_u))
for k in range(0, self.total_run):
t = t + self.dt
s = self.gait_scheduler(t, t0)
if self.ctrl == 'mpc':
if mpc_counter == mpc_factor: # check if it's time to restart the mpc
mpc_counter = 0 # restart the mpc counter
X_ref = self.path_plan(X_in=X_traj[k, :])
X_refN = X_ref[::int(self.mpc_dt / self.dt)]
U_pred, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN)
                    p_pred = (X_pred[2, 0:3]+(X_pred[2, 0:3]+X_pred[3, 0:3])/2)/2  # next predicted body position over the next footstep
f_pred = U_pred[2, :] # next predicted foot force vector
p_pred_hist = np.vstack((p_pred_hist, p_pred))
f_pred_hist = np.vstack((f_pred_hist, 0.5*f_pred/np.sqrt(np.sum(f_pred**2))))
pf_ref = np.vstack((pf_ref, projection(p_pred, f_pred)))
X_pred_hist = np.dstack((X_pred_hist, X_pred[:, 0:self.n_u]))
mpc_counter += 1
f_hist[k, :] = U_pred[0, :]*s # take first timestep
else: # Open loop traj opt, this will fail if total != mpc_factor
if int(total/self.N) != mpc_factor:
print("ERROR: Incorrect settings", total/self.N, mpc_factor)
if k == 0:
X_ref = self.path_plan(X_in=X_traj[k, :])
X_refN = X_ref[::int(self.mpc_factor)] # self.traj_N(X_ref)
force_f, X_pred = self.mpc.mpcontrol(X_in=X_traj[k, :], X_ref=X_refN)
for i in range(0, self.N):
f_hist[int(i*j):int(i*j+j), :] = list(itertools.repeat(force_f[i, :], j))
s_hist[k] = s
X_traj[k+1, :] = self.rk4(xk=X_traj[k, :], uk=f_hist[k, :])
# X_traj[k + 1, :] = self.dynamics_dt(X=X_traj[k, :], U=f_hist[k, :], t=self.dt)
# print(X_traj[-1, :])
# print(f_hist[4500, :])
plots.fplot(total, p_hist=X_traj[:, 0:self.n_u], f_hist=f_hist, s_hist=s_hist, dims=self.dims)
plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u], dims=self.dims)
plots.posfplot(p_ref=self.X_f[0:self.n_u], p_hist=X_traj[:, 0:self.n_u],
p_pred_hist=p_pred_hist, f_pred_hist=f_pred_hist, pf_hist=pf_ref, dims=self.dims)
# plots.posplot(p_ref=self.X_f[0:self.n_u], p_hist=X_pred_hist[:, 0:self.n_u, 1], dims=self.dims)
# plots.posplot_t(p_ref=self.X_ref[0:self.n_u], p_hist=X_traj[:, 0:2], total=total)
return None
def dynamics_ct(self, X, U):
# CT dynamics X -> dX
A = self.A
B = self.B
X_next = A @ X + B @ U
return X_next
def dynamics_dt(self, X, U, t):
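        # Exact zero-order-hold discretization over timestep t: exponentiate the
        # augmented matrix [[A, B], [0, 0]] and read off the discrete Ad and Bd blocks.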
n_x = self.n_x # number of states
n_u = self.n_u # number of controls
A = self.A
B = self.B
AB = np.vstack((np.hstack((A, B)), np.zeros((n_u, n_x + n_u))))
M = expm(AB * t)
Ad = M[0:n_x, 0:n_x]
Bd = M[0:n_x, n_x:n_x + n_u]
X_next = Ad @ X + Bd @ U
return X_next
def rk4(self, xk, uk):
# RK4 integrator solves for new X
dynamics = self.dynamics_ct
h = self.dt
f1 = dynamics(xk, uk)
f2 = dynamics(xk + 0.5 * h * f1, uk)
f3 = dynamics(xk + 0.5 * h * f2, uk)
f4 = dynamics(xk + h * f3, uk)
return xk + (h / 6.0) * (f1 + 2 * f2 + 2 * f3 + f4)
def gait_scheduler(self, t, t0):
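        # phi is the normalized phase within the gait cycle, in [0, 1)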
phi = np.mod((t - t0) / self.t_p, 1)
if phi > self.phi_switch:
s = 0 # scheduled swing
else:
s = 1 # scheduled stance
return s
def path_plan(self, X_in):
# Path planner--generate reference trajectory
dt = self.dt
size_mpc = int(self.mpc_factor*self.N) # length of MPC horizon in s TODO: Perhaps N should vary wrt time?
t_ref = 0 # timesteps given to get to target, either mpc length or based on distance (whichever is smaller)
X_ref = None
if self.dims == 2:
t_ref = int(np.minimum(size_mpc, abs(self.X_f[0] - X_in[0])*1000)) # ignore z distance due to bouncing
X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions
# interpolate velocities
X_ref[:-1, 2] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
X_ref[:-1, 3] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
elif self.dims == 3:
t_ref = int(np.minimum(size_mpc, np.linalg.norm(self.X_f[0:2] - X_in[0:2]) * 1000))
X_ref = np.linspace(start=X_in, stop=self.X_f, num=t_ref) # interpolate positions
# interpolate velocities
X_ref[:-1, 3] = [(X_ref[i + 1, 0] - X_ref[i, 0]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
X_ref[:-1, 4] = [(X_ref[i + 1, 1] - X_ref[i, 1]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
X_ref[:-1, 5] = [(X_ref[i + 1, 2] - X_ref[i, 2]) / dt for i in range(0, np.shape(X_ref)[0] - 1)]
if (size_mpc - t_ref) == 0:
pass
elif t_ref == 0:
X_ref = np.array(list(itertools.repeat(self.X_f, int(size_mpc))))
else:
X_ref = np.vstack((X_ref, list(itertools.repeat(self.X_f, int(size_mpc - t_ref)))))
return X_ref
| 43.646512 | 120 | 0.494991 | 1,551 | 9,384 | 2.829142 | 0.154094 | 0.036463 | 0.042388 | 0.043756 | 0.38742 | 0.317001 | 0.297402 | 0.26618 | 0.249772 | 0.214676 | 0 | 0.053106 | 0.361893 | 9,384 | 214 | 121 | 43.850467 | 0.679693 | 0.17679 | 0 | 0.121387 | 0 | 0 | 0.004044 | 0 | 0 | 0 | 0 | 0.004673 | 0 | 1 | 0.046243 | false | 0.00578 | 0.034682 | 0 | 0.127168 | 0.011561 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ddff0fe98781dd316cac4d51b90e21e33f27de | 6,122 | py | Python | toy_example.py | chuchienshu/Pytorch-color-work | d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3 | [
"MIT"
] | null | null | null | toy_example.py | chuchienshu/Pytorch-color-work | d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3 | [
"MIT"
] | null | null | null | toy_example.py | chuchienshu/Pytorch-color-work | d4a6d052bd39e815f2303bc1583e2bc9bdb6cce3 | [
"MIT"
] | null | null | null | from sklearn.neighbors import NearestNeighbors
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def check_value(inds, val):
''' Check to see if an array is a single element equaling a particular value
for pre-processing inputs in a function '''
if(np.array(inds).size==1):
if(inds==val):
return True
return False
def na(): # shorthand for new axis
return np.newaxis
def flatten_nd_array(pts_nd,axis=1):
''' Flatten an nd array into a 2d array with a certain axis
INPUTS
pts_nd N0xN1x...xNd array
axis integer
OUTPUTS
pts_flt prod(N \ N_axis) x N_axis array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
pts_flt = pts_nd.transpose((axorder))
pts_flt = pts_flt.reshape(NPTS,SHP[axis])
return pts_flt
def unflatten_2d_array(pts_flt,pts_nd,axis=1,squeeze=False):
''' Unflatten a 2d array with a certain axis
INPUTS
pts_flt prod(N \ N_axis) x M array
pts_nd N0xN1x...xNd array
axis integer
squeeze bool if true, M=1, squeeze it out
OUTPUTS
pts_out N0xN1x...xNd array '''
NDIM = pts_nd.ndim
SHP = np.array(pts_nd.shape)
nax = np.setdiff1d(np.arange(0,NDIM),np.array((axis))) # non axis indices
NPTS = np.prod(SHP[nax])
if(squeeze):
axorder = nax
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
else:
axorder = np.concatenate((nax,np.array(axis).flatten()),axis=0)
axorder_rev = np.argsort(axorder)
M = pts_flt.shape[1]
NEW_SHP = SHP[nax].tolist()
NEW_SHP.append(M)
pts_out = pts_flt.reshape(NEW_SHP)
pts_out = pts_out.transpose(axorder_rev)
return pts_out
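# Illustrative round-trip (a sketch, not part of the original script):
#   arr  = np.zeros((2, 3, 4, 4))                 # N x C x H x W
#   flat = flatten_nd_array(arr, axis=1)          # -> shape (2*4*4, 3)
#   back = unflatten_2d_array(flat, arr, axis=1)  # -> shape (2, 3, 4, 4)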
class NNEncode():
''' Encode points using NearestNeighbors search and Gaussian kernel '''
def __init__(self,NN,sigma,km_filepath='',cc=-1):
if(check_value(cc,-1)):
self.cc = np.load(km_filepath)
else:
self.cc = cc
self.K = self.cc.shape[0]
self.NN = int(NN)
self.sigma = sigma
self.nbrs = NearestNeighbors(n_neighbors=NN, algorithm='ball_tree').fit(self.cc)
self.alreadyUsed = False
def encode_points_mtx_nd(self,pts_nd,axis=1,returnSparse=False,sameBlock=True):
pts_flt = flatten_nd_array(pts_nd,axis=axis)
#pts_flt ---> [N*H*W, 2]
P = pts_flt.shape[0]
#P ---> N*H*W
if(sameBlock and self.alreadyUsed):
self.pts_enc_flt[...] = 0 # already pre-allocated
print('alreadyUsed')
print(self.p_inds)
else:
print('notUsed')
# print(self.p_inds)
self.alreadyUsed = True
self.pts_enc_flt = np.zeros((P,self.K))
#self.pts_enc_flt.shape ---> [N*H*W, 313]
self.p_inds = np.arange(0,P,dtype='int')[:,na()]
#self.p_inds.shape ---> [N*H*W, 1]
(dists,inds) = self.nbrs.kneighbors(pts_flt)
#inds.shape ---> [N*H*W, NN]
wts = np.exp(-dists**2/(2*self.sigma**2))
wts = wts/np.sum(wts,axis=1)[:,na()]
#wts.shape ---> [N*H*W, NN]
        # assign the Gaussian-kernel weights of the NN (here 10) nearest palette bins to pts_enc_flt
        # for the input feature map (ab values), then unflatten it back to 4d and return
self.pts_enc_flt[self.p_inds,inds] = wts
pts_enc_nd = unflatten_2d_array(self.pts_enc_flt,pts_nd,axis=axis)
#pts_enc_nd.shape -----> [N, 313, H, W]
return pts_enc_nd
def decode_points_mtx_nd(self,pts_enc_nd,axis=1):
pts_enc_flt = flatten_nd_array(pts_enc_nd,axis=axis)
pts_dec_flt = np.dot(pts_enc_flt,self.cc)
pts_dec_nd = unflatten_2d_array(pts_dec_flt,pts_enc_nd,axis=axis)
return pts_dec_nd
def decode_1hot_mtx_nd(self,pts_enc_nd,axis=1,returnEncode=False):
pts_1hot_nd = nd_argmax_1hot(pts_enc_nd,axis=axis)
pts_dec_nd = self.decode_points_mtx_nd(pts_1hot_nd,axis=axis)
if(returnEncode):
return (pts_dec_nd,pts_1hot_nd)
else:
return pts_dec_nd
# self.cretion( output, torch.max(target, 1)[1] )
nnenc = NNEncode(10,5,km_filepath='/home/chuchienshu/Documents/propagation_classification/models/custom_layers/pts_in_hull.npy')
bottom = np.random.randint(0,10,(2,3,3,3)).astype('float32')
# print(bottom)
bt = Variable(torch.from_numpy(bottom).cuda())
fac = np.array([[1,2],[3,4],[5,6]])
fac_a = fac[:,0][np.newaxis,:,np.newaxis,np.newaxis]
fac_b = fac[:,1][np.newaxis,:,np.newaxis,np.newaxis]
pred_ab = np.concatenate((np.sum(bottom * fac_a, axis=1, keepdims=True), np.sum(bottom * fac_b, axis=1, keepdims=True)), axis=1)
# print(fac_a,fac_a.shape)
# print(fac_b,fac_b.shape)
# print(bottom * fac_a, ' jfdis')
# print(bottom * fac_b, ' fac_b')
# print(np.sum(bottom * fac_a, axis=1, keepdims=True), ' 44')
# print(np.sum(bottom * fac_b, axis=1, keepdims=True), ' 66')
print(pred_ab, pred_ab.shape)
for i, im in enumerate(pred_ab):
print(im)
print(i)
exit()
# bt = flatten_nd_array(bt.data.numpy())
##bt = bt.permute(0,2,3,1).contiguous().view(50, -1)
#/////////////////////////////////////////////////////////
bottom = np.random.randint(0,10,(8,2,5,5)).astype('float32')
print(bottom)
nnenc.encode_points_mtx_nd(bottom,axis=1)
for _ in range(6):
print('fjkfd')
print(nnenc.cc
)
print('############')
exit()
#/////////////////////////////////////////////////////////
import matplotlib.pyplot as plt
n = 1024
# x = np.random.normal(0, 1, n)  # mean 0, std 1, generate 1024 numbers
# y = np.random.normal(0, 1, n)
x = X[:,0]
y = X[:,1]
t = np.arctan2(x, y)  # for color value, mapped through cmap
plt.scatter(x, y, s=65, c=t, alpha=0.5)  # s is the marker size; draw each point at its coordinates; alpha is the transparency
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xticks([])
plt.yticks([])
plt.show() | 32.913978 | 128 | 0.611728 | 960 | 6,122 | 3.725 | 0.229167 | 0.026846 | 0.020134 | 0.018177 | 0.338647 | 0.302573 | 0.251678 | 0.214765 | 0.202461 | 0.14821 | 0 | 0.026526 | 0.22411 | 6,122 | 186 | 129 | 32.913978 | 0.726316 | 0.267723 | 0 | 0.243478 | 0 | 0 | 0.034727 | 0.020791 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069565 | false | 0 | 0.052174 | 0.008696 | 0.208696 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77e5180d6ab5330f71187b669f22ec6397e5429c | 514 | py | Python | custom/_legacy/pact/reports/admin_reports.py | dslowikowski/commcare-hq | ad8885cf8dab69dc85cb64f37aeaf06106124797 | [
"BSD-3-Clause"
] | 1 | 2015-02-10T23:26:39.000Z | 2015-02-10T23:26:39.000Z | custom/_legacy/pact/reports/admin_reports.py | SEL-Columbia/commcare-hq | 992ee34a679c37f063f86200e6df5a197d5e3ff6 | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | custom/_legacy/pact/reports/admin_reports.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport
class PactAdminReport(GenericTabularReport, CustomProjectReport):
fields = ['corehq.apps.reports.filters.dates.DatespanFilter']
name = "PACT Admin Reports"
slug = "pactadmin"
emailable = True
exportable = True
report_template_path = "pact/admin/pactadmin_reports.html"
@property
def report_context(self):
ret = {"foo": "bar"}
return ret
| 24.47619 | 65 | 0.725681 | 53 | 514 | 6.962264 | 0.660377 | 0.081301 | 0.138211 | 0.113821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.184825 | 514 | 20 | 66 | 25.7 | 0.880668 | 0 | 0 | 0 | 0 | 0 | 0.223092 | 0.158513 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.846154 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77e99bc3b885eac60af8099d66d105d9eea0d121 | 4,807 | py | Python | tests/test_helper.py | INM-6/correlation-toolbox | 26b9e999069990a8b756d8a4d880bd152f95149f | [
"MIT"
] | 1 | 2018-10-12T22:54:16.000Z | 2018-10-12T22:54:16.000Z | tests/test_helper.py | INM-6/correlation-toolbox | 26b9e999069990a8b756d8a4d880bd152f95149f | [
"MIT"
] | 6 | 2017-04-03T07:44:17.000Z | 2018-06-08T08:37:47.000Z | tests/test_helper.py | INM-6/correlation-toolbox | 26b9e999069990a8b756d8a4d880bd152f95149f | [
"MIT"
] | 2 | 2017-04-05T04:42:12.000Z | 2018-07-17T11:43:24.000Z | # global imports
import unittest
import numpy as np
from future.builtins import range
# local imports
import correlation_toolbox.helper as cthlp
class TestHelper(unittest.TestCase):
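    """Unit tests for correlation_toolbox.helper: spike-train generation,
    gdf sorting, binning, centralizing, and stripping."""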
def setUp(self):
np.random.seed(12345)
self.rate = 30. # (Hz)
self.T = 3e4 # (ms)
self.N = 100
self.p = 0.6 # percentage of neurons active
self.Neff = int(self.p * self.N)
self.cc = 0.3
self.tbin = 1. # (ms)
def test_create_poisson_spiketrains(self):
sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.N)
self.assertEqual(self.N, len(np.unique(sp[:, 0]))) # N
self.assertTrue(self.T >= np.max(sp[:, 1])) # T
emp_rate = 1. * len(sp) / self.T * 1e3 / self.N
self.assertTrue(abs(self.rate - emp_rate) < 1e0) # rate
def test_sort_gdf_by_id(self):
        # create Neff (= p*N) poisson trains instead of N; this leaves empty arrays in sp_srt
sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
self.assertEqual(self.N, len(sp_ids)) # N
self.assertTrue(self.T >= np.max([np.max(x)
for x in sp_srt if len(x) > 0])) # T
for i in range(self.N):
emp_rate = 1. * len(sp_srt[i]) / self.T * 1e3
assert(emp_rate >= 0.)
if emp_rate > 0.:
self.assertTrue(abs(self.rate - emp_rate) < 1e1) # rate
self.assertTrue(min(np.diff(sp_srt[i])) > 0.) # time ordering
def test_instantaneous_spike_count(self):
        # create Neff (= p*N) poisson trains instead of N; this leaves empty arrays
        # in sp_srt, to test binning for empty spiketrains
sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
# test whether binning produces correct results
sp_srt = np.array([[1., 2., 5., 7.], [4., 6., 9.]])
# ground truth
bsp_true = np.array(
[[1, 1, 0, 0, 1, 0, 1, 0], [0, 0, 0, 1, 0, 1, 0, 1]])
bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
self.assertTrue(len(bins) == len(bsp[0])) # number of bins
self.assertEqual(2, len(bsp)) # number of binned spike trains
self.assertEqual(np.sum(bsp_true - bsp), 0.) # histogram
def test_create_correlated_spiketrains_sip(self):
        # create Neff (= p*N) poisson trains instead of N, which changes the correlation
sp = cthlp.create_correlated_spiketrains_sip(
self.rate, self.T, self.Neff, self.cc)
sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
emp_rate = 1. * np.sum(bsp) / self.T * 1e3 / self.N
self.assertTrue(abs(self.p * self.rate - emp_rate) < 5e-1) # rate
self.assertEqual(self.N, len(bsp)) # N
self.assertTrue(self.T >= np.max(bins)) # T
emp_cc = np.corrcoef(cthlp.strip_binned_spiketrains(bsp))
emp_a_cc = []
for i in range(self.Neff):
for j in range(self.Neff):
if i != j:
emp_a_cc.append(emp_cc[i, j])
emp_mu_cc = 1. / (self.N * (self.N - 1.)) * np.sum(emp_a_cc)
# correlation coefficient
self.assertTrue(abs(self.p ** 2 * self.cc - emp_mu_cc) < 2e-2)
def test_centralize(self):
v1 = np.random.normal(-50, 2, int(self.T * 1e1))
v2 = np.random.normal(-30, 2, int(self.T * 1e1))
v_cen_time = cthlp.centralize([v1, v2], time=True)
for v in v_cen_time:
self.assertTrue(abs(np.mean(v)) < 1e-12)
v_cen_units = cthlp.centralize([v1, v2], units=True)
for v in v_cen_units.T:
self.assertTrue(abs(np.mean(v)) < 1e-12)
v_cen_timeunits = cthlp.centralize([v1, v2], time=True, units=True)
self.assertTrue(abs(np.mean(v_cen_timeunits)) < 1e-12)
def test_strip_sorted_spiketrains(self):
sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
self.assertEqual(self.N, len(sp_srt))
sp_srt = cthlp.strip_sorted_spiketrains(sp_srt)
self.assertEqual(self.Neff, len(sp_srt))
def test_strip_binned_spiketrains(self):
sp = cthlp.create_poisson_spiketrains(self.rate, self.T, self.Neff)
sp_ids, sp_srt = cthlp.sort_gdf_by_id(sp, 0, self.N - 1)
bins, bsp = cthlp.instantaneous_spike_count(sp_srt, self.tbin)
self.assertEqual(self.N, len(bsp))
bsp = cthlp.strip_binned_spiketrains(bsp)
self.assertEqual(self.Neff, len(bsp))
if __name__ == '__main__':
unittest.main()
| 44.100917 | 78 | 0.607239 | 740 | 4,807 | 3.767568 | 0.17973 | 0.034075 | 0.047704 | 0.060258 | 0.572095 | 0.465208 | 0.39957 | 0.357245 | 0.346844 | 0.322453 | 0 | 0.030687 | 0.261078 | 4,807 | 108 | 79 | 44.509259 | 0.754223 | 0.097774 | 0 | 0.197674 | 0 | 0 | 0.001856 | 0 | 0 | 0 | 0 | 0 | 0.255814 | 1 | 0.093023 | false | 0 | 0.046512 | 0 | 0.151163 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77eb25a04371bec64faee1671d09a1d02f6b7e59 | 3,783 | py | Python | Tareas/Tarea_4.py | JESUS-2120/Python_2 | b1854b4118215684eb1adb5acdbc3313c2a15f20 | [
"Apache-2.0"
] | null | null | null | Tareas/Tarea_4.py | JESUS-2120/Python_2 | b1854b4118215684eb1adb5acdbc3313c2a15f20 | [
"Apache-2.0"
] | null | null | null | Tareas/Tarea_4.py | JESUS-2120/Python_2 | b1854b4118215684eb1adb5acdbc3313c2a15f20 | [
"Apache-2.0"
] | null | null | null | '''
NAME
Tarea_4.py
VERSION
1.0
AUTHOR
Victor Jesus Enriquez Castro <victorec@lcg.unam.mx>
DESCRIPTION
Empleando Entrez.einfo y ENtrez.read el programa imprime la descripcion
de los campos FieldList y LinkList en la base de datos protein, de la misma
manera dadas palabras claves de busqueda se obtienen los IDs de los articulos
que coinciden con los criterios de busqueda en la base de datos pubmed
CATEGORY
Data Base
INPUT
Este programa unicamente recibe como inputs las palabras clave para la
busqueda de los articulos en la base de datos pubmed
EXAMPLES
Input:
Ingrese el termino con el que desea realizar su busqueda: ludosky ma
Ingrese el campo del termino ingresado: AUTH
Ingrese el termino con el que desea realizar su busqueda: electrocyte
Ingrese el campo del termino ingresado: Title
Ingrese el termino con el que desea realizar su busqueda: Baumannii
Ingrese el campo del termino ingresado: Title
Output:
ECNO -> Description:
EC number for enzyme or CAS registry number
protein_protein_small_genome -> Description:
All proteins from this genome
El archivo con los IDs de su busqueda se encuentra en: ../files/ bajo el nombre IDs.txt
GITHUB
https://github.com/JESUS-2120/Python_2/blob/main/Tareas/Tarea_4.py
'''
# Import the required libraries
from Bio import Entrez
from pprint import pprint
# Provide an email address
Entrez.email = "victorec@lcg.unam.mx"
# TASK 1
# Specify the database of interest
handle = Entrez.einfo(db = "protein")
record = Entrez.read(handle)
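# record["DbInfo"] holds "FieldList" and "LinkList"; each entry is a dict with
# (among others) "Name" and "Description" keys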
# Get the description for each of the requested fields
for i in range(0,len(record["DbInfo"]["FieldList"])):
if record["DbInfo"]["FieldList"][i]["Name"] == "ECNO":
print(record["DbInfo"]["FieldList"][i]["Name"],"->","Description:")
print(record["DbInfo"]["FieldList"][i]["Description"])
print("\n")
for i in range(0,len(record["DbInfo"]["LinkList"])):
if record["DbInfo"]["LinkList"][i]["Name"] == "protein_protein_small_genome":
print(record["DbInfo"]["LinkList"][i]["Name"],"->","Description:")
print(record["DbInfo"]["LinkList"][i]["Description"])
print("\n")
# TASK 2
print("Welcome to the automatic search tool\nIf you want to use the existing format enter (1); if you want to enter your own string enter (2): ")
opc = int(input())
if (opc < 1 or opc > 2):
opc = int(input("Ingrese un numero valido: "))
if opc == 1:
print("Considerando como ejemplo\ntermino = 'ludosky ma[AUTH] AND (electrocyte[Title] OR Baumannii[Title])\ningrese los campos con los que desea realizar su busqueda")
#Creamos la lista palabras que utilizaremos para guardar las palabras de busqueda
palabras = ["","","","","",""]
#Pedimos al usuario las palabras de busqueda
for i in range(3):
palabras[i] = str(input("Ingrese el termino con el que desea realizar su busqueda: "))
palabras[i + 3] = str(input("Ingrese el campo del termino ingresado: "))
#Concatenamos todo en un string que nos permita concretar la busqueda
termino = palabras[0] + "[" + palabras[3] + "]" + " AND (" + palabras[1] + "[" + palabras[4] + "] OR " + palabras[2] + "[" + palabras[5] + "])"
if opc == 2:
termino = input("Ingrese su string de busqueda: ")
# Search the database
handle = Entrez.esearch(db="pubmed", term= termino)
record = Entrez.read(handle)
# Create the IDs output file
IDS = open("../files/IDs.txt","w")
IDS.write("The IDs from your search are: \n")
# Write the IDs to the file we created
for rec in record["IdList"]:
    IDS.write(">" + rec + "\n")
IDS.close()
print("The file with the IDs from your search is in ../files/ under the name IDs.txt")
| 32.333333 | 168 | 0.68755 | 542 | 3,783 | 4.782288 | 0.333948 | 0.027778 | 0.015432 | 0.025077 | 0.31983 | 0.244599 | 0.177469 | 0.148148 | 0.127315 | 0.127315 | 0 | 0.008873 | 0.195612 | 3,783 | 116 | 169 | 32.612069 | 0.842918 | 0.539783 | 0 | 0.114286 | 0 | 0.085714 | 0.455125 | 0.028678 | 0 | 0 | 0 | 0.008621 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ebdf9ca9d4616e429b8e92977f8018b29b5675 | 3,464 | py | Python | HDPython/v_enum.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | null | null | null | HDPython/v_enum.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | null | null | null | HDPython/v_enum.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | 1 | 2021-10-20T20:08:16.000Z | 2021-10-20T20:08:16.000Z | import os,sys,inspect
from HDPython.base import *
from HDPython.v_symbol import v_symbol
from HDPython.primitive_type_converter import get_primitive_hdl_converter
from HDPython.lib_enums import varSig, InOut_t
class v_enum(HDPython_base):
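    """HDL wrapper around a Python Enum: the enum's integer value is stored in a
    v_symbol so it can be used as a variable, signal, or constant in generated HDL."""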
def __init__(self,EnumIn,EnumVal=None,name=None, Inout = InOut_t.Internal_t,includes="",value=None,varSigConst=varSig.variable_t):
super().__init__()
        self.__hdl_converter__ = get_primitive_hdl_converter("v_enum")()
if type(EnumIn).__name__ == "EnumMeta":
Enumtype = EnumIn
elif type(type(EnumIn)).__name__ == "EnumMeta":
Enumtype = type(EnumIn)
EnumVal = EnumIn
if EnumVal == None:
EnumVal = Enumtype(0)
if name == None:
name = Enumtype.__name__
self.symbol = v_symbol(name,EnumVal.value,Inout=Inout,includes=includes,value=EnumVal.value,varSigConst=varSigConst )
self._type = Enumtype
self.name = name
self.__hdl_name__ = None
self._Inout = Inout
self._varSigConst = varSigConst
def __lshift__(self, rhs):
if isinstance(rhs,type(self)):
self.symbol << rhs.symbol
return
if isinstance(rhs,self._type):
self.symbol << value(rhs)
return
raise Exception("[ENUM] Unable tp connect different types", self, rhs)
def _sim_get_new_storage(self):
self.symbol._sim_get_new_storage()
def set_simulation_param(self,module, name,writer):
self.symbol.set_simulation_param(module, name, writer)
def __repr__(self):
ret = str(self._type(value(self.symbol)).name) +": "+ str(value(self.symbol))
return ret
def setInout(self,Inout):
self.symbol.setInout(Inout)
def set_varSigConst(self, varSigConst):
self._varSigConst=varSigConst
self.symbol.set_varSigConst(varSigConst)
def isVarSigType(self, varSigType):
return self.symbol.isVarSigType( varSigType)
def _sim_get_value(self):
return value(self.symbol)
def __eq__(self,rhs):
return value(self) == value(rhs)
def set_vhdl_name(self,name, Overwrite = False):
if self.__hdl_name__ and self.__hdl_name__ != name and Overwrite == False:
raise Exception("double Conversion to vhdl")
else:
self.__hdl_name__ = name
def isInOutType(self, Inout):
if Inout==None or self._Inout == Inout:
return True
elif self._Inout== InOut_t.Master_t:
mem = self.getMember(Inout)
return len(mem) > 0
elif self._Inout == InOut_t.Slave_t:
if Inout == InOut_t.Master_t:
Inout = InOut_t.Slave_t
elif Inout == InOut_t.Slave_t:
Inout = InOut_t.Master_t
elif Inout == InOut_t.input_t:
Inout = InOut_t.output_t
elif Inout == InOut_t.output_t:
Inout = InOut_t.input_t
mem = self.getMember(Inout)
return len(mem) > 0
def __str__(self):
if self.__hdl_name__:
return self.__hdl_name__
return self._type(value(self.symbol)).name
def _issubclass_(self,test):
if super()._issubclass_(test):
return True
return "v_enum" == test
| 28.162602 | 134 | 0.602483 | 408 | 3,464 | 4.762255 | 0.208333 | 0.072054 | 0.062275 | 0.024704 | 0.215646 | 0.063819 | 0.036027 | 0.036027 | 0.036027 | 0 | 0 | 0.001244 | 0.303984 | 3,464 | 122 | 135 | 28.393443 | 0.804645 | 0 | 0 | 0.125 | 0 | 0 | 0.027425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175 | false | 0 | 0.0625 | 0.0375 | 0.4125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77ecf310ae779764d67ed3eb67a498968e7015a3 | 2,146 | py | Python | setup.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | 12 | 2015-11-03T20:41:29.000Z | 2019-02-15T17:13:27.000Z | setup.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | 35 | 2015-08-23T17:10:00.000Z | 2017-05-10T12:08:57.000Z | setup.py | Frojd/Fabrik | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pip
from setuptools import setup, find_packages
import fabrik
if sys.argv[-1] == "publish":
os.system("python setup.py sdist upload")
sys.exit()
package_exclude = ("tests*", "examples*")
packages = find_packages(exclude=package_exclude)
# Convert markdown to rst
try:
from pypandoc import convert
long_description = convert("README.md", "rst")
except Exception:
long_description = ""
setup(
name="fabrik",
version=fabrik.__version__,
description="A simple to use deployment toolkit built on top of Fabric",
long_description=long_description,
author="Fröjd",
author_email="martin.sandstrom@frojd.se",
url="https://github.com/frojd/fabrik",
packages=packages,
include_package_data=True,
install_requires=[
'Fabric==1.12.0',
'Unipath==1.1',
'PyCrypto==2.6.1',
'jinja2==2.8',
'click>=5.0',
'GitPython==1.0.1',
],
tests_require=[
'Fabric==1.12.0',
'Unipath==1.1',
'PyCrypto==2.6.1',
'jinja2==2.8',
'click>=5.0',
'GitPython==1.0.1',
],
entry_points={
"console_scripts": [
"fabrik = fabrik.scripts.fabrik:main",
"fabrik_start = fabrik.cli.scripts.init:main",
"fabrik_cleanup = fabrik.cli.scripts.cleanup:main",
]
},
license="MIT",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: System Administrators",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Software Distribution",
"Topic :: System :: Systems Administration",
],
)
| 26.825 | 76 | 0.596925 | 234 | 2,146 | 5.380342 | 0.512821 | 0.047657 | 0.059571 | 0.015886 | 0.090548 | 0.090548 | 0.090548 | 0.090548 | 0.090548 | 0.090548 | 0 | 0.025031 | 0.255359 | 2,146 | 79 | 77 | 27.164557 | 0.762829 | 0.030755 | 0 | 0.223881 | 0 | 0 | 0.475686 | 0.052961 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.089552 | 0 | 0.089552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77efdc6a298f42286268601e0029f556def6374b | 12,607 | py | Python | tool.py | Illidanz/MonoPriTranslation | 2cff8fe3133d76e1311273ef3b54dde428151390 | [
"MIT"
] | 3 | 2021-10-31T04:20:16.000Z | 2022-02-16T04:12:57.000Z | tool.py | Illidanz/MonoPriTranslation | 2cff8fe3133d76e1311273ef3b54dde428151390 | [
"MIT"
] | 1 | 2021-11-01T10:27:59.000Z | 2021-11-01T10:27:59.000Z | tool.py | Illidanz/MonoPriTranslation | 2cff8fe3133d76e1311273ef3b54dde428151390 | [
"MIT"
] | null | null | null | import codecs
import csv
import filecmp
import os
import time
import click
import pyimgur
import requests
from zipfile import ZipFile, ZIP_DEFLATED
from hacktools import common, wii
version = "1.5.5"
isofile = "data/disc.iso"
infolder = "data/extract/"
outfolder = "data/repack/"
replacefolder = "data/replace/"
fontin = "data/font_input.txt"
fontout = "data/font_output.txt"
fontimgout = "data/extract_FNT/font_jp.png"
fontimgin = "data/work_FNT/font_jp.png"
fontfile = "data/extract/DATA/files/resfont/font_jp.brfnt"
dolin = "data/extract/DATA/sys/main.dol"
dolout = "data/repack/DATA/sys/main.dol"
patchin = "data/extract/DATA/files/"
patchout = "data/repack/DATA/files/"
patchfolder = "data/patch/monopri/"
xmlfile = "data/patch/riivolution/monopri.xml"
@common.cli.command()
@click.option("--iso", is_flag=True, default=False)
@click.option("--msbe", is_flag=True, default=False)
@click.option("--movie", is_flag=True, default=False)
@click.option("--tpl", is_flag=True, default=False)
@click.option("--fnt", is_flag=True, default=False)
@click.option("--speaker", is_flag=True, default=False)
@click.option("--merge", is_flag=True, default=False)
def extract(iso, msbe, movie, tpl, fnt, speaker, merge):
    all = not iso and not msbe and not movie and not fnt and not tpl  # note: shadows the builtin all()
if all or iso:
wii.extractIso(isofile, infolder, outfolder)
if all or msbe:
import extract_msbe
extract_msbe.run(speaker, merge)
if all or movie:
import extract_movie
extract_movie.run()
if all or fnt:
wii.extractFontData(fontfile, fontout)
common.makeFolder("data/extract_FNT/")
wii.extractBRFNT(fontfile, fontimgout)
if all or tpl:
wii.extractARC("data/extract/DATA/files/3d/map/", "data/extract_3D/")
wii.extractARC("data/extract/DATA/files/effect/", "data/extract_EFF/")
wii.extractBREFT("data/extract_EFF", "data/extract_BREFT", "data/out_EFF")
wii.extractARC("data/extract/DATA/files/lytdemo/exp_data/", "data/extract_TPL/")
common.copyFolder("data/extract/DATA/files/textures/", "data/extract_TPL/textures/")
wii.extractTPL("data/extract_TPL/", "data/out_TPL/")
@common.cli.command()
@click.option("--no-patch", is_flag=True, default=False)
@click.option("--msbe", is_flag=True, default=False)
@click.option("--onlyquest", is_flag=True, default=False)
@click.option("--movie", is_flag=True, default=False)
@click.option("--tpl", is_flag=True, default=False)
@click.option("--fnt", is_flag=True, default=False)
def repack(no_patch, msbe, onlyquest, movie, tpl, fnt):
all = not msbe and not movie and not tpl and not fnt
if all or fnt:
common.logMessage("Repacking FNT from", "data/work_FNT", "...")
fontfilein = fontfile
if os.path.isfile(fontfile.replace("/extract/", "/replace/")):
fontfilein = fontfilein.replace("/extract/", "/replace/")
fontfileout = fontfile.replace("/extract/", "/repack/")
wii.repackFontData(fontfilein, fontfileout, fontin)
wii.repackBRFNT(fontfileout, fontimgin)
common.logMessage("Done!")
if all or fnt or msbe:
import repack_msbe
repack_msbe.run(onlyquest)
if all or fnt or movie:
import repack_movie
repack_movie.run()
if all or tpl:
import repack_tpl
repack_tpl.run()
if os.path.isdir(replacefolder):
common.mergeFolder(replacefolder, outfolder)
# Patch the main.dol file
common.copyFile(dolin, dolout)
with common.Stream(dolout, "rb+", False) as f:
# Set the movie subtitles X position to 0 since we're doing some manual centering
        # Change "fsubs f28,f7,f8" to "fsubs f28,f8,f8"
f.seek(0x8CF4) # 0x8000cfb4
f.writeUInt(0xef884028)
if not no_patch:
common.makeFolders(patchfolder)
common.makeFolder(patchfolder.replace("monopri/", "riivolution/"))
common.logMessage("Creating patch folder in", patchfolder, "...")
files = common.getFiles(patchin)
for file in common.showProgress(files):
if patchout + file == dolout:
continue
if not filecmp.cmp(patchin + file, patchout + file):
common.makeFolders(patchfolder + os.path.dirname(file))
common.copyFile(patchout + file, patchfolder + file)
with common.Stream(xmlfile, "w") as f:
f.writeLine('<wiidisc version="1">')
f.writeLine('\t<id game="RSEJGD"/>')
f.writeLine('\t<options>')
f.writeLine('\t\t<section name="Translation">')
f.writeLine('\t\t\t<option name="Translation Patch">')
f.writeLine('\t\t\t\t<choice name="Enabled">')
f.writeLine('\t\t\t\t\t<patch id="monoprifolder"/>')
f.writeLine('\t\t\t\t</choice>')
f.writeLine('\t\t\t</option>')
f.writeLine('\t\t</section>')
f.writeLine('\t</options>')
f.writeLine('\t<patch id="monoprifolder">')
f.writeLine('\t\t<folder external="/monopri" recursive="false"/>')
f.writeLine('\t\t<folder external="/monopri" disc="/"/>')
f.writeLine('\t\t<memory offset="0x8000cfb4" value="ef884028" original="ef874028" />')
f.writeLine('\t</patch>')
f.writeLine('</wiidisc>')
common.logMessage("Creating ZIP file ...")
with common.Stream("patcher.bat", "w") as f:
f.writeLine('del monopri_patched.iso')
f.writeLine('rmdir /s/q patch_temp')
f.writeLine('wit EXTRACT -o %1 patch_temp')
f.writeLine('xcopy patch\\monopri patch_temp\\DATA\\files /s/e/y/q')
f.writeLine('xcopy main.dol patch_temp\\DATA\\sys\\main.dol /y/q')
f.writeLine('wit COPY patch_temp monopri_patched.iso')
f.writeLine('rmdir /s/q patch_temp')
common.copyFile(dolout, "main.dol")
with ZipFile("data/patch.zip", "w", ZIP_DEFLATED) as zip:
for foldername, _, filenames in os.walk("data/patch"):
for filename in filenames:
filepath = os.path.join(foldername, filename)
zip.write(filepath, filepath[5:])
zip.write("main.dol")
zip.write("patcher.bat")
os.remove("patcher.bat")
os.remove("main.dol")
common.logMessage("Done!")
@common.cli.command()
@click.argument("clientid")
def generatepo(clientid):
tplfolder = "data/work_TPL"
tploriginal = "data/out_TPL"
files = common.getFiles(tplfolder)
im = pyimgur.Imgur(clientid)
with common.Stream("data/tpl.po", "w") as f:
for file in common.showProgress(files):
uploaded = False
while not uploaded:
try:
image = im.upload_image(tploriginal + file, title="file")
f.writeLine("#. " + image.link)
f.writeLine("msgid \"" + file.split("/")[2] + "\"")
f.writeLine("msgstr \"\"")
f.writeLine("")
uploaded = True
                    time.sleep(30)   # throttle uploads a little between files
                except requests.HTTPError:
                    time.sleep(300)  # likely rate limited: back off and retry
common.logMessage("Done!")
@common.cli.command()
def dupe():
seen = {}
sections = common.getSections("data/msbe_input.txt")
for section in sections:
if section == "quest.bin":
continue
for line in sections[section]:
translation = sections[section][line][0]
if line not in seen:
seen[line] = [translation, section, 1]
else:
seen[line][2] += 1
if translation != seen[line][0]:
common.logMessage("{}: {}={} ({} @{})".format(section, line, translation, seen[line][0], seen[line][1]))
for line in seen:
if seen[line][2] > 2:
common.logMessage("Dupe", seen[line][2], line + "=")
def cleanSection(section):
    # For each original string, drop empty translations; if none are left,
    # keep a single empty string so the key still exists.
    for key in section:
        newlist = []
        for trans in section[key]:
            if trans != "":
                newlist.append(trans)
        if len(newlist) == 0:
            section[key] = [""]
        else:
            section[key] = newlist
    return section
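# Example (hypothetical data) of what cleanSection does:
#   cleanSection({"a": ["", "Hi"], "b": ["", ""]})  ->  {"a": ["Hi"], "b": [""]}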
@common.cli.command()
def smartcat():
click.confirm("Importing Smartcat CSV will override the msbe_input.txt and movie_input.txt files, are you sure?", abort=True)
common.logMessage("Importing Smartcat CSV ...")
# Read the lines from the CSV files
infiles = ["data/msbe_output_rearranged.csv", "data/msbe_events.csv", "data/msbe_system.csv", "data/movie.csv"]
section = {}
commons = {}
current = ""
for file in infiles:
with open(file, newline="", encoding="utf-8") as csvfile:
rows = csv.reader(csvfile, delimiter=",", quotechar="\"")
for row in rows:
orig = row[0]
trans = row[1]
if orig == "ja" or ".png" in orig or "youtube.com" in orig or orig == "Table of Contents:" or orig == "!Images":
continue
if orig.startswith("("):
orig = orig.split(") ", 1)[1]
if orig != "":
if orig.startswith("!FILE:"):
current = orig.split(",")[0].replace("!FILE:", "")
section[current] = {}
elif current != "":
if orig in section[current]:
section[current][orig].append(trans)
else:
section[current][orig] = [trans]
if orig in commons:
commons[orig].append(trans)
else:
commons[orig] = [trans]
# Clean up empty lines that have translations somewhere else
commons = cleanSection(commons)
for name in section:
section[name] = cleanSection(section[name])
# Export everything to msbe_input following msbe_output for ordering
outputfiles = ["data/msbe_output.txt", "data/movie_output.txt"]
inputfiles = ["data/msbe_input.txt", "data/movie_input.txt"]
for i in range(len(outputfiles)):
with codecs.open(outputfiles[i], "r", "utf-8") as fin:
with codecs.open(inputfiles[i], "w", "utf-8") as f:
current = ""
for line in fin:
line = line.rstrip("\r\n").replace("\ufeff", "")
if line.startswith("!FILE:"):
current = line.replace("!FILE:", "")
if current not in section:
common.logWarning("Section", current, "not found")
current = ""
else:
f.write("!FILE:" + current + "\n")
elif current != "":
line = line.replace("=", "")
linestart = ""
if i == 1:
linesplit = line.split(":", 2)
linestart = linesplit[0] + ":" + linesplit[1] + ":"
line = linesplit[2]
sectionline = line
if line not in section[current]:
if line.strip(" ") in section[current] or line.strip(" ") in commons:
sectionline = line.strip(" ")
elif line.replace("<3D>", "=") in section[current] or line.replace("<3D>", "=") in commons:
sectionline = line.replace("<3D>", "=")
if sectionline not in section[current] and sectionline in commons:
section[current][sectionline] = commons[sectionline]
if sectionline in section[current]:
f.write(linestart + line + "=" + section[current][sectionline][0] + "\n")
if len(section[current][sectionline]) > 1:
section[current][sectionline].pop()
else:
f.write(linestart + line + "=\n")
common.logWarning("Line \"" + sectionline + "\" in section", current, "not found")
common.logMessage("Done!")
if __name__ == "__main__":
click.echo("MonoPriTranslation version " + version)
if not os.path.isdir("data"):
common.logError("data folder not found.")
quit()
common.runCLI(common.cli)
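# Typical invocations (a sketch; the exact entry point depends on how
# hacktools' common.runCLI exposes the click commands defined above):
#   python tool.py extract --iso          # only unpack the ISO
#   python tool.py repack --fnt --no-patch
#   python tool.py generatepo <imgur-client-id>
#   python tool.py smartcat               # import the Smartcat CSV exports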
| 43.622837 | 129 | 0.560482 | 1,435 | 12,607 | 4.86899 | 0.21115 | 0.040074 | 0.023615 | 0.03163 | 0.182625 | 0.152998 | 0.107915 | 0.074853 | 0.065407 | 0.065407 | 0 | 0.009718 | 0.298009 | 12,607 | 288 | 130 | 43.774306 | 0.779774 | 0.025224 | 0 | 0.146617 | 0 | 0 | 0.194528 | 0.041202 | 0 | 0 | 0.002117 | 0 | 0 | 1 | 0.022556 | false | 0 | 0.06391 | 0 | 0.090226 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77f166eca841bd7df6212b114048257be00c424b | 50,437 | py | Python | sequana/bedtools.py | brwnj/sequana | 58f6ca47815bf7253f27e4631d971a0a479c3a63 | [
"BSD-3-Clause"
] | null | null | null | sequana/bedtools.py | brwnj/sequana | 58f6ca47815bf7253f27e4631d971a0a479c3a63 | [
"BSD-3-Clause"
] | null | null | null | sequana/bedtools.py | brwnj/sequana | 58f6ca47815bf7253f27e4631d971a0a479c3a63 | [
"BSD-3-Clause"
] | 1 | 2019-10-11T18:21:05.000Z | 2019-10-11T18:21:05.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Utilities for the genome coverage"""
import re
import ast
import os
import sys
from biokit.stats import mixture
from sequana.lazy import pandas as pd
from sequana.lazy import numpy as np
from sequana.lazy import pylab
from sequana import logger
from sequana.tools import gc_content, genbank_features_parser
from sequana.errors import SequanaException
from easydev import do_profile
__all__ = ["GenomeCov", "ChromosomeCov", "DoubleThresholds"]
class DoubleThresholds(object):
"""Simple structure to handle the double threshold for negative and
positive sides
Used yb GenomeCov and related classes.
::
dt = DoubleThresholds(-3,4,0.5,0.5)
This means the low threshold is -3 while the high threshold is 4. The two
following values must be between 0 and 1 and are used to define the value
of the double threshold set to half the value of the main threshold.
Internally, the main thresholds are stored in the low and high attributes.
The secondary thresholds are derived from the main thresholds and the
two ratios. The ratios are named ldtr and hdtr for low double threshold
ratio and high double threshold ration. The secondary thresholds are
denoted low2 and high2 are are update automatically if low, high, ldtr or
hdtr are changed.
"""
def __init__(self, low=-3, high=3, ldtr=0.5, hdtr=0.5):
        assert 0. <= ldtr <= 1., \
            "ldtr parameter (low double threshold ratio) must be in [0,1]"
        assert 0. <= hdtr <= 1., \
            "hdtr parameter (high double threshold ratio) must be in [0,1]"
assert low < 0, "low threshold must be negative"
assert high > 0, "high threshold must be positive"
self._ldtr = ldtr
self._hdtr = hdtr
self._high = high
self._low = low
def _get_ldtr(self):
return self._ldtr
def _set_ldtr(self, ldtr):
self._ldtr = ldtr
self._low2 = self._low * self._ldtr
ldtr = property(_get_ldtr, _set_ldtr)
def _get_hdtr(self):
return self._hdtr
def _set_hdtr(self, hdtr):
self._hdtr = hdtr
self._high2 = self._high * self._hdtr
hdtr = property(_get_hdtr, _set_hdtr)
def _get_low(self):
return self._low
def _set_low(self, value):
assert value < 0.
self._low = value
self._low2 = self._low * self._ldtr
low = property(_get_low, _set_low)
def _get_high(self):
return self._high
def _set_high(self, value):
assert value > 0.
self._high = value
        self._high2 = self._high * self._hdtr
high = property(_get_high, _set_high)
def _get_low2(self):
return self._low * self._ldtr
low2 = property(_get_low2)
def _get_high2(self):
return self._high * self._hdtr
high2 = property(_get_high2)
def get_args(self):
return "%.2f,%.2f,%.2f,%.2f" % (self.low, self.high, self.ldtr,
self.hdtr)
def copy(self):
thresholds = DoubleThresholds(self.low, self.high,
self.ldtr, self.hdtr)
return thresholds
def __str__(self):
txt = "Low threshold: %s\n" % self.low
txt += "High threshold: %s\n" % self.high
txt += "double-low threshold: %s\n" % self.low2
txt += "double-high threshold: %s" % self.high2
return txt
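# A minimal sketch of how the double thresholds behave (values assumed):
#
#   dt = DoubleThresholds(low=-4, high=4, ldtr=0.5, hdtr=0.5)
#   dt.low2, dt.high2   # (-2.0, 2.0): the main thresholds scaled by the ratios
#   dt.high = 6         # high2 follows automatically and becomes 3.0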
class GenomeCov(object):
"""Create a list of dataframe to hold data from a BED file generated with
samtools depth.
This class can be used to plot the coverage resulting from a mapping, which
is stored in BED format. The BED file may contain several chromosomes.
    They are handled independently and are accessible as a list of
:class:`ChromosomeCov` instances.
Example:
.. plot::
:include-source:
from sequana import GenomeCov, sequana_data
filename = sequana_data('JB409847.bed')
reference = sequana_data("JB409847.fasta")
gencov = GenomeCov(filename)
gencov.compute_gc_content(reference)
gencov = GenomeCov(filename)
for chrom in gencov:
chrom.running_median(n=3001, circular=True)
chrom.compute_zscore()
chrom.plot_coverage()
gencov[0].plot_coverage()
Results are stored in a list of :class:`ChromosomeCov` named
:attr:`chr_list`.
"""
def __init__(self, input_filename, genbank_file=None,
low_threshold=-3, high_threshold=3, ldtr=0.5, hdtr=0.5):
""".. rubric:: constructor
:param str input_filename: the input data with results of a bedtools
genomecov run. This is just a 3-column file. The first column is a
            string (chromosome), the second column is the base position and the
            third is the coverage.
        :param str genbank_file: annotation file of your reference.
:param float low_threshold: threshold used to identify under-covered
genomic region of interest (ROI). Must be negative
:param float high_threshold: threshold used to identify over-covered
genomic region of interest (ROI). Must be positive
:param float ldtr: fraction of the low_threshold to be used to define
the intermediate threshold in the double threshold method. Must be
between 0 and 1.
        :param float hdtr: fraction of the high_threshold to be used to define
the intermediate threshold in the double threshold method. Must be
between 0 and 1.
"""
# Keep information if the genome is circular and the window size used
self._circular = None
self._feature_dict = None
self._gc_window_size = None
self._genbank_filename = None
self._window_size = None
        # the user's choice takes priority over the csv file
if genbank_file:
self.genbank_filename = genbank_file
        # check whether the input is a csv from a previous analysis
try:
self.chr_list = self._read_csv(input_filename)
except FileNotFoundError as e:
print("FileNotFound error({0}): {1}".format(e.errno, e.strerror))
sys.exit(1)
if not self.chr_list:
# read bed file
self.thresholds = DoubleThresholds(low_threshold, high_threshold,
ldtr, hdtr)
self.chr_list = self._read_bed(input_filename)
def __getitem__(self, index):
return self.chr_list[index]
def __iter__(self):
return self.chr_list.__iter__()
def __len__(self):
return len(self.chr_list)
def __eq__(self, other):
if len(self.chr_list) != len(other.chr_list):
return False
for a,b in zip(self.chr_list, other.chr_list):
if all(a.df['cov'] == b.df['cov']) is False:
return False
return True
def compute_coverage(self, window, circular=False, reference=None):
"""Compute GC content (if reference provided), running_median/zscore for each chromosome.
"""
if reference:
self.compute_gc_content(reference)
for c in self.chr_list:
c.running_median(window, circular)
c.compute_zscore()
@property
def circular(self):
""" Get the circularity of chromosome(s). It must be a boolean.
"""
return self._circular
@circular.setter
def circular(self, circular):
if isinstance(circular, bool):
self._circular = circular
else:
logger.error("TypeError: Circular must be a boolean. True if your "
"genome is circular and False if not.")
sys.exit(1)
@property
def feature_dict(self):
""" Get the features dictionary of the genbank.
"""
return self._feature_dict
@feature_dict.setter
def feature_dict(self, anything):
logger.error("AttributeError: You can't set attribute.\n"
"GenomeCov.feature_dict is set when"
"GenomeCov.genbank_filename is set.")
sys.exit(1)
@property
def gc_window_size(self):
""" Get or set the window size to compute the GC content.
"""
return self._gc_window_size
@gc_window_size.setter
def gc_window_size(self, n):
if n % 2 == 0:
logger.warning("Window size must be an odd number.")
self._gc_window_size = n + 1
logger.warning("{0} is incremented by 1".format(n))
else:
self._gc_window_size = n
@property
def genbank_filename(self):
""" Get or set the genbank filename to annotate ROI detected with
:meth:`ChromosomeCov.get_roi`. Changing the genbank filename will
configure the :attr:`GenomeCov.feature_dict`.
"""
return self._genbank_filename
@genbank_filename.setter
def genbank_filename(self, genbank_filename):
if os.path.isfile(genbank_filename):
self._genbank_filename = os.path.realpath(genbank_filename)
self._feature_dict = genbank_features_parser(
genbank_filename)
else:
logger.error("FileNotFoundError: The genbank file doesn't exist.")
sys.exit(1)
@property
def window_size(self):
""" Get or set the window size to compute the running median. Size
must be an interger.
"""
return self._window_size
@window_size.setter
def window_size(self, n):
if n % 2 == 0:
logger.warning("Window size must be an odd number.")
self._window_size = n + 1
logger.warning("{0} is incremented to {1}".format(
n, self._window_size))
else:
self._window_size = n
def _read_bed(self, input_filename):
""" Read bed generated by samtools depth tools and create
:class:'ChromosomeCov' list.
"""
df = pd.read_table(input_filename, header=None)
df = df.rename(columns={0: "chr", 1: "pos", 2: "cov", 3: "mapq0"})
chr_list = self._set_chr_list(df)
# Set the link to this instance in each chromosome
# useful if one wants to recompute GC content with different window
return chr_list
def _read_csv(self, input_filename):
""" Read csv generated by :class:'GenomeCov' and create
:class:'ChromosomeCov' list.
"""
# set regex to get important information about previous analysis
        re_threshold = re.compile(r"thresholds:([\d,\.-]+)")
        re_window_size = re.compile(r"\swindow_size:(\d+)")
        re_circular = re.compile(r"circular:(\w+)")
        re_gc_window_size = re.compile(r"gc_window_size:(\d+)")
        re_genbank = re.compile(r"genbank:([\{0}\w\.\-]+)".format(os.sep))
        re_chrom = re.compile(r"^# ([\w\-\.]+):")
        re_gaussian = re.compile(r"(\[\{.+\}\])")
with open(input_filename, "r") as fp:
line = fp.readline()
# check if file was generated by sequana_coverage
if not line.startswith("# sequana_coverage"):
return None
# get thresholds
thresholds = re_threshold.findall(line)[0]
thresholds = [float(f) for f in thresholds.split(',')]
self.thresholds = DoubleThresholds(*thresholds)
# get window size
self.window_size = int(re_window_size.search(line).group(1))
# get circular
circular = re_circular.search(line).group(1)
self.circular = False if circular == "False" else True
# get gc_window_size
gc = re_gc_window_size.search(line)
if gc:
self.gc_window_size = int(gc.group(1))
# get genbank
gb = re_genbank.search(line)
if gb and not self.genbank_filename:
self.genbank_filename = gb.group(1)
# get gaussians for each chromosome
gaussians_dict = dict()
for line in fp:
chrom = re_chrom.search(line)
if chrom:
gaussians = re_gaussian.search(line)
gaussians = ast.literal_eval(gaussians.group(1))
gaussians_dict[chrom.group(1)] = gaussians
else:
break
df = pd.read_csv(fp, header=None, names=line.strip().split(","))
chr_list = self._set_chr_list(df)
# Add gaussians and range informations
for chrom in chr_list:
chrom.set_gaussians(gaussians_dict[chrom.chrom_name])
if self.circular:
chrom.range = [None, None]
else:
mid = int(self.window_size/2)
chrom.range = [mid, -mid]
chrom.mixture_fitting = mixture.EM(
chrom.df['scale'][chrom.range[0]:chrom.range[1]])
return chr_list
def _set_chr_list(self, df):
df = df.set_index("chr", drop=False)
return [ChromosomeCov(df.loc[key], self, self.thresholds) for key in
df.index.unique()]
def compute_gc_content(self, fasta_file, window_size=101, circular=False,
letters=['G', 'C', 'c', 'g']):
""" Compute GC content of genome sequence.
:param str fasta_file: fasta file name.
:param int window_size: size of the sliding window.
:param bool circular: if the genome is circular (like bacteria
chromosome)
Store the results in the :attr:`ChromosomeCov.df` attribute (dataframe)
with a column named *gc*.
"""
self.gc_window_size = window_size
self.circular = circular
gc_dict = gc_content(fasta_file, self.gc_window_size, circular,
letters=letters)
for chrom in self.chr_list:
if chrom.chrom_name in gc_dict.keys():
chrom.df["gc"] = gc_dict[chrom.chrom_name]
else:
msg = ("The chromosome (or contig) %s in your"
" BED/BAM file was not found in the reference provided."
" Make sure your input reference file is the same"
" as the one used to perform the mapping or just"
" remove the --reference parameter.")
raise SequanaException(msg % chrom.chrom_name)
def get_stats(self, output="json"):
"""Return basic statistics for each chromosome
:return: dictionary with chromosome names as keys
and statistics as values.
.. seealso:: :class:`ChromosomeCov`.
"""
stats = {}
for chrom in self.chr_list:
stats[chrom.chrom_name] = chrom.get_stats(output=output)
return stats
def hist(self, logx=True, logy=True, fignum=1, N=20, lw=2, **kwargs):
for chrom in self.chr_list:
chrom.plot_hist_coverage(logx=logx, logy=logy, fignum=fignum, N=N,
histtype='step', hold=True, lw=lw, **kwargs)
pylab.legend()
def to_csv(self, output_filename, **kwargs):
""" Write all data in a csv.
:param str output_filename: csv output file name.
:param **dict kwargs: parameters of :meth:`pandas.DataFrame.to_csv`.
"""
# Concatenate all df
df_list = [chrom.get_df() for chrom in self.chr_list]
df = pd.concat(df_list)
header = ("# sequana_coverage thresholds:{0} window_size:{1} circular:"
"{2}".format(self.thresholds.get_args(), self.window_size,
self.circular))
if self.genbank_filename:
header += ' genbank:' + self.genbank_filename
if self.gc_window_size:
header += ' gc_window_size:{0}'.format(self.gc_window_size)
with open(output_filename, "w") as fp:
print(header, file=fp)
for chrom in self.chr_list:
print("# {0}".format(chrom.get_gaussians()), file=fp)
df.to_csv(fp, **kwargs)
class ChromosomeCov(object):
"""Factory to manipulate coverage and extract region of interests.
Example:
.. plot::
:include-source:
from sequana import GenomeCov, sequana_data
filename = sequana_data("virus.bed")
gencov = GenomeCov(filename)
chrcov = gencov[0]
chrcov.running_median(n=3001)
chrcov.compute_zscore()
chrcov.plot_coverage()
df = chrcov.get_roi().get_high_roi()
    The *df* variable contains a dataframe with the high regions of interest
    (over-covered).
.. seealso:: sequana_coverage standalone application
"""
def __init__(self, df, genomecov, thresholds=None):
""".. rubric:: constructor
:param df: dataframe with position for a chromosome used within
:class:`GenomeCov`. Must contain the following columns:
["chr", "pos", "cov"]
:param thresholds: a data structure :class:`DoubleThresholds` that holds
the double threshold values.
"""
self._bed = genomecov
self.df = df.set_index("pos", drop=False)
self.chrom_name = str(df["chr"].iloc[0])
        try:
            self.thresholds = thresholds.copy()
        except AttributeError:
            # no thresholds given (None): fall back to the defaults
            self.thresholds = DoubleThresholds()
def __str__(self):
stats = self.get_stats(output="dataframe")
stats.set_index("name", inplace=True)
        def _getter(data, key):
            return data.loc[key].Value
txt = "\nGenome length: %s" % int(len(self.df))
txt += "\nSequencing depth (DOC): %8.2f " % _getter(stats,'DOC')
txt += "\nSequencing depth (median): %8.2f " % _getter(stats, 'Median')
txt += "\nBreadth of coverage (BOC) (percent): %.2f " % _getter(
stats, 'BOC')
txt += "\nGenome coverage standard deviation : %8.2f " % _getter(
stats,'STD')
txt += "\nGenome coverage coefficient variation : %8.2f " % _getter(
stats,'CV')
return txt
def __len__(self):
return self.df.__len__()
@property
def bed(self):
return self._bed
@bed.setter
def bed(self):
logger.error("AttributeError: You can't set the ChromosomeCov.bed. "
"Setting is done automatically when the class is "
"created.")
def columns(self):
""" Return immutable ndarray implementing an ordered, sliceable set.
"""
return self.df.columns
def get_df(self):
return self.df.set_index("chr", drop=True)
def get_size(self):
return self.__len__()
def get_mean_cov(self):
return self.df["cov"].mean()
def get_var_coef(self):
return np.sqrt(self.df["cov"].var()) / self.get_mean_cov()
def get_gaussians(self):
return "{0}: {1}".format(self.chrom_name, self.gaussians_params)
def set_gaussians(self, gaussians):
""" Set gaussians predicted if you read a csv file generated by
:class:`GenomeCov`.
"""
self.gaussians_params = gaussians
self.best_gaussian = self._get_best_gaussian()
def moving_average(self, n, circular=False):
"""Compute moving average of the genome coverage
:param n: window's size. Must be odd
:param bool circular: is the chromosome circular or not
Store the results in the :attr:`df` attribute (dataframe) with a
column named *ma*.
"""
N = len(self.df['cov'])
assert n < N/2
from sequana.stats import moving_average
ret = np.cumsum(np.array(self.df["cov"]), dtype=float)
ret[n:] = ret[n:] - ret[:-n]
ma = ret[n - 1:] / n
mid = int(n / 2)
self.df["ma"] = pd.Series(ma, index=np.arange(start=mid,
stop=(len(ma) + mid)))
if circular:
# FIXME: shift of +-1 as compared to non circular case...
# shift the data and compute the moving average
self.data = list(self.df['cov'].values[N-n:]) +\
list(self.df['cov'].values) + \
list(self.df['cov'].values[0:n])
ma = moving_average(self.data, n)
self.ma = ma[n//2+1:-n//2]
self.df["ma"] = pd.Series(self.ma, index=self.df['cov'].index)
def running_median(self, n, circular=False):
"""Compute running median of genome coverage
:param int n: window's size.
:param bool circular: if a mapping is circular (e.g. bacteria
whole genome sequencing), set to True
Store the results in the :attr:`df` attribute (dataframe) with a
column named *rm*.
.. versionchanged:: 0.1.21
Use Pandas rolling function to speed up computation.
"""
self.bed.window_size = n
self.bed.circular = circular
# in py2/py3 the division (integer or not) has no impact
mid = int(n / 2)
self.range = [None, None]
try:
if circular:
# BASED on running_median pure implementation, could be much
# slower than pure pandas rolling function. Keep those 4 lines
# for book keeping though.
#cover = list(self.df["cov"])
#cover = cover[-mid:] + cover + cover[:mid]
#rm = running_median.RunningMedian(cover, n).run()
#self.df["rm"] = rm[mid:-mid]
rm = pd.concat([self.df['cov'][-mid:],
self.df['cov'],
self.df['cov'][:mid]]).rolling(
n, center=True).median()
self.df["rm"] = rm[mid:-mid]
else:
rm = self.df['cov'].rolling(n, center=True).median()
# Like in RunningMedian, we copy the NAN with real data
rm[0:mid] = self.df['cov'][0:mid]
rm[-mid:] = self.df['cov'][-mid:]
#rm = running_median.RunningMedian(cover, n).run()
self.df["rm"] = rm
# set up slice for gaussian prediction
self.range = [mid, -mid]
        except Exception:
            # e.g. the window is larger than the contig: keep the raw coverage
            self.df["rm"] = self.df["cov"]
def get_evenness(self):
"""Return Evenness of the coverage
        :Reference: Konrad Oexle, Journal of Human Genetics 2016, Evaluation
            of the evenness score in NGS.

        This works before or after normalisation, but leads to different results.
"""
from sequana.stats import evenness
return evenness(self.df['cov'])
def get_cv(self):
"""Return the coefficient variation
The coefficient of variation (CV) is defined as sigma / mu
To get percentage, you must multiply by 100.
"""
sigma = self.df['cov'].std()
mu = self.df['cov'].mean()
return sigma/mu
def _coverage_scaling(self):
"""Normalize data with moving average of coverage
Store the results in the :attr:`df` attribute (dataframe) with a
column named *scale*.
.. note:: Needs to call :meth:`running_median`
"""
if "rm" not in self.df.columns:
txt = "Column rm (running median) is missing.\n" + self.__doc__
            logger.error(txt)
            raise KeyError(txt)
else:
self.df["scale"] = self.df["cov"] / self.df["rm"]
self.df = self.df.replace(np.inf, np.nan)
self.df = self.df.replace(-np.inf, np.nan)
def _get_best_gaussian(self):
results_pis = [model["pi"] for model in self.gaussians_params]
indice = np.argmax(results_pis)
return self.gaussians_params[indice]
def compute_zscore(self, k=2, step=10, use_em=True, verbose=True):
""" Compute zscore of coverage and normalized coverage.
:param int k: Number gaussian predicted in mixture (default = 2)
:param int step: (default = 10). This parameter is used to speed
up computation and is ignored if the length of the coverage/sequence
is below 100,000
Store the results in the :attr:`df` attribute (dataframe) with a
column named *zscore*.
.. note:: needs to call :meth:`running_median` before hand.
"""
# here for lazy import
from biokit.stats import mixture
# normalize coverage
self._coverage_scaling()
data = self.df['scale'][self.range[0]:self.range[1]]
if len(data) < 100000:
step = 1
# remove nan and inf values
data = data.replace(0, np.nan)
data = data.dropna()
if data.empty:
data = np.full(len(self.df), 1, dtype=int)
self.df['scale'] = data
if use_em:
self.mixture_fitting = mixture.EM(
data[::step])
self.mixture_fitting.estimate(k=k)
else:
self.mixture_fitting = mixture.GaussianMixtureFitting(
data[::step],k=k)
self.mixture_fitting.estimate()
# keep gaussians informations
self.gaussians = self.mixture_fitting.results
params_key = ("mus", "sigmas", "pis")
self.gaussians_params = [{key[:-1]: self.gaussians[key][i] for key in
params_key} for i in range(k)]
self.best_gaussian = self._get_best_gaussian()
# warning when sigma is equal to 0
if self.best_gaussian["sigma"] == 0:
logger.warning("A problem related to gaussian prediction is "
"detected. Be careful, Sigma is equal to 0.")
self.df["zscore"] = np.zeros(len(self.df), dtype=int)
else:
self.df["zscore"] = (self.df["scale"] - self.best_gaussian["mu"]) / \
self.best_gaussian["sigma"]
        # Naive check that the two fitted Gaussians are well separated
if k == 2:
mus = self.gaussians['mus']
sigmas = self.gaussians["sigmas"]
index0 = mus.index(self.best_gaussian["mu"])
if index0 == 0:
mu1 = mus[1]
s0 = sigmas[0]
mu0 = mus[0]
else:
mu1 = mus[0]
s0 = sigmas[1]
mu0 = mus[1]
if abs(mu0-mu1) < s0:
logger.warning(("Warning: k=2 but note that |mu0-mu1| < sigma0. "
"k=1 could be a better choice"))
def get_centralness(self):
"""Proportion of central (normal) genome coverage
This is 1 - (number of non normal data) / (total length)
.. note:: depends on the thresholds attribute being used.
.. note:: depends slightly on :math:`W` the running median window
"""
filtered = self.get_roi()
Cplus = sum(filtered.get_high_roi()['size'])
Cminus = sum(filtered.get_low_roi()['size'])
return 1 - (Cplus+Cminus) / float(len(self))
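        # e.g. (made-up numbers) a 1,000,000 bp genome with 2,000 bp of high
        # ROIs and 3,000 bp of low ROIs has centralness 1 - 5000/1e6 = 0.995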
def get_roi(self):
"""Keep positions with zscore outside of the thresholds range.
:return: a dataframe from :class:`FilteredGenomeCov`
.. note:: depends on the :attr:`thresholds` low and high values.
"""
features = self.bed.feature_dict
try:
second_high = self.thresholds.high2
second_low = self.thresholds.low2
query = "zscore > @second_high or zscore < @second_low"
# in the genbank, the names appears as e.g. JB12345
# but in the fasta or BED files, it may be something like
# gi|269939526|emb|FN433596.1|
# so they do not match. We can try to guess it
alternative = None
if features:
if self.chrom_name not in features.keys():
msg = """Chromosome name (%s) not found
in the genbank. Make sure the chromosome names in
the BAM/BED files are compatible with the genbank
content. Genbank files contains the following keys """
for this in features.keys():
msg += "\n - %s" % this
alternative = [x for x in self.chrom_name.split("|") if x]
alternative = alternative[-1] # assume the accession is last
alternative = alternative.split('.')[0] # remove version
if alternative in features.keys():
msg += "\n Guessed the chromosome name to be: %s" % alternative
else:
features = None
logger.warning(msg % self.chrom_name)
if features:
if alternative:
return FilteredGenomeCov(self.df.query(query), self.thresholds,
features[alternative])
else:
return FilteredGenomeCov(self.df.query(query), self.thresholds,
features[self.chrom_name])
else:
return FilteredGenomeCov(self.df.query(query), self.thresholds)
except KeyError:
logger.error("Column zscore is missing in data frame.\n"
"You must run compute_zscore before get low coverage."
"\n\n", self.__doc__)
sys.exit(1)
def plot_coverage(self, filename=None, fontsize=16,
rm_lw=1, rm_color="#0099cc", rm_label="Running median",
th_lw=1, th_color="r", th_ls="--", main_color="k", main_lw=1,
main_kwargs={}, sample=True, set_ylimits=True):
""" Plot coverage as a function of base position.
:param filename:
:param rm_lw: line width of the running median
        :param rm_color: line color of the running median
        :param rm_label: label for the running median
:param th_lw: line width of the thresholds
:param th_color: line color of the thresholds
:param main_color: line color of the coverage
:param main_lw: line width of the coverage
:param sample: if there are more than 1 000 000 points, we
use an integer step to skip data points. We can still plot
all points at your own risk by setting this option to False
:param set_ylimits: we want to focus on the "normal" coverage ignoring
unsual excess. To do so, we set the yaxis range between 0 and a
maximum value. This maximum value is set to the minimum between the
6 times the mean coverage and 1.5 the maximum of the high coverage
threshold curve. If you want to let the ylimits free, set this
argument to False
        .. note:: if there are more than 1,000,000 points, we sub-sample with an
            integer step so that only about 1,000,000 points are shown. For
            instance, for 5,000,000 points, one point in five is plotted.
In addition to the coverage, the running median and coverage confidence
corresponding to the lower and upper zscore thresholds are shown.
.. note:: uses the thresholds attribute.
"""
# z = (X/rm - \mu ) / sigma
high_zcov = (self.thresholds.high * self.best_gaussian["sigma"] +
self.best_gaussian["mu"]) * self.df["rm"]
low_zcov = (self.thresholds.low * self.best_gaussian["sigma"] +
self.best_gaussian["mu"]) * self.df["rm"]
pylab.clf()
ax = pylab.gca()
ax.set_facecolor('#eeeeee')
pylab.xlim(0,self.df["pos"].iloc[-1])
axes = []
labels = []
# 1,000,000 points is a lot for matplotlib. Let us restrict ourself to 1
# million points for now.
if len(self.df) > 1000000 and sample is True:
NN = int(len(self.df)/1000000)
else:
NN = 1
# the main coverage plot
p1, = pylab.plot(self.df["cov"][::NN], color=main_color, label="Coverage",
linewidth=main_lw, **main_kwargs)
axes.append(p1)
labels.append("Coverage")
# The running median plot
if rm_lw > 0:
p2, = pylab.plot(self.df["rm"][::NN],
color=rm_color,
linewidth=rm_lw,
label=rm_label)
axes.append(p2)
labels.append(rm_label)
# The threshold curves
if th_lw > 0:
p3, = pylab.plot(high_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls,
label="Thresholds")
p4, = pylab.plot(low_zcov[::NN], linewidth=th_lw, color=th_color, ls=th_ls,
label="_nolegend_")
axes.append(p3)
labels.append("Thresholds")
pylab.legend(axes, labels, loc="best")
pylab.xlabel("Position", fontsize=fontsize)
pylab.ylabel("Per-base coverage", fontsize=fontsize)
pylab.grid(True)
# sometimes there are large coverage value that squeeze the plot.
# Let us restrict it
if set_ylimits is True:
pylab.ylim([0, min([
high_zcov.max() * 1.5,
self.df["cov"].mean()*6])])
else:
pylab.ylim([0, pylab.ylim()[1]])
try:
pylab.tight_layout()
        except Exception:
pass
if filename:
pylab.savefig(filename)
def _set_bins(self, df, binwidth):
try:
bins = np.arange(min(df), max(df) + binwidth, binwidth)
except ValueError:
return 100
if bins.any():
return bins
return 100
def plot_hist_zscore(self, fontsize=16, filename=None, max_z=6,
binwidth=0.5, **hist_kargs):
""" Barplot of the zscore values
"""
pylab.clf()
bins = self._set_bins(self.df["zscore"][self.range[0]:self.range[1]],
binwidth)
self.df["zscore"][self.range[0]:self.range[1]].hist(
grid=True, bins=bins, **hist_kargs)
pylab.xlabel("Z-Score", fontsize=fontsize)
try:
pylab.tight_layout()
        except Exception:
pass
if filename:
pylab.savefig(filename)
def plot_hist_normalized_coverage(self, filename=None, binwidth=0.1,
max_z=4):
""" Barplot of the normalized coverage with gaussian fitting
"""
pylab.clf()
# if there are a NaN -> can't set up binning
d = self.df["scale"][self.range[0]:self.range[1]].dropna()
# remove outlier -> plot crash if range between min and max is too high
d = d[np.abs(d - d.mean()) <= (4 * d.std())]
bins = self._set_bins(d, binwidth)
self.mixture_fitting.data = d
try:
self.mixture_fitting.plot(self.gaussians_params, bins=bins, Xmin=0,
Xmax=max_z)
except ZeroDivisionError:
pass
pylab.grid(True)
pylab.xlim([0,max_z])
pylab.xlabel("Normalised per-base coverage")
try:
pylab.tight_layout()
        except Exception:
pass
if filename:
pylab.savefig(filename)
def plot_hist_coverage(self, logx=True, logy=True, fontsize=16, N=20,
fignum=1, hold=False, alpha=0.5, filename=None, **kw_hist):
"""
"""
if hold is False:
pylab.figure(fignum)
pylab.clf()
ax = pylab.gca()
ax.set_facecolor('#eeeeee')
data = self.df['cov'].dropna().values
maxcov = data.max()
if logx is True and logy is True:
bins = pylab.logspace(0, pylab.log10(maxcov), N)
pylab.hist(data, bins=bins, log=True, label=self.chrom_name,
alpha=alpha, **kw_hist)
pylab.semilogx()
pylab.xlabel("Coverage (log scale)", fontsize=fontsize)
pylab.ylabel("Count (log scale)", fontsize=fontsize)
elif logx is False and logy is True:
pylab.hist(data, bins=N, log=True, label=self.chrom_name,
alpha=alpha, **kw_hist)
pylab.xlabel("Coverage", fontsize=fontsize)
pylab.ylabel("Count (log scale)", fontsize=fontsize)
elif logx is True and logy is False:
bins = pylab.logspace(0, pylab.log10(maxcov), N)
pylab.hist(data, bins=N, label=self.chrom_name, alpha=alpha,
**kw_hist)
pylab.xlabel("Coverage (log scale)", fontsize=fontsize)
pylab.ylabel("Count", fontsize=fontsize)
pylab.semilogx()
else:
pylab.hist(data, bins=N, label=self.chrom_name, alpha=alpha,
**kw_hist)
pylab.xlabel("Coverage", fontsize=fontsize)
pylab.ylabel("Count", fontsize=fontsize)
pylab.grid(True)
if filename:
pylab.savefig(filename)
def to_csv(self, filename=None, start=None, stop=None, **kwargs):
""" Write CSV file of the dataframe.
:param str filename: csv output filename. If None, return string.
:param int start: start row index.
:param int stop: stop row index.
Params of :meth:`pandas.DataFrame.to_csv`:
:param list columns: columns you want to write.
:param bool header: determine if the header is written.
:param bool index: determine if the index is written.
:param str float_format: determine the float format.
"""
# Create directory to avoid errno 2
if filename:
directory = os.path.dirname(os.path.realpath(filename))
try:
os.makedirs(directory)
except FileExistsError:
if os.path.isdir(directory):
pass
else:
msg = "{0} exist and it is not a directory".format(
directory)
logger.error(msg)
raise FileExistsError
return self.df[start:stop].to_csv(filename, **kwargs)
def plot_gc_vs_coverage(self, filename=None, bins=None, Nlevels=6,
fontsize=20, norm="log", ymin=0, ymax=100,
contour=True, **kwargs):
if Nlevels is None or Nlevels==0:
contour = False
data = self.df[['cov','gc']].copy()
data['gc'] *= 100
data = data.dropna()
if bins is None:
bins = [100, min(int(data['gc'].max()-data['gc'].min()+1),
max(5,self.bed.gc_window_size - 4))]
bins[0] = max(10, min(bins[0], self.df['cov'].max()))
from biokit import Hist2D
h2 = Hist2D(data)
try:
h2.plot(bins=bins, xlabel="Per-base coverage",
ylabel=r'GC content (%)',
Nlevels=Nlevels, contour=contour, norm=norm,
fontsize=fontsize, **kwargs)
        except Exception:  # contour plotting can fail: retry without contours
h2.plot(bins=bins, xlabel="Per-base coverage",
ylabel=r'GC content (%)' ,
Nlevels=Nlevels, contour=False, norm=norm,
fontsize=fontsize, **kwargs)
pylab.ylim([ymin, ymax])
try:
pylab.tight_layout()
        except Exception:
pass
if filename:
pylab.savefig(filename)
def get_gc_correlation(self):
"""Return the correlation between the coverage and GC content
The GC content is the one computed in :meth:`GenomeCov.compute_gc_content`
(default window size is 101)
"""
return self.df[['cov', 'gc']].corr().iloc[0, 1]
def get_max_gc_correlation(self, reference, guess=100):
"""Plot correlation between coverage and GC content by varying the GC window
        The GC content uses a moving window of size W. This parameter affects
        the correlation between coverage and GC. This function finds the
        *optimal* window length.
"""
pylab.clf()
corrs = []
wss = []
def func(params):
ws = int(round(params[0]))
if ws < 10:
return 0
self.bed.compute_gc_content(reference, ws)
corr = self.get_gc_correlation()
corrs.append(corr)
wss.append(ws)
return corr
from scipy.optimize import fmin
res = fmin(func, guess, xtol=1, disp=False) # guess is 200
pylab.plot(wss, corrs, "o")
pylab.xlabel("GC window size")
pylab.ylabel("Correlation")
pylab.grid()
return res[0]
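    # Usage sketch (reference filename assumed):
    #   best_w = chrom.get_max_gc_correlation("reference.fasta")
    #   chrom.bed.compute_gc_content("reference.fasta", int(best_w))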
def get_stats(self, output="json"):
"""Return basic stats about the coverage data"""
data = self.df
stats = {
'DOC': self.df['cov'].mean(),
'STD': self.df['cov'].std(),
'Median': self.df['cov'].median(),
'BOC': 100 * sum(self.df['cov'] > 0) / float(len(self.df))}
try:
stats['CV'] = stats['STD'] / stats['DOC']
        except ZeroDivisionError:  # DOC is 0 when nothing is covered
stats['CV'] = np.nan
stats['MAD'] = np.median(abs(data['cov'].median() -
data['cov']).dropna())
names = ['BOC', 'CV', 'DOC', 'MAD', 'Median', 'STD']
descriptions = [
"breadth of coverage: the proportion (in %s) of the "
"genome covered by at least one read.",
"the coefficient of variation.",
"the sequencing depth (Depth of Coverage), that is the average of "
"the genome coverage.",
"median of the absolute median deviation defined as median(|X-median(X)|).",
"Median of the coverage.",
"standard deviation."
]
if 'gc' in self.df.columns:
stats['GC'] = self.df['gc'].mean() * 100
names.append('GC')
descriptions.append("GC content in %")
df = pd.DataFrame({
"name": names,
"Value": [stats[x] for x in names],
"Description": descriptions})
if output == "json":
return df.to_json()
else:
return df
class FilteredGenomeCov(object):
"""Class used within :class:`ChromosomeCov` to select a subset of the
original GenomeCov
:target: developers only
"""
_feature_not_wanted = {"gene", "regulatory", "source"}
def __init__(self, df, threshold, feature_list=None):
""" .. rubric:: constructor
:param df: dataframe with filtered position used within
:class:`GenomeCov`. Must contain the following columns:
["pos", "cov", "rm", "zscore"]
:param int threshold: a :class:`~sequana.bedtools.DoubleThresholds`
instance.
"""
if isinstance(feature_list, list) and len(feature_list) == 0:
feature_list = None
region_list = self._merge_region(df, threshold=threshold)
if feature_list:
region_list = self._add_annotation(region_list, feature_list)
self.df = self._dict_to_df(region_list, feature_list)
def func(x):
try:
return x.split(".")[0]
            except Exception:
return x
for column in ['gene_end', 'gene_start']:
if column in self.df.columns:
self.df[column] = self.df[column].astype(str)
self.df[column] = self.df[column].apply(func)
def __str__(self):
return self.df.__str__()
def __len__(self):
return self.df.__len__()
def _merge_row(self, df, start, stop):
chrom = df["chr"][start]
cov = np.mean(df["cov"].loc[start:stop])
max_cov = np.max(df["cov"].loc[start:stop])
rm = np.mean(df["rm"].loc[start:stop])
zscore = np.mean(df["zscore"].loc[start:stop])
if zscore >= 0:
max_zscore = df["zscore"].loc[start:stop].max()
else:
max_zscore = df["zscore"].loc[start:stop].min()
size = stop - start + 1
return {"chr": chrom, "start": start, "end": stop + 1, "size": size,
"mean_cov": cov, "mean_rm": rm, "mean_zscore": zscore,
"max_zscore": max_zscore, "max_cov": max_cov}
def _merge_region(self, df, threshold, zscore_label="zscore"):
"""Merge position side by side of a data frame.
Uses a double threshold method.
:param threshold: the high threshold (standard one), not the low one.
.. todo:: to be documented
"""
region_start = None
region_stop = None
start = 1
stop = 1
prev = 1
# handle case where for example position n-1 have a zscore of -5 and n
# have a zscore of 5. It is two different regions.
region_zscore = 0
merge_df = []
for pos, zscore in zip(df["pos"], df[zscore_label]):
stop = pos
if stop - 1 == prev and zscore * region_zscore >= 0:
prev = stop
else:
if region_start:
merge_df.append(self._merge_row(df, region_start,
region_stop))
region_start = None
start = stop
prev = stop
region_zscore = zscore
if zscore > 0 and zscore > threshold.high:
if not region_start:
region_start = pos
region_stop = pos
else:
region_stop = pos
elif zscore < 0 and zscore < threshold.low:
if not region_start:
region_start = pos
region_stop = pos
else:
region_stop = pos
if start < stop and region_start:
merge_df.append(self._merge_row(df, region_start, region_stop))
return merge_df
def _add_annotation(self, region_list, feature_list):
""" Add annotation from a dictionary generated by parsers in
sequana.tools.
"""
region_ann = []
# an iterator of features
iter_feature = iter(feature_list)
feature = next(iter_feature)
# pass "source" feature
while feature["type"] in FilteredGenomeCov._feature_not_wanted:
try:
feature = next(iter_feature)
except StopIteration:
print("Features types ({0}) are not present in the annotation"
" file. Please change what types you want".format(
feature['type']))
return region_ann
# merge regions and annotations
for region in region_list:
feature_exist = False
while feature["gene_end"] <= region["start"]:
try:
feature = next(iter_feature)
except:
break
while feature["gene_start"] < region["end"]:
# A feature exist for detected ROI
feature_exist = True
# put locus_tag in gene field if gene doesn't exist
try:
feature["gene"]
except KeyError:
try:
feature["gene"] = feature["locus_tag"]
except:
feature["gene"] = "None"
# put note field in product if product doesn't exist
try:
feature["product"]
except KeyError:
try:
feature["product"] = feature["note"]
except:
feature["product"] = "None"
region_ann.append(dict(region, **feature))
try:
feature = next(iter_feature)
except StopIteration:
break
if feature_exist is False:
region_ann.append(dict(region, **{"gene_start": None,
"gene_end": None,
"type": None,
"gene": None,
"strand": None,
"product": None}))
return region_ann
def _dict_to_df(self, region_list, annotation):
""" Convert dictionary as dataframe.
"""
merge_df = pd.DataFrame(region_list)
colnames = ["chr", "start", "end", "size", "mean_cov", "max_cov",
"mean_rm", "mean_zscore", "max_zscore", "gene_start",
"gene_end", "type", "gene", "strand", "product"]
if not annotation:
colnames = colnames[:9]
merge_df = pd.DataFrame(region_list, columns=colnames)
int_column = ["start", "end", "size"]
merge_df[int_column] = merge_df[int_column].astype(int)
if annotation:
merge_df.rename(columns={"gene": "gene_name"}, inplace=True)
# maybe let the user set what he wants
return merge_df.loc[~merge_df["type"].isin(
FilteredGenomeCov._feature_not_wanted)]
return merge_df
def _get_sub_range(self, seq_range):
try:
return self.df[(self.df["end"] > seq_range[0]) &
(self.df["start"] < seq_range[1])]
except TypeError:
return self.df
def get_low_roi(self, seq_range=None):
df = self._get_sub_range(seq_range)
return df.loc[df["max_zscore"] < 0]
def get_high_roi(self, seq_range=None):
df = self._get_sub_range(seq_range)
return df.loc[df["max_zscore"] >= 0]
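# Usage sketch (data and thresholds assumed) tying the classes together:
#   roi = chrcov.get_roi()                      # a FilteredGenomeCov instance
#   roi.get_high_roi(seq_range=(10000, 20000))  # over-covered regions overlapping 10k-20k
#   roi.get_low_roi()                           # all under-covered regions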
| 36.761662 | 97 | 0.5606 | 6,124 | 50,437 | 4.497714 | 0.129327 | 0.019605 | 0.009803 | 0.005228 | 0.225094 | 0.1702 | 0.15143 | 0.126525 | 0.117303 | 0.091853 | 0 | 0.013141 | 0.333128 | 50,437 | 1,371 | 98 | 36.788476 | 0.805768 | 0.24948 | 0 | 0.25 | 0 | 0 | 0.105558 | 0.003184 | 0 | 0 | 0 | 0.002188 | 0.008413 | 1 | 0.097356 | false | 0.007212 | 0.020433 | 0.024038 | 0.210337 | 0.00601 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77f23de07c25946522c768f1ef7e8ffca7391f96 | 2,272 | py | Python | qsrlib/src/qsrlib_qsrs/qsr_tpcc.py | alexiatoumpa/QSR_Detector | ff92a128dddb613690a49a7b4130afeac0dd4381 | [
"MIT"
] | 15 | 2015-06-15T16:50:37.000Z | 2022-03-27T09:25:56.000Z | qsrlib/src/qsrlib_qsrs/qsr_tpcc.py | alexiatoumpa/QSR_Detector | ff92a128dddb613690a49a7b4130afeac0dd4381 | [
"MIT"
] | 205 | 2015-01-22T12:02:59.000Z | 2022-03-29T11:59:55.000Z | qsrlib/src/qsrlib_qsrs/qsr_tpcc.py | alexiatoumpa/QSR_Detector | ff92a128dddb613690a49a7b4130afeac0dd4381 | [
"MIT"
] | 16 | 2015-02-04T23:13:18.000Z | 2022-03-08T13:45:53.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function, division
from qsrlib_qsrs.qsr_triadic_abstractclass import QSR_Triadic_1t_Abstractclass
import math
class QSR_TPCC(QSR_Triadic_1t_Abstractclass):
"""TPCC QSRs.
.. seealso:: For further details about TPCC, refer to its :doc:`description. <../handwritten/qsrs/tpcc>`
"""
_unique_id = "tpcc"
_all_possible_relations = ('dlf', 'dfl', 'dsl', 'dbl', 'dlb', 'dsb', 'drb', 'dbr',
'dsr', 'dfr', 'drf', 'dsf', 'clf', 'cfl', 'csl', 'cbl',
'clb', 'csb', 'crb', 'cbr', 'csr', 'cfr', 'crf', 'csf',
'sam')
_dtype = "points"
__partition_names = ['lb','bl','fl','lf','rf','fr','br','rb']
__partition_size = 2 * math.pi / len(__partition_names)
def __init__(self):
"""Constructor."""
super(QSR_TPCC, self).__init__()
def _compute_qsr(self, origin, relatum, objct, qsr_params, **kwargs):
base_distance = math.sqrt((origin.x-relatum.x)**2 + (origin.y-relatum.y)**2)
object_distance = math.sqrt((objct.x-relatum.x)**2 + (objct.y-relatum.y)**2)
if object_distance == 0:
return "sam"
relation = "d" if object_distance > base_distance else "c" # is it far or close: first letter
angle = self._relative_angle(origin, relatum, objct)
partition = int(angle / self.__partition_size)
relation += self.__partition_names[partition]
sin_angle = math.fabs(math.sin(angle))
if sin_angle < 0.00001 or sin_angle > 0.99999:
relation = relation[0]+'s'+relation[2]
return relation
@staticmethod
def _relative_angle(a, b, c):
"""Compute relative angle used to select the (left/right/straight/front/back/straight)
relationship"""
angle_BA = math.atan2((b.y - a.y),(b.x - a.x))
if angle_BA < 0:
angle_BA += 2 * math.pi
angle_CB = math.atan2((c.y - b.y), (c.x - b.x))
if angle_CB < 0:
angle_CB += 2 * math.pi
angle_rel = angle_CB - angle_BA
if angle_rel < 0:
angle_rel += 2 * math.pi
return angle_rel
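# A minimal sketch of how the relation is computed; Point is an assumed stand-in
# for the point-like objects (with .x and .y) the library normally passes in:
#
#   from collections import namedtuple
#   Point = namedtuple("Point", "x y")
#   qsr = QSR_TPCC()
#   qsr._compute_qsr(Point(0, 0), Point(1, 0), Point(2, 0.5), {})
#   # should yield 'dlb': 'd' for distant (|BC| > |AB|), then the angular partition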
| 37.245902 | 108 | 0.559859 | 289 | 2,272 | 4.152249 | 0.449827 | 0.016667 | 0.023333 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019279 | 0.292254 | 2,272 | 60 | 109 | 37.866667 | 0.72699 | 0.12456 | 0 | 0 | 0 | 0 | 0.054731 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.384615 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77f68b4f89363b8438a93d741379bb05a3a09d63 | 1,053 | py | Python | arrandmatrix/q17.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | 79 | 2018-03-27T12:37:49.000Z | 2022-01-21T10:18:17.000Z | arrandmatrix/q17.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | null | null | null | arrandmatrix/q17.py | pengfei-chen/algorithm_qa | c2ccdcb77004e88279d61e4e433ee49527fc34d6 | [
"MIT"
] | 27 | 2018-04-08T03:07:06.000Z | 2021-10-30T00:01:50.000Z | """
Problem: given a matrix `matrix` whose values may be positive, negative or
zero, return the maximum sum over all of its sub-matrices.
For example, for the matrix
-90 48 78
64 -40 64
-81 -7 66
the sub-matrix with the maximum sum is
48 78
-40 64
-7 66
so the answer is 209.
As another example, for the matrix
-1 -1 -1
-1 2 2
-1 -1 -1
the sub-matrix with the maximum sum is
2 2
so the answer is 4.
"""
import sys
from arrandmatrix.q16 import MaxSum
class MaxMatrixSum:
@classmethod
    def get_max_sum(cls, matrix):
        if not matrix:
            return 0
        max_value = -sys.maxsize
        # Fix a top row i, then extend the bottom row j downwards while keeping
        # the column sums of rows i..j in pre_arr; each (i, j) band is thereby
        # reduced to a 1-D maximum-subarray problem solved by MaxSum.
for i in range(len(matrix)):
j = i
pre_arr = [0 for _ in range(len(matrix[0]))]
while j < len(matrix):
arr = cls.arr_add(matrix[j], pre_arr)
max_value = max([MaxSum.get_max_sum(arr), max_value])
j += 1
pre_arr = arr
return max_value
@classmethod
def arr_add(cls, arr1, arr2):
        # element-wise sum of two equal-length lists
        return [arr1[i] + arr2[i] for i in range(len(arr1))]
if __name__ == '__main__':
my_matrix = [
[-90, 48, 78],
[64, -40, 64],
[-81, -7, 66]
]
print(MaxMatrixSum.get_max_sum(my_matrix)) | 18.155172 | 69 | 0.545109 | 152 | 1,053 | 3.605263 | 0.388158 | 0.018248 | 0.016423 | 0.029197 | 0.113139 | 0.062044 | 0.062044 | 0.062044 | 0.062044 | 0 | 0 | 0.102273 | 0.331434 | 1,053 | 58 | 70 | 18.155172 | 0.676136 | 0.205128 | 0 | 0.074074 | 0 | 0 | 0.009639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0.037037 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
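# Complexity sketch: with an n x m matrix there are O(n^2) row bands, and each
# band costs O(m) assuming MaxSum (from q16) is a Kadane-style 1-D maximum
# subarray, so the whole routine runs in O(n^2 * m) time and O(m) extra space.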
77f7e72e1c1010465e3c08a2178c53ca02ebd59a | 362 | py | Python | Beginner/Easy Math/easy_math.py | agnisain123/CodeChef-1 | c6316b179e4b055eb17ead9df8f93505d8fc1166 | [
"Apache-2.0"
] | null | null | null | Beginner/Easy Math/easy_math.py | agnisain123/CodeChef-1 | c6316b179e4b055eb17ead9df8f93505d8fc1166 | [
"Apache-2.0"
] | null | null | null | Beginner/Easy Math/easy_math.py | agnisain123/CodeChef-1 | c6316b179e4b055eb17ead9df8f93505d8fc1166 | [
"Apache-2.0"
] | null | null | null | t=int(input())
for _ in range(t):
n=int(input())
a=list(map(int, input().split()))
max_sum=0
for j in range(n):
for k in range(j+1, n):
num=a[j]*a[k]
add=0
while(num!=0):
add+=num%10
num=num//10
if max_sum<add:
max_sum=add
print(max_sum)
| 22.625 | 37 | 0.428177 | 57 | 362 | 2.631579 | 0.403509 | 0.16 | 0.12 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037915 | 0.417127 | 362 | 15 | 38 | 24.133333 | 0.672986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
77fb88e214d8327d25cb3dbd91a4d85fb6136d9e | 5,686 | py | Python | openapi_client/models/validation_error.py | brighthive/jdx-client-api-python | ed94c578a6c9a5e9aadf8764439c22783ac1d9d5 | [
"Apache-2.0"
] | null | null | null | openapi_client/models/validation_error.py | brighthive/jdx-client-api-python | ed94c578a6c9a5e9aadf8764439c22783ac1d9d5 | [
"Apache-2.0"
] | null | null | null | openapi_client/models/validation_error.py | brighthive/jdx-client-api-python | ed94c578a6c9a5e9aadf8764439c22783ac1d9d5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
JDX reference application API
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
The version of the OpenAPI document: 0.0.17
Contact: engineering@brighthive.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ValidationError(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'message': 'str',
        'status_code': 'int',
        'validation_errors': 'list[ValidationErrorValidationErrors]'
    }

    attribute_map = {
        'message': 'message',
        'status_code': 'statusCode',
        'validation_errors': 'validationErrors'
    }

    def __init__(self, message=None, status_code=None, validation_errors=None):  # noqa: E501
        """ValidationError - a model defined in OpenAPI"""  # noqa: E501
        self._message = None
        self._status_code = None
        self._validation_errors = None
        self.discriminator = None

        if message is not None:
            self.message = message
        if status_code is not None:
            self.status_code = status_code
        if validation_errors is not None:
            self.validation_errors = validation_errors

    @property
    def message(self):
        """Gets the message of this ValidationError.  # noqa: E501

        :return: The message of this ValidationError.  # noqa: E501
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this ValidationError.

        :param message: The message of this ValidationError.  # noqa: E501
        :type: str
        """
        if message is not None and len(message) > 1024:
            raise ValueError("Invalid value for `message`, length must be less than or equal to `1024`")  # noqa: E501

        self._message = message

    @property
    def status_code(self):
        """Gets the status_code of this ValidationError.  # noqa: E501

        A code identifying the message response. A code of `1` indicates success.  # noqa: E501

        :return: The status_code of this ValidationError.  # noqa: E501
        :rtype: int
        """
        return self._status_code

    @status_code.setter
    def status_code(self, status_code):
        """Sets the status_code of this ValidationError.

        A code identifying the message response. A code of `1` indicates success.  # noqa: E501

        :param status_code: The status_code of this ValidationError.  # noqa: E501
        :type: int
        """
        if status_code is not None and status_code > 9999:  # noqa: E501
            raise ValueError("Invalid value for `status_code`, must be a value less than or equal to `9999`")  # noqa: E501
        if status_code is not None and status_code < -1:  # noqa: E501
            raise ValueError("Invalid value for `status_code`, must be a value greater than or equal to `-1`")  # noqa: E501

        self._status_code = status_code

    @property
    def validation_errors(self):
        """Gets the validation_errors of this ValidationError.  # noqa: E501

        :return: The validation_errors of this ValidationError.  # noqa: E501
        :rtype: list[ValidationErrorValidationErrors]
        """
        return self._validation_errors

    @validation_errors.setter
    def validation_errors(self, validation_errors):
        """Sets the validation_errors of this ValidationError.

        :param validation_errors: The validation_errors of this ValidationError.  # noqa: E501
        :type: list[ValidationErrorValidationErrors]
        """
        self._validation_errors = validation_errors

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ValidationError):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
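
# Minimal usage sketch (illustrative; field values are made up and this is not
# part of the generated file):
# err = ValidationError(message="bad request", status_code=-1,
#                       validation_errors=[])
# err.to_dict()
# -> {'message': 'bad request', 'status_code': -1, 'validation_errors': []}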
| 32.678161 | 317 | 0.615195 | 673 | 5,686 | 5.059435 | 0.219911 | 0.073421 | 0.074009 | 0.066079 | 0.381791 | 0.290455 | 0.254626 | 0.21674 | 0.1163 | 0.096329 | 0 | 0.022088 | 0.299332 | 5,686 | 173 | 318 | 32.867052 | 0.83258 | 0.352796 | 0 | 0.063291 | 0 | 0 | 0.125678 | 0.011802 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151899 | false | 0 | 0.037975 | 0 | 0.341772 | 0.025316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ae03795c6c2f7e8cd5507cfab2b5d4572af1aac3 | 1,522 | py | Python | src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 21 | 2015-11-19T16:18:45.000Z | 2021-12-02T18:20:39.000Z | src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 5,671 | 2015-01-06T14:38:52.000Z | 2022-03-31T22:11:14.000Z | src/python/WMCore/WMBS/MySQL/Files/AddToFileset.py | khurtado/WMCore | f74e252412e49189a92962945a94f93bec81cd1e | [
"Apache-2.0"
] | 67 | 2015-01-21T15:55:38.000Z | 2022-02-03T19:53:13.000Z | #!/usr/bin/env python
"""
_AddToFileset_
MySQL implementation of Files.AddToFileset
"""
import time
from WMCore.Database.DBFormatter import DBFormatter
class AddToFileset(DBFormatter):
    sql = """INSERT IGNORE INTO wmbs_fileset_files (fileid, fileset, insert_time)
               SELECT wmbs_file_details.id, :fileset, :insert_time
               FROM wmbs_file_details
               WHERE wmbs_file_details.lfn = :lfn
               """

    sqlAvail = """INSERT IGNORE INTO wmbs_sub_files_available (subscription, fileid)
                    SELECT wmbs_subscription.id AS subscription,
                           wmbs_file_details.id AS fileid FROM wmbs_subscription
                      INNER JOIN wmbs_file_details ON
                        wmbs_file_details.lfn = :lfn
                    WHERE wmbs_subscription.fileset = :fileset
                    """

    def execute(self, file = None, fileset = None, conn = None,
                transaction = False):
        binds = []
        availBinds = []
        timestamp = int(time.time())
        for fileLFN in file:
            binds.append({"lfn": fileLFN, "fileset": fileset,
                          "insert_time": timestamp})
            availBinds.append({"lfn": fileLFN, "fileset": fileset})

        self.dbi.processData(self.sql, binds, conn = conn,
                             transaction = transaction)
        self.dbi.processData(self.sqlAvail, availBinds, conn = conn,
                             transaction = transaction)
        return
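
# Shape of the bind variables handed to processData (illustrative values only):
# binds      = [{"lfn": "/store/file.root", "fileset": 42,
#                "insert_time": 1600000000}]
# availBinds = [{"lfn": "/store/file.root", "fileset": 42}]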
| 36.238095 | 84 | 0.580158 | 148 | 1,522 | 5.797297 | 0.371622 | 0.055944 | 0.104895 | 0.04662 | 0.118881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.336399 | 1,522 | 41 | 85 | 37.121951 | 0.849505 | 0.051905 | 0 | 0.137931 | 0 | 0 | 0.46899 | 0.078049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ac3b18e2b2f5eb9e5b456e6eaeeeea667be53a4 | 2,210 | py | Python | backend/src/Blueprints/Posts.py | mechaadi/OnlineLawyerSuite | a5debc337afe3f3693978177bf53d7646ae3536b | [
"MIT"
] | 3 | 2020-05-31T12:31:16.000Z | 2021-08-29T00:00:01.000Z | backend/src/Blueprints/Posts.py | mechaadi/OnlineLawyerSuite | a5debc337afe3f3693978177bf53d7646ae3536b | [
"MIT"
] | 21 | 2020-05-28T16:16:03.000Z | 2022-02-27T06:51:53.000Z | backend/src/Blueprints/Posts.py | mechaadi/OnlineLawyerSuite | a5debc337afe3f3693978177bf53d7646ae3536b | [
"MIT"
] | null | null | null | from flask import Flask, Blueprint, g, request
from src.model import Post, File
from src.db import db
from src.Middlewares.AuthMiddleware import *
from werkzeug.utils import secure_filename
import os
import dateutil.parser as dt
post_bp = Blueprint('post', __name__, url_prefix='/posts')
def respond(data, code):
    responder = Responder()
    return responder.respond(data, code)


def respond_error(msg, code):
    responder = Responder()
    return responder.respond_error(msg, code)


@post_bp.route('/test')
def test():
    access_token = token_urlsafe(40)
    return 'post ok!'


@post_bp.route('/create', methods=['POST'])
@check_auth
def _create_post():
    body = request.json
    print(body)
    title = body['title']
    content = body['content']
    pub_at = dt.parse(body['pub_at'])
    # tags = json.loads(json.dumps(body['tags']))
    # images = json.loads(json.dumps(body['images']))
    # print(body['images'])
    p = Post(title=title, content=content, images=body['images'],
             pub_at=pub_at, user=g.user.id)
    with db.atomic() as tx:
        try:
            p.save()
            return respond(p.to_dict(), 201)
        except Exception as e:
            print(e)
            return respond_error(str(e), 500)
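
# Illustrative JSON body for POST /posts/create, inferred from the fields read
# above (the concrete values are made up):
# {"title": "Hello", "content": "First post", "images": [],
#  "pub_at": "2020-05-31T12:00:00"}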
@post_bp.route('/', methods=['GET'])
def _get_all_posts():
    post = Post.select()
    post = [p.to_dict() for p in post]
    return respond(post, 201)


@post_bp.route('/<id>', methods=['GET'])
@check_auth
def _get_by_id(id):
    post = Post.get_by_id(id)
    return respond(post.to_dict(), 201)


@post_bp.route('/<id>', methods=['delete'])
@check_auth
def delete_post(id):
    post = Post.get_or_none(Post.id == id)
    if post is not None:
        if post.user.id == g.user.id:
            with db.atomic() as tx:
                try:
                    deleted_post = post
                    q = Post.delete().where(Post.id == post.id)
                    q.execute()
                    return respond(deleted_post.to_dict(), 201)
                except Exception as e:
                    return respond_error(str(e), 500)
        else:
            return respond_error("UNAUTHORIZED USER", 404)
    else:
        return respond_error("POST NOT FOUND", 404)
| 24.555556 | 95 | 0.609502 | 303 | 2,210 | 4.293729 | 0.310231 | 0.079939 | 0.042275 | 0.043044 | 0.257494 | 0.223674 | 0.120676 | 0.039969 | 0.039969 | 0 | 0 | 0.015729 | 0.252036 | 2,210 | 89 | 96 | 24.831461 | 0.771325 | 0.050226 | 0 | 0.241935 | 0 | 0 | 0.05364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112903 | false | 0 | 0.112903 | 0 | 0.403226 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ac6dc3dc3bb8ad5ce9781cd7166fbd71e9510f9 | 9,349 | py | Python | felpy/analysis/optics/complex/coherence.py | twguest/FELpy | 0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd | [
"Apache-2.0"
] | 1 | 2021-03-15T14:04:19.000Z | 2021-03-15T14:04:19.000Z | felpy/analysis/optics/complex/coherence.py | twguest/FELpy | 0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd | [
"Apache-2.0"
] | 2 | 2021-11-27T11:55:48.000Z | 2021-11-27T11:56:26.000Z | felpy/analysis/optics/complex/coherence.py | twguest/FELpy | 0ac9dd965b0d8e04dddbf2c9aef5ac137d1f0dfd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 15 14:24:28 2020
@author: twguest
"""
import os
import numpy as np
from time import time
from wpg import srwlib
from felpy.model.tools import radial_profile
#from wpg.wpg_uti_wf import get_axis
from tqdm import tqdm
from felpy.utils.job_utils import JobScheduler
#import wpg.srwlib as srwl
from wpg.srw import srwlpy as srwl
from felpy.utils.np_utils import memory_map, readMap
import multiprocessing as mp
from functools import partial
from scipy.sparse import csr_matrix
from felpy.model.materials.mirror_surface import binArray
def get_longitudinal_coherence(slice_no, cfr, map_loc=None, bins=1, VERBOSE=True):
    """
    Calculate the longitudinal correlation of each slice of a complex wavefront
    of shape [nx, ny, nz] against a single slice of shape [nx, ny] at the
    longitudinal interval defined by slice_no.

    :param cfr: complex wavefield
    :param slice_no: longitudinal index [int]

    :returns g: complex degree of coherence
    """
    A = np.roll(cfr, -slice_no, axis=2)
    B = np.repeat(cfr[:, :, slice_no][:, :, np.newaxis], cfr.shape[-1], axis=-1)

    ## DEBUG print(A[:,:,0] == wfr[:,:,i])
    ## DEBUG print([B[:,:,k] == wfr[:,:,i] for k in range(wfr.shape[-1])])

    if map_loc is not None:
        mmap = memory_map(map_loc,
                          shape=cfr.shape,
                          dtype='complex64')

        mmap[:, :, slice_no] = ((A * B.conjugate()).mean(axis=-1)) / np.sqrt(
            (abs(A)**2).mean(axis=-1) * (abs(B)**2).mean(axis=-1))
    else:
        return ((A * B.conjugate()).mean(axis=-1)) / np.sqrt(
            (abs(A)**2).mean(axis=-1) * (abs(B)**2).mean(axis=-1))
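
# For reference, the quantity computed above is the normalised mutual
# correlation of the field with a shifted copy of itself,
#   g(dz) = <E(z) E*(z + dz)> / sqrt(<|E(z)|^2> <|E(z + dz)|^2>),
# averaged along the last axis; the coherence time computed further below is
# then tau_c = sum_dz |g|^2 * tStep. (This reading is inferred from the code
# itself, not stated in the original source.)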
def get_longitudinal_coherence_new(slice_no, cfr, map_loc=None, bins=1, VERBOSE=True):
    """
    Calculate the longitudinal correlation of each slice of a complex wavefront
    of shape [nx, ny, nz] against a single slice of shape [nx, ny] at the
    longitudinal interval defined by slice_no.

    :param cfr: complex wavefield
    :param slice_no: longitudinal index [int]

    :returns g: complex degree of coherence
    """
    A = np.roll(cfr, -slice_no, axis=2)
    B = np.repeat(cfr[:, :, slice_no][:, :, np.newaxis], cfr.shape[-1], axis=-1)

    ## DEBUG print(A[:,:,0] == wfr[:,:,i])
    ## DEBUG print([B[:,:,k] == wfr[:,:,i] for k in range(wfr.shape[-1])])

    if map_loc is not None:
        mmap = memory_map(map_loc,
                          shape=cfr.shape,
                          dtype='complex64')

        mmap[:, :, slice_no] = ((A * B.conjugate()).mean(axis=-1)) / np.sqrt(
            (abs(A)**2).mean(axis=-1) * (abs(B)**2).mean(axis=-1))
    else:
        return ((A * B.conjugate()).mean(axis=-1)) / np.sqrt(
            (abs(A)**2).mean(axis=-1) * (abs(B)**2).mean(axis=-1))
def get_coherence_time_new(cfr, tStep, mpi=False, map_loc="/tmp/coherence_map",
                           bins=1, VERBOSE=True):
    """
    Calculate the coherence time of a complex wavefield of shape
    [nx, ny, nt].

    Relevant for statistically stationary sources.

    ref: Coherence properties of the radiation from X-ray free electron laser

    :param cfr: complex wavefield
    :param tStep: temporal step between slices

    :returns tau: coherence time [s]
    """
    mmap = memory_map(map_loc=map_loc,
                      shape=cfr.shape,
                      dtype='complex64')

    nz0 = cfr.shape[-1]
    if bins == 1:
        nz1 = nz0
    else:
        cfr = binArray(cfr, axis=-1, binstep=nz0 // bins, binsize=1)
        nz1 = cfr.shape[-1]
        tStep *= (nz0 / nz1)

    g = np.zeros([*cfr.shape], dtype='complex64')

    if VERBOSE:
        print("Calculating Coherence Time")

    if mpi:
        processes = mp.cpu_count() // 2
        pool = mp.Pool(processes)
        pool.map(partial(get_longitudinal_coherence, cfr=cfr, map_loc=map_loc),
                 range(cfr.shape[-1]))
        g = readMap(map_loc, cfr.shape, dtype='complex64')
    else:
        for i in tqdm(range(cfr.shape[-1])):
            g[:, :, i] = get_longitudinal_coherence(slice_no=i, cfr=cfr)

    tau = (abs(g)**2).sum(axis=-1)[0, 0]

    if VERBOSE:
        print("\n")
        print(tau)
        print("Time Step: {} fs".format(tStep * 1e15))
        print("Coherence Time: {:.2e} fs".format(tau * 1e15 * tStep))

    del mmap
    os.remove(map_loc)

    return tau * tStep


def get_coherence_time(cfr, tStep, mpi=False, map_loc="/tmp/coherence_map",
                       bins=1, VERBOSE=True):
    """
    Calculate the coherence time of a complex wavefield of shape
    [nx, ny, nt].

    Relevant for statistically stationary sources.

    ref: Coherence properties of the radiation from X-ray free electron laser

    :param cfr: complex wavefield
    :param tStep: temporal step between slices

    :returns tau: coherence time [s]
    """
    mmap = memory_map(map_loc=map_loc,
                      shape=cfr.shape,
                      dtype='complex64')

    nz0 = cfr.shape[-1]
    if bins == 1:
        nz1 = nz0
    else:
        cfr = binArray(cfr, axis=-1, binstep=nz0 // bins, binsize=1)
        nz1 = cfr.shape[-1]
        tStep *= (nz0 / nz1)

    g = np.zeros([*cfr.shape], dtype='complex64')

    if VERBOSE:
        print("Calculating Coherence Time")

    if mpi:
        processes = mp.cpu_count() // 2
        pool = mp.Pool(processes)
        pool.map(partial(get_longitudinal_coherence, cfr=cfr, map_loc=map_loc),
                 range(cfr.shape[-1]))
        g = readMap(map_loc, cfr.shape, dtype='complex64')
    else:
        for i in tqdm(range(cfr.shape[-1])):
            g[:, :, i] = get_longitudinal_coherence(slice_no=i, cfr=cfr)

    tau = (abs(g)**2).sum(axis=-1)[0, 0]
    print("g", np.max(g))

    if VERBOSE:
        print("\n")
        print(tau)
        print("Time Step: {} fs".format(tStep * 1e15))
        print("Coherence Time: {:.2e} fs".format(tau * 1e15 * tStep))

    del mmap
    os.remove(map_loc)

    return tau * tStep, g


def get_coherence_time_wpg(wfr, mpi=False, VERBOSE=True):
    srwl.SetRepresElecField(wfr._srwl_wf, 't')
    time_step = (wfr.params.Mesh.sliceMax - wfr.params.Mesh.sliceMin) / wfr.params.Mesh.nSlices
    return get_coherence_time(wfr.as_complex_array(), time_step, mpi=mpi)


def get_coherence_len(wfr, dx, dy, VERBOSE=True):
    """
    Calculate the coherence length of a complex wavefield of shape
    [nx, ny, nz].

    :param wfr: complex wavefield
    :param dx: horizontal pixel size
    :param dy: vertical pixel size

    :returns Jd: complex degree of coherence
    :returns clen: coherence length [m]
    """
    profile, r = get_complex_radial_profile(wfr)

    nt = wfr.shape[-1]
    J = np.dot(profile, profile.T.conjugate()) / nt
    II = np.abs(np.diag(J))  # intensity as the main diagonal
    print(J.shape)
    J /= II**0.5 * II[:, np.newaxis]**0.5
    Jd = np.abs(np.diag(np.fliplr(J)))  # DoC as the cross-diagonal

    lm = np.arange(Jd.shape[0])
    lm = lm[(lm >= Jd.shape[0] // 2) & (Jd[lm] < 0.5)]

    rstep = np.sqrt((dx)**2 + (dy)**2)

    try:
        lm = lm[0] - Jd.shape[0] // 2
    except (IndexError):
        lm = np.inf

    clen = lm * rstep

    if VERBOSE:
        print("Radial Coherence Length: {:.2f} um".format(clen * 1e6))

    return clen


def get_transverse_doc(wfr, VERBOSE=True):
    """
    Get the transverse degree of coherence of the wavefront across each of the
    transverse dimension slices.
    """
    p, r = get_complex_radial_profile(wfr)

    nt = wfr.shape[-1]
    J = np.dot(p, p.T.conjugate()) / nt

    tdoc = np.diag(np.dot(J, J)).sum() / np.diag(J).sum()**2

    if VERBOSE:
        print("Transverse Degree of Coherence: {:.4f}".format(tdoc.real))

    return tdoc


def get_complex_radial_profile(wfr):
    """
    Calculate the radial profile of a complex array by azimuthal averaging:

    I_{radial}(R) = \int_0^R \frac{I(r)2\pi r}{\pi R^2} dr

    :param wfr: complex wavefield [np array]

    :returns prof: radial profile
    """
    r = radial_profile(wfr[:, :, 0].real, [wfr.shape[0] // 2, wfr.shape[1] // 2])[1]
    r = np.diag(r).copy()
    r[:r.shape[0] // 2] *= -1

    rp = np.stack([radial_profile(wfr[:, :, i].real,
                                  [wfr.shape[0] // 2, wfr.shape[1] // 2])[0]
                   + radial_profile(wfr[:, :, i].imag,
                                    [wfr.shape[0] // 2, wfr.shape[1] // 2])[0] * 1j
                   for i in range(wfr.shape[-1])])

    prof = np.moveaxis(rp, 0, -1)

    return prof, r
def coherent_test(wfr):

    # tstep = get_axis(wfr, axis='t')  # dead call: get_axis is not imported,
    # and the value was immediately overwritten on the next line anyway
    tstep = wfr.get_temporal_resolution()
    xstep, ystep = wfr.get_spatial_resolution()

    wfr = wfr.as_complex_array()

    # get_coherence_time returns the pair (tau, g)
    tau, _g = get_coherence_time(wfr, tstep, VERBOSE=True)
    clen = get_coherence_len(wfr, xstep, ystep, VERBOSE=True)
    tdoc = get_transverse_doc(wfr, VERBOSE=True)

    return tau, clen, tdoc
if __name__ == "__main__":
    pass | 27.578171 | 93 | 0.568403 | 1,287 | 9,349 | 4.035742 | 0.181041 | 0.023104 | 0.020793 | 0.033885 | 0.621679 | 0.608587 | 0.591837 | 0.591837 | 0.591837 | 0.57836 | 0 | 0.02453 | 0.289229 | 9,349 | 339 | 94 | 27.578171 | 0.757111 | 0.233501 | 0 | 0.56051 | 0 | 0 | 0.048071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057325 | false | 0.006369 | 0.082803 | 0 | 0.197452 | 0.089172 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ac87bf72cc8a3c3039638b11a435af514a8fc27 | 911 | py | Python | training_program_2019/core_python/basic/threading_join.py | jeffrey-zhang/learn | 7d9d53f955f0424ad4c68f69d5538867e5b7fa98 | [
"Apache-2.0"
] | 1 | 2020-10-12T01:23:51.000Z | 2020-10-12T01:23:51.000Z | training_program_2019/core_python/basic/threading_join.py | jeffrey-zhang/learn | 7d9d53f955f0424ad4c68f69d5538867e5b7fa98 | [
"Apache-2.0"
] | 1 | 2020-10-11T10:38:21.000Z | 2020-10-11T10:38:21.000Z | training_program_2019/core_python/basic/threading_join.py | jeffrey-zhang/learn | 7d9d53f955f0424ad4c68f69d5538867e5b7fa98 | [
"Apache-2.0"
] | 1 | 2020-09-07T07:22:54.000Z | 2020-09-07T07:22:54.000Z | import threading
import time
products = []
condition = threading.Condition()
class consumer(threading.Thread):

    def consume(self):
        global condition
        global products

        condition.acquire()
        if len(products) == 0:
            condition.wait()
            print('consumer is notified: no product to consume')
        products.pop()
        print("consumer notification: consume 1 product")
        print('consumer notification: there are ' + str(len(products)) +
              " left that can be consumed")
        condition.notify()
        condition.release()

    def run(self):
        for i in range(0, 20):
            time.sleep(4)
            self.consume()


class Producer(threading.Thread):

    def produce(self):
        global condition
        global products

        condition.acquire()
        if len(products) == 10:
            condition.wait()
            print('consumer notified')
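
# This snapshot of the file is truncated: Producer.produce is cut off above and
# Producer has no run() here. Purely as a hedged sketch of the canonical
# Condition-variable produce step (an assumption about intent, NOT the original
# code), the method would continue along these lines:
#     products.append('product')
#     condition.notify()
#     condition.release()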
7ac9c5a18f03b6f72c0a1607e900467a67c17aa0 | 1,048 | py | Python | tests/performance/test_mm_lazy_eval.py | varun19299/FeatherMap | a3991ce48eed98584bc12d6ddcb6409ef3db5d60 | [
"MIT"
] | 14 | 2020-11-05T00:24:40.000Z | 2022-03-30T15:22:31.000Z | tests/performance/test_mm_lazy_eval.py | varun19299/FeatherMap | a3991ce48eed98584bc12d6ddcb6409ef3db5d60 | [
"MIT"
] | 4 | 2020-10-01T00:46:39.000Z | 2021-02-26T23:38:09.000Z | tests/performance/test_mm_lazy_eval.py | varun19299/FeatherMap | a3991ce48eed98584bc12d6ddcb6409ef3db5d60 | [
"MIT"
] | 2 | 2020-11-09T13:09:10.000Z | 2021-02-18T11:09:32.000Z | import torch
from feathermap.utils import timed
from math import sqrt
dim_in = 2 ** 14
dim_out = 2 ** 4
A = torch.randn(dim_in, dim_out)
B = torch.randn(dim_out, dim_in)
C = torch.rand(dim_in, dim_in)
D = torch.rand(dim_in, dim_in)
E = torch.rand(1, dim_out)
F = torch.rand(dim_out, dim_in)
G = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))
H = torch.rand(int(sqrt(dim_in)), int(sqrt(dim_in)))
@timed
def mam(a, b):
    for _ in range(10000):
        out = torch.mm(a, b)
    return out


def loop(a, b):
    for i in range(a.size(0)):
        for j in range(b.size(1)):
            yield a[i, :] @ b[:, j]


def loop2(a, b):
    for i in range(a.size(0)):
        for j in range(b.size(1)):
            yield 1


def tmm(a, b):
    c = torch.mm(a, b).view(-1, 1)
    return iter(c)


@timed
def run(c, dim_in):
    d = torch.empty(dim_in ** 2)
    for i in range(d.numel()):
        d[i] = next(c)


mam(E, F)  # about 23% faster
mam(G, H)

# run(loop(A, B), dim_in)  # 739
# run(loop2(A, B), dim_in)  # 254
# run(tmm(A, B), dim_in)  # 289
| 18.714286 | 52 | 0.578244 | 208 | 1,048 | 2.802885 | 0.254808 | 0.145798 | 0.077187 | 0.082333 | 0.319039 | 0.319039 | 0.253859 | 0.253859 | 0.253859 | 0.253859 | 0 | 0.040353 | 0.243321 | 1,048 | 55 | 53 | 19.054545 | 0.69483 | 0.101145 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.083333 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad04a1e5c739e7d4690a2753dc470af7567beec | 785 | py | Python | PythonWeb/example/App/urls.py | JimouChen/python-application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
] | 1 | 2020-08-09T12:47:27.000Z | 2020-08-09T12:47:27.000Z | PythonWeb/example/App/urls.py | JimouChen/Python_Application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
] | null | null | null | PythonWeb/example/App/urls.py | JimouChen/Python_Application | b7b16506a17e2c304d1c5fabd6385e96be211c56 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from django.urls import path
from App import views
app_name = 'App'
urlpatterns = [
    path('', views.home, name='home'),
    # create / update / delete
    path('cud/', views.handle_data, name='handle_data'),
    # query
    path('search/', views.find_data, name='search'),
    # use raw SQL
    path('rawsql/', views.raw_sql, name='raw_sql'),
    # custom manager; use it only if you need it
    path('manager/', views.my_manager, name='my_manager'),
    # registration page
    path('register/', views.handle_register, name='register'),
    # login page
    path('login/', views.handle_login, name='login'),
    # show user information
    path('show/', views.show_msg, name='show'),
    # movie listing
    path('movie/', views.show_movie, name='movie'),
    # Django's built-in pagination
    path('movie_page/', views.show_movie_page, name='movie_page'),
]
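
# Since app_name is set, these routes can be reversed with a namespace, e.g.
# (illustrative): django.urls.reverse('App:search') -> '/search/'; the actual
# prefix depends on how this urlconf is included in the project's root urls.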
| 27.068966 | 66 | 0.63949 | 102 | 785 | 4.764706 | 0.372549 | 0.067901 | 0.057613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185987 | 785 | 28 | 67 | 28.035714 | 0.760563 | 0.081529 | 0 | 0 | 0 | 0 | 0.19128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad2462093f3bfe8307d27ce00eeee4b819dafcf | 5,098 | py | Python | carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py | umateusz/ros-bridge | e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa | [
"MIT"
] | null | null | null | carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py | umateusz/ros-bridge | e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa | [
"MIT"
] | null | null | null | carla_walker_agent/src/carla_walker_agent/carla_walker_agent.py | umateusz/ros-bridge | e1a99d94eca9fa82c7bfb8417d2282ef6939d8fa | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Agent for Walker
"""
import math
from nav_msgs.msg import Path, Odometry
from std_msgs.msg import Float64
from geometry_msgs.msg import Pose, Vector3
from carla_msgs.msg import CarlaWalkerControl
from ros_compatibility import (
CompatibleNode,
QoSProfile,
ros_ok,
ROSInterruptException,
ros_init)
import os
ROS_VERSION = int(os.environ['ROS_VERSION'])
if ROS_VERSION == 1:
    import rospy
elif ROS_VERSION == 2:
    import time
    import threading
class CarlaWalkerAgent(CompatibleNode):
    """
    walker agent
    """
    # minimum distance to target waypoint before switching to next
    MIN_DISTANCE = 0.5

    def __init__(self):
        """
        Constructor
        """
        super(CarlaWalkerAgent, self).__init__('carla_walker_agent')

        role_name = self.get_param("role_name", "ego_vehicle")
        self._target_speed = self.get_param("target_speed", 2.0)

        self._route_assigned = False
        self._waypoints = []
        self._current_pose = Pose()
        self.on_shutdown(self._on_shutdown)

        # wait for ros bridge to create relevant topics
        try:
            self.wait_for_one_message(
                "/carla/{}/odometry".format(role_name), Odometry)
        except ROSInterruptException as e:
            if not ros_ok():  # fixed: the bare name ros_ok is always truthy
                raise e

        self._odometry_subscriber = self.create_subscriber(
            Odometry, "/carla/{}/odometry".format(role_name), self.odometry_updated)

        self.control_publisher = self.new_publisher(
            CarlaWalkerControl, "/carla/{}/walker_control_cmd".format(role_name),
            QoSProfile(depth=1, durability=False))

        self._route_subscriber = self.create_subscriber(
            Path, "/carla/{}/waypoints".format(role_name), self.path_updated)

        self._target_speed_subscriber = self.create_subscriber(
            Float64, "/carla/{}/target_speed".format(role_name), self.target_speed_updated)

    def _on_shutdown(self):
        """
        callback on shutdown
        """
        self.loginfo("Shutting down, stopping walker...")
        self.control_publisher.publish(CarlaWalkerControl())  # stop

    def target_speed_updated(self, target_speed):
        """
        callback on new target speed
        """
        self.loginfo("New target speed received: {}".format(target_speed.data))
        self._target_speed = target_speed.data

    def path_updated(self, path):
        """
        callback on new route
        """
        self.loginfo("New plan with {} waypoints received. Assigning plan...".format(
            len(path.poses)))
        self.control_publisher.publish(CarlaWalkerControl())  # stop
        self._waypoints = []
        for elem in path.poses:
            self._waypoints.append(elem.pose)

    def odometry_updated(self, odo):
        """
        callback on new odometry
        """
        self._current_pose = odo.pose.pose

    def run(self):
        """
        Control loop

        :return:
        """
        loop_frequency = 20
        if ROS_VERSION == 1:
            r = rospy.Rate(loop_frequency)

        self.loginfo("Starting run loop")
        while ros_ok():
            if self._waypoints:
                control = CarlaWalkerControl()
                direction = Vector3()
                direction.x = self._waypoints[0].position.x - self._current_pose.position.x
                direction.y = self._waypoints[0].position.y - self._current_pose.position.y
                direction_norm = math.sqrt(direction.x**2 + direction.y**2)
                if direction_norm > CarlaWalkerAgent.MIN_DISTANCE:
                    control.speed = self._target_speed
                    control.direction.x = direction.x / direction_norm
                    control.direction.y = direction.y / direction_norm
                else:
                    self._waypoints = self._waypoints[1:]
                    if self._waypoints:
                        self.loginfo("next waypoint: {} {}".format(
                            self._waypoints[0].position.x, self._waypoints[0].position.y))
                    else:
                        self.loginfo("Route finished.")
                self.control_publisher.publish(control)
            try:
                if ROS_VERSION == 1:
                    r.sleep()
                elif ROS_VERSION == 2:
                    # TODO: use rclpy.Rate, not working yet
                    time.sleep(1 / loop_frequency)
            except ROSInterruptException:
                pass


def main(args=None):
    """
    main function

    :return:
    """
    ros_init(args)

    controller = CarlaWalkerAgent()

    if ROS_VERSION == 2:
        spin_thread = threading.Thread(target=controller.spin, daemon=True)
        spin_thread.start()

    try:
        controller.run()
    finally:
        del controller
        print("Done")
if __name__ == "__main__":
    main()
| 29.988235 | 92 | 0.594547 | 549 | 5,098 | 5.298725 | 0.311475 | 0.049158 | 0.030938 | 0.030251 | 0.095222 | 0.052252 | 0 | 0 | 0 | 0 | 0 | 0.008745 | 0.304629 | 5,098 | 169 | 93 | 30.16568 | 0.811848 | 0.100235 | 0 | 0.156863 | 0 | 0 | 0.078887 | 0.0114 | 0 | 0 | 0 | 0.005917 | 0 | 1 | 0.068627 | false | 0.009804 | 0.098039 | 0 | 0.186275 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad5b9de70a9e9b65388bd37817ad835c2ba9f1f | 5,125 | py | Python | megyr/config_validation.py | ExcaliburZero/megyr | f5543cebe0562c78b5d3c710bd6f11c0efbff25b | [
"MIT"
] | 1 | 2020-11-17T20:35:09.000Z | 2020-11-17T20:35:09.000Z | megyr/config_validation.py | ExcaliburZero/megyr | f5543cebe0562c78b5d3c710bd6f11c0efbff25b | [
"MIT"
] | 17 | 2018-09-25T18:37:17.000Z | 2020-03-31T03:54:36.000Z | megyr/config_validation.py | ExcaliburZero/megyr | f5543cebe0562c78b5d3c710bd6f11c0efbff25b | [
"MIT"
] | null | null | null | from typing import Any, Dict, List
def validate_config(config: Dict[str, Any]) -> List[str]:
    errors: List[str] = []

    assert_to_list(
        errors,
        "input" in config,
        '[no_input] Could not find "input" section in config. "input" section must be present in order to run MESA or GYRE.',
    )
    assert_to_list(
        errors,
        "stages" in config,
        '[no_stages] Could not find "stages" section in config. "stages" section must be present in order to run MESA or GYRE.',
    )

    assert_to_list(
        errors,
        nested_in(config, ["input", "mesa_configs"]),
        '[no_mesa_configs] Could not find "mesa_configs" setting in "input" section in config. The "mesa_configs" setting must be present in order to run MESA.',
    )

    if should_run_gyre(config):
        assert_to_list(
            errors,
            nested_in(config, ["stages", "gyre_params"]),
            '[no_gyre_params] Could not find "gyre_params" setting in "stages" section of config. GYRE is set to run, but needs this setting to know what value combinations to try.',
        )
    else:
        # Check for GYRE settings present when GYRE is not set to run
        gyre_missing_msg = '[gyre_not_enabled] Found "{}" setting in "{}" section of config even though GYRE is not enabled. "gyre_config" in the "input" section must be specified in order to run GYRE.'

        assert_to_list(
            errors,
            not nested_in(config, ["output", "gyre_oscillations_summary_file"]),
            gyre_missing_msg.format("gyre_oscillations_summary_file", "output"),
        )
        assert_to_list(
            errors,
            not nested_in(config, ["settings", "gyre_location"]),
            gyre_missing_msg.format("gyre_location", "settings"),
        )
        assert_to_list(
            errors,
            not nested_in(config, ["settings", "gyre_mp_threads"]),
            gyre_missing_msg.format("gyre_mp_threads", "settings"),
        )
        assert_to_list(
            errors,
            not nested_in(config, ["stages", "gyre_params"]),
            gyre_missing_msg.format("gyre_params", "stages"),
        )
        assert_to_list(
            errors,
            not nested_in(config, ["stages", "gyre_derived"]),
            gyre_missing_msg.format("gyre_derived", "stages"),
        )

    return errors
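
# Illustrative example (not from the original source): a minimal config that
# passes validation when GYRE is disabled (no "gyre_config" under "input"):
# cfg = {"input": {"mesa_configs": ["inlist"]}, "stages": {}}
# validate_config(cfg)  # -> []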
def set_defaults(config: Dict[str, Any]) -> None:
    ### Output
    if not nested_in(config, ["output", "output_dir"]):
        nested_put(config, ["output", "output_dir"], "out")

    if not nested_in(config, ["output", "mesa_profile_summary_file"]):
        nested_put(
            config,
            ["output", "mesa_profile_summary_file"],
            "mesa_profile_attributes.csv",
        )

    ### Settings
    if not nested_in(config, ["settings", "mesa_star_location"]):
        nested_put(config, ["settings", "mesa_star_location"], "star")

    if not nested_in(config, ["settings", "gyre_location"]):
        nested_put(config, ["settings", "gyre_location"], "$GYRE_DIR/bin/gyre")

    if not nested_in(config, ["settings", "gyre_mp_threads"]) and nested_in(
        config, ["settings", "mesa_mp_threads"]
    ):
        nested_put(
            config,
            ["settings", "gyre_mp_threads"],
            config["settings"]["mesa_mp_threads"],
        )


def assert_to_list(errors: List[str], condition: bool, message: str) -> None:
    if not condition:
        errors.append((message))


def should_run_gyre(config: Dict[str, Any]) -> bool:
    return nested_in(config, ["input", "gyre_config"])


def nested_in(config: Dict[str, Any], nested_keys: List[str]) -> bool:
    """
    Checks if the given nested keys are within the given dict. Returns false if
    any of the intermediate keys or the final key are not nested in the dict.

    >>> config = {}
    >>> nested_in(config, ["settings", "gyre_mp_threads"])
    False

    >>> config = {"settings": {}}
    >>> nested_in(config, ["settings", "gyre_mp_threads"])
    False

    >>> config = {"settings": {"gyre_mp_threads": 4}}
    >>> nested_in(config, ["settings", "gyre_mp_threads"])
    True
    """
    for key in nested_keys:
        if key in config:
            config = config[key]
        else:
            return False
    return True


def nested_put(config: Dict[str, Any], nested_keys: List[str], value: Any) -> None:
    """
    Puts the given nested key value pair into the given dict. If any part of
    the nested key structure does not yet exist, then it will be created in the
    process.

    >>> config = {}
    >>> nested_put(config, ["key"], "value")
    >>> config["key"]
    'value'

    >>> config = {}
    >>> nested_put(config, ["settings", "gyre_mp_threads"], 2)
    >>> config["settings"]["gyre_mp_threads"]
    2
    """
    if len(nested_keys) == 0:
        raise Exception("Invalid number of nested keys.")

    if len(nested_keys) == 1:
        config[nested_keys[0]] = value
    else:
        next_key = nested_keys[0]
        if next_key not in config:
            config[next_key] = {}

        nested_put(config[next_key], nested_keys[1:], value)
| 33.717105 | 202 | 0.606244 | 649 | 5,125 | 4.563945 | 0.181818 | 0.067522 | 0.085078 | 0.06077 | 0.435517 | 0.334571 | 0.272451 | 0.204254 | 0.144497 | 0.139095 | 0 | 0.002125 | 0.265366 | 5,125 | 151 | 203 | 33.940397 | 0.784595 | 0.169561 | 0 | 0.263158 | 0 | 0.052632 | 0.331964 | 0.0331 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.063158 | false | 0 | 0.010526 | 0.010526 | 0.115789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad705963a4ae17d0227c03011bc22494dcbdf66 | 674 | py | Python | cdhweb/pages/tests/test_context_processors.py | bwhicks/cdh-web | d6002dc1933a4d6e97f5459aafc9ab92cb1f8050 | [
"Apache-2.0"
] | 1 | 2017-11-21T16:02:33.000Z | 2017-11-21T16:02:33.000Z | cdhweb/pages/tests/test_context_processors.py | bwhicks/cdh-web | d6002dc1933a4d6e97f5459aafc9ab92cb1f8050 | [
"Apache-2.0"
] | 367 | 2017-08-14T16:05:41.000Z | 2021-11-03T15:29:18.000Z | cdhweb/pages/tests/test_context_processors.py | bwhicks/cdh-web | d6002dc1933a4d6e97f5459aafc9ab92cb1f8050 | [
"Apache-2.0"
] | 5 | 2017-09-08T21:08:49.000Z | 2020-10-02T04:39:37.000Z | import pytest
from wagtail.core.models import Page
from cdhweb.pages.context_processors import page_intro
from cdhweb.pages.models import LinkPage, PageIntro
@pytest.mark.django_db
def test_page_intro(rf):
    root = Page.objects.get(title="Root")
    link_page = LinkPage(title="Students", link_url="people/students")
    root.add_child(instance=link_page)
    intro = PageIntro.objects.create(
        page=link_page, paragraph="<p>We have great students</p>"
    )
    # should find a page intro for students
    assert page_intro(rf.get("/people/students/")) == {"page_intro": intro}
    # but not for staff
    assert page_intro(rf.get("/people/staff/")) == {}
| 32.095238 | 75 | 0.718101 | 96 | 674 | 4.90625 | 0.46875 | 0.133758 | 0.070064 | 0.072187 | 0.110403 | 0.110403 | 0 | 0 | 0 | 0 | 0 | 0 | 0.15727 | 674 | 20 | 76 | 33.7 | 0.829225 | 0.087537 | 0 | 0 | 0 | 0 | 0.158497 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.071429 | false | 0 | 0.285714 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad8ae63fb76ddfd06eb6578ef0cef5a87d9b13a | 641 | py | Python | tri_selection.py | Erwanexyz/Python | 1197c0a27530b60e6cbe048758bfe86f0e159e95 | [
"MIT"
] | 1 | 2017-09-07T09:14:55.000Z | 2017-09-07T09:14:55.000Z | tri_selection.py | Erwanexyz/Python | 1197c0a27530b60e6cbe048758bfe86f0e159e95 | [
"MIT"
] | null | null | null | tri_selection.py | Erwanexyz/Python | 1197c0a27530b60e6cbe048758bfe86f0e159e95 | [
"MIT"
] | null | null | null | def tri_selection(tableau):
'''tri_selection (list(object) -> list(object)): trie un tableau'''
# Initialisation
'''taille (int) : taille du tableau'''
taille = len(tableau)
# Début du traitement
# Pour chaque élément tableau[i] du tableau
for i in range(taille):
# Pour chaque j allant de l'élément actuel tableau[i] jusqu'à la fin du
# tableau, on vérifie si c'est le plus petit
for j in range(i, taille):
if tableau[j] < tableau[i]:
tableau[i], tableau[j] = tableau[j], tableau[i]
return tableau
print(tri_selection([3,2,1,4,8,4,10,9,8,32,91])) | 37.705882 | 79 | 0.609984 | 96 | 641 | 4.041667 | 0.53125 | 0.103093 | 0.115979 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029536 | 0.26053 | 641 | 17 | 80 | 37.705882 | 0.78903 | 0.393136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ad8d19aeb7a5e334f966fd5933774700a903b04 | 5,297 | py | Python | main.py | AKBakshay/DCNet | 94d30ae44c95a8db7f7474fcf8cf63347271c1cb | [
"MIT"
] | 3 | 2021-04-18T07:56:18.000Z | 2021-08-15T11:30:25.000Z | main.py | AKBakshay/DCNet | 94d30ae44c95a8db7f7474fcf8cf63347271c1cb | [
"MIT"
] | null | null | null | main.py | AKBakshay/DCNet | 94d30ae44c95a8db7f7474fcf8cf63347271c1cb | [
"MIT"
] | null | null | null | from argparse import ArgumentParser
import torch
import torch.utils.data
import yaml
from torchvision import transforms
import src.config.config as config
from src.model.nn.dcnet import DCNet
from src.test.predictor import Predictor
from src.test.tester import Tester
from src.train.trainer import Trainer
def train(cfg):
    # -------------------- data ------------------------
    training_data_transform = transforms.Compose(
        [
            transforms.RandomCrop(cfg["train"]["crop_size"]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ]
    )

    validation_transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        validation_transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    validation_data_transform = transforms.Compose(validation_transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # ------------------ training setup -------------------------
    criterion = torch.nn.MSELoss(reduction="mean")
    optimizer = torch.optim.RMSprop(
        model.parameters(),
        lr=cfg["train"]["learning_rate"],
        alpha=cfg["train"]["alpha"],
        momentum=cfg["train"]["momentum"],
    )
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=cfg["train"]["scheduler_steps"], gamma=cfg["train"]["gamma"]
    )

    model.train()
    trainer = Trainer(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        optimizer=optimizer,
        lr_scheduler=exp_lr_scheduler,
        train_crops=cfg["train"]["crops"],
        crop_size=cfg["train"]["crop_size"],
        epochs=cfg["train"]["epochs"],
        training_dataset_path=cfg["train"]["data_path"],
        validation_dataset_path=cfg["validate"]["data_path"],
        train_transform=training_data_transform,
        valid_transform=validation_data_transform,
        batch_size=cfg["train"]["batch_size"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
        save_path=cfg["output"]["weight_dir"],
    )
    trainer.train()


def test(cfg):
    # -------------------- data ------------------------
    transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    data_transform = transforms.Compose(transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # ------------------ test ---------------------------
    criterion = torch.nn.MSELoss(reduction="mean")
    model.eval()
    tester = Tester(
        model=model,
        cuda=cfg["basic"]["cuda"],
        criterion=criterion,
        test_dataset_path=cfg["test"]["data_path"],
        test_transform=data_transform,
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        atm_light=cfg["env"]["atm_light"],
        random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["basic"]["uint8_transform"],
    )
    tester.test()


def predict(cfg):
    # -------------------- data ------------------------
    transformations = [transforms.ToTensor()]
    if cfg["image"]["size_reduction"]:
        transformations.append(transforms.Resize(size=cfg["image"]["max_size"]))
    data_transform = transforms.Compose(transformations)

    # -------------------- model -----------------------
    model = DCNet()
    model.initialize(cfg["basic"]["load_weight"], cfg["basic"]["cuda"])  # define

    # -------------------- prediction ------------------
    model.eval()
    predictor = Predictor(
        model=model,
        transform=data_transform,
        dataset=cfg["predict"]["data_path"],
        atm_light=cfg["env"]["atm_light"],
        add_ext_haze=cfg["predict"]["add_ext_haze"],
        t_low=cfg["env"]["transmission_map"]["low"],
        t_high=cfg["env"]["transmission_map"]["high"],
        t_map_random_sampler=cfg["env"]["transmission_map"]["random_sampler"],
        uint8_transform=cfg["predict"]["uint8_transform"],
        cuda=cfg["basic"]["cuda"],
        prediction_dir=cfg["predict"]["save_dir"],
    )
    predictor.predict()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--test", action="store_true")
    parser.add_argument("--predict", action="store_true")
    args = parser.parse_args()

    with open(config.path["CONFIG_PATH"], "r") as ymlfile:
        cfg = yaml.load(ymlfile)

    if args.train:
        train(cfg)
    if args.test:
        test(cfg)
    if args.predict:
        predict(cfg)
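
# Illustrative invocation, based on the argparse flags above (assumes the
# config file referenced by config.path["CONFIG_PATH"] exists):
#   python main.py --train
#   python main.py --test
#   python main.py --predict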
| 32.496933 | 92 | 0.570512 | 532 | 5,297 | 5.468045 | 0.195489 | 0.024751 | 0.055689 | 0.064971 | 0.465796 | 0.465796 | 0.411482 | 0.411482 | 0.36198 | 0.342729 | 0 | 0.00144 | 0.21314 | 5,297 | 162 | 93 | 32.697531 | 0.696497 | 0.092316 | 0 | 0.347826 | 0 | 0 | 0.184546 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026087 | false | 0 | 0.086957 | 0 | 0.113043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7addb1ac9be726107051f2594c1af6bf1d661239 | 9,183 | py | Python | nitroml/automl/ensemble_selection/subpipeline.py | google/nitroml | 5eabdbe6de85ff7fdae4fefda7547c0c031f9431 | [
"Apache-2.0"
] | 43 | 2020-09-13T18:07:15.000Z | 2022-01-05T19:05:28.000Z | nitroml/automl/ensemble_selection/subpipeline.py | google/nitroml | 5eabdbe6de85ff7fdae4fefda7547c0c031f9431 | [
"Apache-2.0"
] | 4 | 2020-09-14T13:15:09.000Z | 2021-11-21T11:21:13.000Z | nitroml/automl/ensemble_selection/subpipeline.py | google/nitroml | 5eabdbe6de85ff7fdae4fefda7547c0c031f9431 | [
"Apache-2.0"
] | 5 | 2020-09-14T13:03:04.000Z | 2021-10-21T01:55:48.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""An Ensemble Selection subpipeline for tabular datasets."""
import json
import os
from typing import List, Optional, Tuple
from absl import logging
from nitroml import subpipeline
from nitroml.automl.ensemble_selection.lib import ensemble_selection as es_lib
import tensorflow as tf
from tfx import types
from tfx.dsl.component.experimental.annotations import InputArtifact
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.annotations import Parameter
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.components.base import base_component
from tfx.types import standard_artifacts
from tfx.utils import path_utils
from google.protobuf import text_format
from nitroml.protos import problem_statement_pb2 as ps_pb2
class EnsembleSelection(subpipeline.Subpipeline):
    """An Ensemble Selection subpipeline for tabular datasets."""

    def __init__(self,
                 problem_statement: ps_pb2.ProblemStatement,
                 examples: types.Channel,
                 models: List[types.Channel],
                 evaluation_split_name: str,
                 ensemble_size: int,
                 metric: Optional[tf.keras.metrics.Metric] = None,
                 goal: Optional[str] = None,
                 instance_name: Optional[str] = None):
        """Constructs an AutoTrainer subpipeline.

        Args:
          problem_statement: ProblemStatement proto identifying the task.
          examples: A Channel of 'Example' artifact type produced from an
            upstream component. The source of examples that are used in
            evaluation (required).
          models: A List of Channels of 'standard_artifact.Model' type to use as
            the library of base models in the ensemble selection algorithm.
          evaluation_split_name: String name of the evaluation split in the
            `examples` artifact to use for evaluation. For example, 'eval'.
          ensemble_size: Maximum number of models (with replacement) to select.
            This is the number of rounds (iterations) for which the ensemble
            selection algorithm will run. The number of models in the final
            ensemble will be at most ensemble_size.
          metric: Optional TF Keras Metric to optimize for during ensemble
            selection. When `None`, the `problem_statement` is used to determine
            the metric and goal.
          goal: Optional string 'maximize' or 'minimize' depending on the goal
            of the metric. When `None`, the `problem_statement` is used to
            determine the metric and goal.
          instance_name: Optional unique instance name. Necessary iff multiple
            EnsembleSelection subpipelines are declared in the same pipeline.

        Raises:
          ValueError: When a required param is not supplied.
        """
        if not metric and not goal:
            metric, goal = self._determine_metric_and_goal(problem_statement)
        input_models = {f'input_model{i}': model for i, model in enumerate(models)}
        self._instance_name = instance_name
        self._ensemble_selection = ensemble_selection(
            problem_statement=text_format.MessageToString(
                message=problem_statement, as_utf8=True),
            examples=examples,
            evaluation_split_name=evaluation_split_name,
            ensemble_size=ensemble_size,
            metric=json.dumps(tf.keras.metrics.serialize(metric)),
            goal=goal,
            instance_name=instance_name,
            **input_models,
        )

    @property
    def id(self) -> str:
        """Returns the AutoTrainer sub-pipeline's unique ID."""
        autotrainer_instance_name = 'EnsembleSelection'
        if self._instance_name:
            autotrainer_instance_name = f'{autotrainer_instance_name}.{self._instance_name}'
        return autotrainer_instance_name

    @property
    def components(self) -> List[base_component.BaseComponent]:
        """Returns the AutoTrainer sub-pipeline's constituent components."""
        return [self._ensemble_selection]

    @property
    def outputs(self) -> subpipeline.SubpipelineOutputs:
        """Returns the AutoTrainer sub-pipeline's outputs."""
        return subpipeline.SubpipelineOutputs(
            {'model': self._ensemble_selection.outputs.model})

    def _determine_metric_and_goal(
        self, problem_statement: ps_pb2.ProblemStatement
    ) -> Tuple[tf.keras.metrics.Metric, str]:
        task_type = problem_statement.tasks[0].type
        if task_type.HasField('multi_class_classification'):
            return tf.keras.metrics.SparseCategoricalAccuracy(
                name='accuracy'), 'maximize'
        if task_type.HasField('binary_classification'):
            return tf.keras.metrics.AUC(name='auc_roc', curve='ROC'), 'maximize'
        if task_type.HasField('one_dimensional_regression'):
            return tf.keras.metrics.MeanSquaredError(name='mse'), 'minimize'
        raise ValueError('Invalid task type: {}'.format(task_type))
# pytype: disable=wrong-arg-types
@component
def ensemble_selection(
    problem_statement: Parameter[str],
    examples: InputArtifact[standard_artifacts.Examples],
    evaluation_split_name: Parameter[str],
    ensemble_size: Parameter[int],
    metric: Parameter[str],
    goal: Parameter[str],
    model: OutputArtifact[standard_artifacts.Model],
    input_model0: InputArtifact[standard_artifacts.Model] = None,
    input_model1: InputArtifact[standard_artifacts.Model] = None,
    input_model2: InputArtifact[standard_artifacts.Model] = None,
    input_model3: InputArtifact[standard_artifacts.Model] = None,
    input_model4: InputArtifact[standard_artifacts.Model] = None,
    input_model5: InputArtifact[standard_artifacts.Model] = None,
    input_model6: InputArtifact[standard_artifacts.Model] = None,
    input_model7: InputArtifact[standard_artifacts.Model] = None,
    input_model8: InputArtifact[standard_artifacts.Model] = None,
    input_model9: InputArtifact[standard_artifacts.Model] = None,
) -> None:  # pytype: disable=invalid-annotation,wrong-arg-types
    """Runs the SimpleML trainer as a separate component."""

    problem_statement = text_format.Parse(problem_statement,
                                          ps_pb2.ProblemStatement())
    input_models = [
        input_model0, input_model1, input_model2, input_model3, input_model4,
        input_model5, input_model6, input_model7, input_model8, input_model9
    ]
    saved_model_paths = {
        str(i): path_utils.serving_model_path(model.uri)
        for i, model in enumerate(input_models)
        if model
    }
    logging.info('Saved model paths: %s', saved_model_paths)

    label_key = _label_key(problem_statement)

    es = es_lib.EnsembleSelection(
        problem_statement=problem_statement,
        saved_model_paths=saved_model_paths,
        ensemble_size=ensemble_size,
        metric=tf.keras.metrics.deserialize(json.loads(metric)),
        goal=goal)
    es.fit(*_data_from_examples(
        examples_path=os.path.join(examples.uri, evaluation_split_name),
        label_key=label_key))
    logging.info('Selected ensemble weights: %s', es.weights)
    es.save(
        export_path=os.path.join(
            path_utils.serving_model_dir(model.uri), 'export', 'serving'))
# pytype: enable=wrong-arg-types
def _data_from_examples(examples_path: str, label_key: str):
    """Returns a tuple of ndarrays of examples and label values."""

    # Load all the examples.
    filenames = tf.io.gfile.listdir(examples_path)
    files = [
        os.path.join(examples_path, filename) for filename in sorted(filenames)
    ]
    dataset = tf.data.TFRecordDataset(files, compression_type='GZIP')
    x, y = [], []
    for serialized_example in dataset.take(10000).as_numpy_iterator():
        x.append(serialized_example)
        example = tf.train.Example()
        example.ParseFromString(serialized_example)
        y.append(_label_value(example, label_key))
    return x, y


def _label_key(problem_statement: ps_pb2.ProblemStatement) -> str:
    """Returns the label key from the problem statement."""
    task_type = problem_statement.tasks[0].type
    if task_type.HasField('multi_class_classification'):
        return task_type.multi_class_classification.label
    if task_type.HasField('binary_classification'):
        return task_type.binary_classification.label
    if task_type.HasField('one_dimensional_regression'):
        return task_type.one_dimensional_regression.label
    raise ValueError('Invalid task type: {}'.format(task_type))


def _label_value(example: tf.train.Example, label_key: str):
    feature = example.features.feature[label_key]
    if feature.HasField('int64_list'):
        return feature.int64_list.value
    if feature.HasField('float_list'):
        return feature.float_list.value
    return feature.bytes_list.value
| 40.453744 | 86 | 0.732658 | 1,145 | 9,183 | 5.689956 | 0.253275 | 0.046662 | 0.050652 | 0.053722 | 0.268304 | 0.211512 | 0.12218 | 0.071834 | 0.043592 | 0.043592 | 0 | 0.006194 | 0.173691 | 9,183 | 226 | 87 | 40.632743 | 0.852399 | 0.27638 | 0 | 0.105634 | 0 | 0 | 0.062423 | 0.030055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06338 | false | 0 | 0.119718 | 0 | 0.28169 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ade9e743940d11d6af70b1681562c89779125eb | 23,545 | py | Python | main/academy/views.py | UsamaKashif/studentutor | 7aa5407ac81134a49e474726220e48beaadc9390 | [
"MIT"
] | 7 | 2021-01-17T23:10:15.000Z | 2021-02-01T21:35:36.000Z | main/academy/views.py | DiveshTheReal/studentutor | 0d3ef57887bde4dd2ee40d68015598f9c8052ffd | [
"MIT"
] | 7 | 2021-01-17T15:10:47.000Z | 2022-03-12T00:53:49.000Z | main/academy/views.py | DiveshTheReal/studentutor | 0d3ef57887bde4dd2ee40d68015598f9c8052ffd | [
"MIT"
] | 3 | 2021-01-18T09:36:16.000Z | 2021-01-20T16:29:40.000Z | from django.shortcuts import render,redirect
from django.contrib.auth.models import Group
from .forms import AcademySignUpForm, AcademyProfile, ProfilePicture, PostAnAdForm, AboutAcademyForm
from django.contrib.auth.models import User
from django.views.generic import RedirectView
from .decorators import unauthenticated_user, allowed_users, admin_only
from django.contrib.auth.decorators import login_required
from .models import Academy, PostAnAd, Invitations
from tutors.models import PostAnAd as PostAnAd_tutor
from tutors.models import Invitaions,WishList_tut
from django.contrib import messages
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
from django.contrib.sites.shortcuts import get_current_site
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text, DjangoUnicodeDecodeError
from .utils import generate_token
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import threading
def academyRegister(request):
    form = AcademySignUpForm()
    if request.method == "POST":
        form = AcademySignUpForm(request.POST)
        if form.is_valid():
            academy = form.save()
            username = form.cleaned_data.get('username')
            email = form.cleaned_data.get('email')
            city = form.cleaned_data.get('city')
            phone = form.cleaned_data.get("phone")
            name = form.cleaned_data.get("name")
            address = form.cleaned_data.get("address")

            group = Group.objects.get(name="academy")
            academy.groups.add(group)

            Academy.objects.create(
                academy=academy,
                username=username,
                name=name,
                email=email,
                city=city,
                phone=phone,
                address=address
            )

            academy.is_active = False
            academy.save()

            current_site = get_current_site(request)
            template = render_to_string("academy/activate.html", {
                "name": name,
                "domain": current_site,
                "uid": urlsafe_base64_encode(force_bytes(academy.pk)),
                "token": generate_token.make_token(academy)
            })
            registerEmail = EmailMessage(
                'Account Activation',
                template,
                settings.EMAIL_HOST_USER,
                [email]
            )
            registerEmail.fail_silently = False
            registerEmail.send()
            return render(request, "students/activation_sent.html", {})

    context = {
        "form": form
    }
    return render(request, 'academy/academy_sign_up.html', context)


def activate_view(request, uidb64, token):
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        academy = User.objects.get(pk=uid)
    except:
        academy = None

    if academy is not None and generate_token.check_token(academy, token):
        academy.is_active = True
        academy.save()
        template = render_to_string("academy/registerEmail.html", {
            "name": academy.name
        })
        registerEmail = EmailMessage(
            'Registration Successful',
            template,
            settings.EMAIL_HOST_USER,
            [academy.email]
        )
        registerEmail.fail_silently = False
        registerEmail.send()
        messages.success(request, 'account was created for ' + academy.username)
        return redirect("sign_in")

    return render(request, 'students/activate_failed.html', status=401)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def academyDashboard(request):
academy = request.user.academy
form = AcademyProfile(instance=academy)
user = Academy.objects.get(username = request.user.username)
active_ads = PostAnAd.objects.filter(academyUser = request.user.academy).count()
p_form = ProfilePicture()
if request.method=="POST":
form = AcademyProfile(request.POST,request.FILES, instance=academy)
p_form = ProfilePicture(request.POST, request.FILES)
if p_form.is_valid():
image = p_form.cleaned_data["image"]
std_image = Academy.objects.get(username = request.user.username)
std_image.user_image = image
std_image.save()
return redirect("academy_dashboard")
else:
messages.warning(request, 'Supported File Extensions are .jpg And .png, Max Image Size Is 1MB')
return redirect("academy_dashboard")
if form.is_valid():
form.save()
context = {
"form": form,
"p_form": p_form,
"totalAds": user.total_ads,
"adsDel": user.ads_deleted,
"activeAds": active_ads, # needs to be updated
"invitations_sent": user.invitations_sent,
"invitations_sent_accepted": user.invitations_sent_accepted,
"invitations_sent_rejected": user.invitations_sent_rejected,
"invitations_recieved": user.invitations_recieved,
"invitations_recieved_accepted": user.invitations_recieved_accepted,
"invitations_recieved_rejected": user.invitations_recieved_rejected,
}
return render(request, 'academy/academy_dashboard.html', context)
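# Helper executed in a background thread from postAd below: persists the ad
# and bumps the academy's ad counters outside the request/response cycle.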
def post_ad(subject,tuition_level,hours_per_day,days_per_week,estimated_fees,user,tutor_gender):
myad = PostAnAd(
academyUser = user,
subject = subject,
tuition_level = tuition_level, hours_per_day = hours_per_day,
days_per_week = days_per_week,
estimated_salary = estimated_fees,
tutor_gender = tutor_gender
)
myad.save()
user.total_ads += 1
user.ad_post_count += 1
user.save()
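# Sends a BCC notification e-mail about a new ad; a no-op when the recipient
# list is empty. Currently only referenced from commented-out code in postAd.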
def email_send(user,my_ad,emails):
if emails:
template = render_to_string("home/stdAD.html", {
"firstname": user.first_name,
"lastname": user.last_name,
"ad":my_ad
})
ADEmail = EmailMessage(
subject = f'{user.first_name} {user.last_name} posted an AD',
body = template,
from_email = settings.EMAIL_HOST_USER,
bcc = emails
)
ADEmail.fail_silently = False
ADEmail.send()
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def postAd(request, pk):
postform = PostAnAdForm()
user = Academy.objects.get(username = request.user.username)
academyAds = PostAnAd.objects.filter(academyUser__username = request.user.username)
# wishlist,created = WishList_tut.objects.get_or_create(student=request.user.student)
emails = []
# tutors = wishlist.tutors.all()
# for t in tutors:
# emails.append(t.email)
if request.method == "POST":
postform = PostAnAdForm(request.POST)
if postform.is_valid():
subject = postform.cleaned_data["subject"]
tuition_level = postform.cleaned_data["tuition_level"]
tutor_gender = postform.cleaned_data["tutor_gender"]
hours_per_day = postform.cleaned_data["hours_per_day"]
days_per_week = postform.cleaned_data["days_per_week"]
estimated_salary = postform.cleaned_data["estimated_salary"]
            ad_exists = False
            for ad in academyAds:
                if ad.subject == subject and ad.tuition_level == tuition_level:
                    ad_exists = True
                    break
            if not ad_exists:
currentad = {
"subject" : subject,
"tuition_level" : tuition_level,
"hours_per_day" : hours_per_day,
"days_per_week" : days_per_week,
"estimated_salary" : estimated_salary,
"tutor_gender":tutor_gender
}
my_ad = threading.Thread(target=post_ad, args=[subject,tuition_level,hours_per_day,days_per_week,estimated_salary,user,tutor_gender])
# t2 = threading.Thread(target=email_send, args=[user,currentad,emails])
my_ad.start()
# t2.start()
messages.info(request, "Your post is Successfully Created")
return redirect("academy_dashboard")
else:
messages.info(request, "This AD Already Exists")
return redirect("academy_dashboard")
context = {
"form": postform
}
return render(request, 'academy/post_ad.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def Ads(request):
    # NOTE: ``AboutStudent`` is not imported in this module; it presumably
    # comes from the students app (e.g. ``from students.models import AboutStudent``).
    try:
        # .get() returns a single instance, so the original .order_by("-id")
        # call on its result was invalid and has been dropped.
        studentAbout = AboutStudent.objects.get(student__username=request.user.username)
    except Exception:
        studentAbout = None
ads = PostAnAd.objects.filter(academyUser=request.user.academy).order_by("-id")
context = {
"ads":ads,
"about": studentAbout
}
return render(request, 'academy/ads.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def AdsDelete(request, pk):
ad = PostAnAd.objects.get(id=pk)
user = Academy.objects.get(username=request.user.username)
if request.method == "POST":
ad.delete()
user.ads_deleted += 1
user.ad_post_count -= 1
user.save()
return redirect("ads_academy")
context = {
'ad':ad
}
return render(request, 'academy/delete_ad.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def allTutors(request):
tutors = PostAnAd_tutor.objects.all().order_by("-id")
tuition_level_contains_query = request.GET.get('TuitionLevel')
subject_contains_query = request.GET.get('Subject')
city_contains_query = request.GET.get('City')
tuition_gender_query = request.GET.get('tuition_gender')
number = tutors.count()
    if tutors:
        if tuition_level_contains_query not in (None, "", "All"):
            tutors = tutors.filter(tuition_level=tuition_level_contains_query).order_by("-id")
            number = tutors.count()
        if subject_contains_query not in (None, ""):
            tutors = tutors.filter(subject__icontains=subject_contains_query).order_by("-id")
            number = tutors.count()
        if city_contains_query not in (None, ""):
            tutors = tutors.filter(tutorUser__city__icontains=city_contains_query).order_by("-id")
            number = tutors.count()
        if tuition_gender_query not in (None, "", "Both"):
            tutors = tutors.filter(tutorUser__gender__startswith=tuition_gender_query.lower())
            number = tutors.count()
    # list() materialises the queryset; an empty queryset yields an empty list.
    paginator = Paginator(list(tutors), 8)
page = request.GET.get('page')
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
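    # Build a sliding window of up to ten page links centred on the current
    # page so the template does not have to render every page number.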
index = items.number - 1
max_index = len(paginator.page_range)
start_index = index - 5 if index >= 5 else 0
end_index = index + 5 if index <= max_index - 5 else max_index
page_range = paginator.page_range[start_index:end_index]
context = {
# "tutors":items,
"items":items,
"number": number,
"academy": request.user.academy,
"page_range": page_range,
}
return render(request, 'academy/all_tutors.html', context)
from tutors.models import AboutAndQualifications
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def SpecificTutor(request, id):
tutor = PostAnAd_tutor.objects.get(id = id)
qual = AboutAndQualifications.objects.get(tutor__username = tutor.tutorUser.username)
tutor.views += 1
tutor.save()
tutors = PostAnAd_tutor.objects.filter(tutorUser__username = tutor.tutorUser.username).order_by("-id")
# try:
# wishList = WishList.objects.get(student = request.user.student)
# except:
# wishList = None
# added = False
# if wishList is not None:
# if tutor.tutorUser in wishList.tutors.all():
# added = True
context = {
"tutor_id": tutor.tutorUser,
"tutor": tutor,
"qual": qual,
"tutors": tutors.exclude(id = id),
"student": request.user.academy,
"added":False, # needs to be updated
}
return render (request, "academy/specific_tutor.html", context)
from tutors.models import Tutor, Invitaions_by_academy
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def inviteFordemo(request, id):
ad = PostAnAd_tutor.objects.get(id = id)
    tutor = Tutor.objects.get(username=ad.tutorUser.username)
    # ``user`` and ``std`` referenced the same Academy row; fetch it once.
    user = std = Academy.objects.get(username=request.user.username)
    try:
        invites_sent_by_std = Invitaions_by_academy.objects.get(tutor_ad=ad)
    except Invitaions_by_academy.DoesNotExist:
        invites_sent_by_std = None
    if request.method == "POST":
        if (invites_sent_by_std is not None
                and invites_sent_by_std.invitation_sent
                and invites_sent_by_std.inivitaion_by_academy.username == request.user.username):
            messages.info(request, f'Invitation request already sent to {ad.tutorUser.first_name} {ad.tutorUser.last_name}')
            return redirect("all_tutors_academy")
        # The "invite exists from another academy" and "no invite yet" branches
        # were verbatim duplicates; they are collapsed into this single path.
        Invitaions_by_academy.objects.create(
            inivitaion_by_academy=std,
            tutor_ad=ad,
            invitation_sent=True,
            accepted=False,
            rejected=False
        )
        user.invitations_sent += 1
        user.save()
        tutor.invitations_recieved += 1
        tutor.save()
        # Confirmation copy for the inviting academy.
        template = render_to_string("home/inviteEmail.html", {
            "firstname": ad.tutorUser.first_name,
            "lastname": ad.tutorUser.last_name,
            "ad": ad,
            "invited_to": "Tutor",
            "area": ad.address,
            "city": ad.tutorUser.city
        })
        registerEmail = EmailMessage(
            'Invite For Demo',
            template,
            settings.EMAIL_HOST_USER,
            [request.user.email]
        )
        registerEmail.fail_silently = False
        registerEmail.send()
        # Invitation notice for the tutor being invited.
        intemplate = render_to_string("academy/inviteEmail.html", {
            "firstname": request.user.academy.name,
            "ad": ad,
            "invited_to": "Tutor",
            "area": ad.address,
            "city": ad.tutorUser.city
        })
        email = EmailMessage(
            'Invitation',
            intemplate,
            settings.EMAIL_HOST_USER,
            [ad.tutorUser.email]
        )
        email.fail_silently = False
        email.send()
        messages.info(request, f'Invited {tutor.first_name} {tutor.last_name} for a demo')
        return redirect("academy_dashboard")  # needs to be changed to an invited page
context = {
"ad":ad
}
return render(request, 'academy/invite_for_demo.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def invited(request):
student = Academy.objects.get(username = request.user.username)
invited = Invitaions_by_academy.objects.filter(inivitaion_by_academy = student).order_by("-id")
context={
"invited": invited,
}
return render(request, "academy/invited.html", context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def invitationsAcademy(request):
invites = Invitations.objects.filter(academy_ad__academyUser = request.user.academy).order_by("-id")
context = {
"invites":invites
}
return render(request, 'academy/invitations.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def view_your_ad_acad(request, id):
student_ad = Invitations.objects.get(id = id)
    # Exclude the inviting tutor's own ads first, then take up to four related
    # ads; the original code sliced with [4] (a single item) and then called
    # .exclude(), which fails on both an instance and a sliced queryset.
    tutors = (PostAnAd_tutor.objects
              .filter(subject=student_ad.academy_ad.subject)
              .exclude(tutorUser__username=student_ad.inivitaion_by_tutor.username)[:4])
    context = {
        "invite": student_ad,
        "tutors": tutors
    }
return render(request,'academy/view_your_ad.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def acceptInvitationAcademy(request, id):
invite = Invitations.objects.get(id = id)
student = Academy.objects.get(username = request.user.username)
tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username)
if request.method == "POST":
invite.accepted = True
invite.rejected = False
invite.save()
student.invitations_recieved_accepted += 1
student.save()
tutor.invitations_sent_accepted += 1
tutor.save()
template = render_to_string("academy/acceptEmail.html", {
"name": request.user.academy.name,
"email": request.user.email,
"register_as": "Academy",
"phone": request.user.academy.phone
})
registerEmail = EmailMessage(
'Invitation Accepted',
template,
settings.EMAIL_HOST_USER,
[invite.inivitaion_by_tutor.email]
)
registerEmail.fail_silently = False
registerEmail.send()
        receive_temp = render_to_string("academy/accept_recieve_Email.html", {
            "request_from": tutor,
            "request": "Tutor"
        })
        accept_email = EmailMessage(
            'Invitation Accepted',
            receive_temp,
            settings.EMAIL_HOST_USER,
            [request.user.email]
        )
        accept_email.fail_silently = False
        accept_email.send()
messages.info(request, f'Accepted Invitation Request from {tutor.first_name} {tutor.last_name}')
return redirect("invitations_academy")
context = {
"invite":invite
}
return render(request, "academy/accept_invitation.html", context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def rejectInviteAcademy(request, id):
invite = Invitations.objects.get(id = id)
student = Academy.objects.get(username = request.user.username)
tutor = Tutor.objects.get(username = invite.inivitaion_by_tutor.username)
if request.method == "POST":
invite.delete()
student.invitations_recieved_rejected += 1
student.save()
tutor.invitations_sent_rejected += 1
tutor.save()
template = render_to_string("home/rejectEmail.html", {
"firstname": request.user.academy.name,
"student_email": request.user.email
})
registerEmail = EmailMessage(
'Invitation Rejected',
template,
settings.EMAIL_HOST_USER,
[invite.inivitaion_by_tutor.email]
)
registerEmail.fail_silently = False
registerEmail.send()
messages.warning(request, f'Rejected Invite From {tutor.first_name} {tutor.last_name}')
return redirect("invitations_academy")
context = {
"invite": invite
}
return render(request,'academy/reject_invitation.html', context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def del_account_acad(request):
    user = User.objects.get(username=request.user.username)
    if request.method == "POST":
        # Soft-delete: deactivate the account rather than removing the row.
        user.is_active = False
        user.save()
template = render_to_string("home/delEmail.html", {
"register_as": "Academy",
"email": request.user.email,
})
registerEmail = EmailMessage(
'Account Deletion',
template,
settings.EMAIL_HOST_USER,
[request.user.email]
)
registerEmail.fail_silently = False
registerEmail.send()
return redirect("academy_dashboard")
context = {}
return render(request, "academy/del_account.html", context)
@login_required(login_url="sign_in")
@allowed_users(allowed_roles=["academy"])
def aboutAcademy(request):
aboutForm = AboutAcademyForm()
if request.method == "POST":
aboutForm = AboutAcademyForm(request.POST)
if aboutForm.is_valid():
# try:
# AboutStudent.objects.get(student__username = request.user.username).delete()
# except:
# pass
about = aboutForm.cleaned_data["textArea"]
std = Academy.objects.get(username=request.user.username)
std.profile_complete = True
std.textArea = about
std.save()
return redirect("academy_dashboard")
context = {
"form": aboutForm
}
return render(request, "academy/student_about.html", context)
| 33.302687 | 149 | 0.615715 | 2,477 | 23,545 | 5.641502 | 0.115462 | 0.028338 | 0.023114 | 0.027909 | 0.475383 | 0.431015 | 0.376342 | 0.366466 | 0.302061 | 0.288393 | 0 | 0.00237 | 0.283075 | 23,545 | 706 | 150 | 33.349858 | 0.825474 | 0.030622 | 0 | 0.432532 | 0 | 0 | 0.11711 | 0.032765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033272 | false | 0 | 0.042514 | 0 | 0.131238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ae0cbb3da1cad9d593dd273b1430c763adce93f | 2,628 | py | Python | bin/Lib/tkinter/test/support.py | yousafsyed/casperjs | ed077ae9e42cf8fb9e023e9b6840d3cea11bac40 | [
"MIT"
] | 36 | 2015-02-04T10:43:31.000Z | 2022-03-30T13:01:12.000Z | bin/Lib/tkinter/test/support.py | yousafsyed/casperjs | ed077ae9e42cf8fb9e023e9b6840d3cea11bac40 | [
"MIT"
] | 9 | 2015-03-17T05:56:16.000Z | 2021-11-17T09:31:50.000Z | bin/Lib/tkinter/test/support.py | yousafsyed/casperjs | ed077ae9e42cf8fb9e023e9b6840d3cea11bac40 | [
"MIT"
] | 22 | 2015-05-13T17:37:35.000Z | 2022-01-25T06:24:42.000Z | import sys
import tkinter
import unittest
from test.support import requires
def get_tk_root():
requires('gui') # raise exception if tk unavailable
try:
root = tkinter._default_root
except AttributeError:
# it is possible to disable default root in Tkinter, although
# I haven't seen people doing it (but apparently someone did it
# here).
root = None
if root is None:
# create a new master only if there isn't one already
root = tkinter.Tk()
return root
def root_deiconify():
root = get_tk_root()
root.deiconify()
def root_withdraw():
root = get_tk_root()
root.withdraw()
def simulate_mouse_click(widget, x, y):
"""Generate proper events to click at the x, y position (tries to act
like an X server)."""
widget.event_generate('<Enter>', x=0, y=0)
widget.event_generate('<Motion>', x=x, y=y)
widget.event_generate('<ButtonPress-1>', x=x, y=y)
widget.event_generate('<ButtonRelease-1>', x=x, y=y)
import _tkinter
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
def requires_tcl(*version):
return unittest.skipUnless(tcl_version >= version,
'requires Tcl version >= ' + '.'.join(map(str, version)))
_tk_patchlevel = None
def get_tk_patchlevel():
global _tk_patchlevel
if _tk_patchlevel is None:
tcl = tkinter.Tcl()
patchlevel = []
for x in tcl.call('info', 'patchlevel').split('.'):
try:
x = int(x, 10)
except ValueError:
x = -1
patchlevel.append(x)
_tk_patchlevel = tuple(patchlevel)
return _tk_patchlevel
units = {
'c': 72 / 2.54, # centimeters
'i': 72, # inches
'm': 72 / 25.4, # millimeters
'p': 1, # points
}
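# Convert a Tk screen distance such as "2c" (cm) or "1i" (inch) into pixels,
# assuming the conventional 72 pixels (points) per inch.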
def pixels_conv(value):
return float(value[:-1]) * units[value[-1:]]
def tcl_obj_eq(actual, expected):
if actual == expected:
return True
if isinstance(actual, _tkinter.Tcl_Obj):
if isinstance(expected, str):
return str(actual) == expected
if isinstance(actual, tuple):
if isinstance(expected, tuple):
return (len(actual) == len(expected) and
all(tcl_obj_eq(act, exp)
for act, exp in zip(actual, expected)))
return False
def widget_eq(actual, expected):
if actual == expected:
return True
if isinstance(actual, (str, tkinter.Widget)):
if isinstance(expected, (str, tkinter.Widget)):
return str(actual) == str(expected)
return False
| 28.565217 | 73 | 0.603501 | 335 | 2,628 | 4.608955 | 0.364179 | 0.046632 | 0.049223 | 0.007772 | 0.13342 | 0.107513 | 0.107513 | 0.07772 | 0.07772 | 0.07772 | 0 | 0.011646 | 0.281202 | 2,628 | 91 | 74 | 28.879121 | 0.805717 | 0.128995 | 0 | 0.142857 | 0 | 0 | 0.041832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128571 | false | 0 | 0.071429 | 0.028571 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ae6eee9c017c38595289696d2aff8e25f24cb29 | 1,753 | py | Python | databricks/pixels/path_extractor.py | dmoore247/pixels | cf78ab016530c42bc36538a9b812fe23107f87d7 | [
"Apache-2.0"
] | 2 | 2021-03-28T18:50:50.000Z | 2021-08-09T02:06:15.000Z | databricks/pixels/path_extractor.py | dmoore247/pixels | cf78ab016530c42bc36538a9b812fe23107f87d7 | [
"Apache-2.0"
] | null | null | null | databricks/pixels/path_extractor.py | dmoore247/pixels | cf78ab016530c42bc36538a9b812fe23107f87d7 | [
"Apache-2.0"
] | null | null | null | from pyspark.ml.pipeline import Transformer
import pyspark.sql.functions as f
import pyspark.sql.types as t
from pyspark.sql import DataFrame
class PathExtractor(Transformer):
    # Custom Transformer that derives path-related columns from a path column.
    def __init__(self, inputCol='path', tagsCol='tags', basePath='dbfs:/'):
        super(PathExtractor, self).__init__()  # assigns a unique uid via Identifiable
        self.inputCol = inputCol  # the name of the column holding the file path
        self.tagsCol = tagsCol
        self.basePath = basePath
    # The broken ``this``/``copy`` stubs were dropped: pyspark's Transformer
    # already provides working uid handling and copy().
    def check_input_type(self, schema):
        field = schema[self.inputCol]
        # the input field must be a string type
        if field.dataType != t.StringType():
            raise TypeError('PathExtractor input type %s did not match expected type StringType' % field.dataType)
    def _transform(self, df):
        self.check_input_type(df.schema)
        return self._transform_impl(df, self.basePath, self.inputCol)
    @staticmethod
    def _transform_impl(df: DataFrame, basePath: str, inputCol: str):
        """User overridable."""
        return (df
                .withColumn("relative_path", f.regexp_replace(inputCol, basePath + "(.*)$", r"$1"))
                .withColumn("local_path", f.regexp_replace(inputCol, "^dbfs:(.*$)", r"/dbfs$1"))
                .withColumn("extension", f.regexp_replace("relative_path", r".*\.(\w+)$", r"$1"))
                .withColumn("path_tags",
                            f.split(
                                f.regexp_replace(
                                    "relative_path",
                                    r"([0-9a-zA-Z]+)([_./:])",
                                    r"$1,"),
                                ","))
                )
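# A minimal usage sketch (assuming a DataFrame ``df`` with a ``path`` column):
# PathExtractor(basePath="dbfs:/").transform(df) adds relative_path,
# local_path, extension and path_tags columns.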
7aec991a251b8b5ba72c6f603e780ca170fd3d9c | 16,092 | py | Python | SMI/pycopia/SMI/Compile.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 89 | 2015-03-26T11:25:20.000Z | 2022-01-12T06:25:14.000Z | SMI/pycopia/SMI/Compile.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 1 | 2015-07-05T03:27:43.000Z | 2015-07-11T06:21:20.000Z | SMI/pycopia/SMI/Compile.py | kdart/pycopia | 1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d | [
"Apache-2.0"
] | 30 | 2015-04-30T01:35:54.000Z | 2022-01-12T06:19:49.000Z | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compile module compiles SMI data into Python objects for use by the SNMP
module. This started out clean, but now it's ugly. But at least it spits
out something useful.
"""
from __future__ import print_function
from __future__ import division
import os
import py_compile
from pycopia import textutils
from pycopia.SMI import SMI, Basetypes, Objects
USERMIBPATH = os.environ.get("USERMIBPATH", os.path.join("/", "var", "tmp", "mibs"))
# global name translation table
# Since we convert MIB modules to Python modules, we can't have a dash in
# the name. These are translated to underscores.
TRANSTABLE = textutils.maketrans("-", "_")
def convert_name(name):
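    # e.g. convert_name("SNMPv2-MIB") -> "SNMPv2_MIB", a valid Python module name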
return name.translate(TRANSTABLE)
# These are some of the attributes that the SNMP module needs, and are
# exported "as-is". Other attributes are special-cased in the appropriate
# generator method.
EXPORTS = {
"Type": ["status", "format", "units", "ranges", "enumerations"],
"Node": ["access", "create", "status", "units"],
"Macro": ["name", "status"],
"Module": ["name", "path", "conformance", "language", "description"],
"Group": ["name", "status"],
"Value": ["val"],
}
# objects directly imported from SMI.Objects in the mib modules
IMPORTED_OBJECTS = ["ColumnObject", "MacroObject", "NotificationObject",
"RowObject", "ScalarObject", "NodeObject", "ModuleObject", "GroupObject"]
def _classstr(tup):
def _cstr(c):
if type(c) is str:
return c
else:
if c.__name__ in IMPORTED_OBJECTS:
return c.__name__
else:
return "%s.%s" % (c.__module__, c.__name__)
return ", ".join(map(_cstr, tup))
# generic class producer. Returns source code string
def genClass(sminode, baseclass, attrdict=None, doc=None):
if not attrdict:
attrdict = {}
for attrname in EXPORTS[sminode.__class__.__name__]:
val = getattr(sminode, attrname)
if val is None:
continue
if type(val) is str:
attrdict[attrname] = repr(val)
else:
attrdict[attrname] = val
klassname = convert_name(sminode.name)
parents = (baseclass,)
s = []
if parents:
s.append( "class %s(%s):" % (klassname, _classstr(parents)) )
else:
s.append( "class %s(object):" % (klassname) )
if doc:
s.append('\t"""%s"""' % doc)
for key, val in attrdict.items():
if val:
s.append( "\t%s = %s" % (key, val) )
if len(s) == 1:
s.append("\tpass")
s.append("\n")
return "\n".join(s)
# generates a repr for SMI.Objects.IndexObjects
class IndexGenerator(list):
def __init__(self, init=None, implied=False):
super(IndexGenerator, self).__init__(init or [])
self.implied = bool(implied)
def __repr__(self):
lv = ", ".join(self)
return "pycopia.SMI.Objects.IndexObjects([%s], %r)" % (lv, self.implied)
class ListGenerator(list):
def __init__(self, init=None):
super(ListGenerator, self).__init__(init or [])
def __repr__(self):
return "[%s]" % (", ".join(self), )
class ObjectSourceGenerator(object):
"""
Usage: ObjectSourceGenerator(fileobject, modulename)
Parameters:
fileobject = A file-type object.
modulename = An SMI module name.
"""
def __init__(self, fo, oidfo, smimodule):
self.smimodule = smimodule
self.fo = fo
self.oidfo = oidfo
self.pymodname = convert_name(smimodule.name)
#self.tempmodule = new.module(self.pymodname)
self.imports = {}
self.fo.write("""# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import %s
""" % (", ".join(IMPORTED_OBJECTS),))
self.oidfo.write("""# python
# This file is generated by a program (mib2py).
import sys
{modname} = sys.modules["pycopia.mibs.{modname}"]
OIDMAP = {{
""".format(modname=self.pymodname))
def finalize(self):
self.oidfo.write("}\n")
handle_specials(self.fo, self.smimodule)
self.fo.write("""
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
""")
def add_comment(self, text):
self.fo.write("# %s\n" % text)
def genImports(self):
self.fo.write("# imports \n")
for node in self.smimodule.get_imports():
if node.module not in self.imports:
self.imports[node.module] = []
self.imports[node.module].append(node.name)
for modname, implist in self.imports.items():
impnames = [convert_name(s) for s in implist]
self.fo.write("from pycopia.mibs.%s import %s\n" % (convert_name(modname), ", ".join(impnames)))
self.fo.write("\n")
def genModule(self):
self.fo.write(genClass(self.smimodule, Objects.ModuleObject))
def genTypes(self):
self.fo.write("# types \n")
for smi_type in self.smimodule.get_types():
name = convert_name(smi_type.name)
if hasattr(Basetypes, name ):
self.fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name))
else:
self.fo.write("\n")
if smi_type.snmptype:
baseclass = getattr(Basetypes, smi_type.snmptype)
self.fo.write(genClass(smi_type, baseclass))
def genNodes(self):
self.fo.write("# nodes\n")
for node in self.smimodule.get_nodes(SMI.SMI_NODEKIND_NODE):
if node.name:
initdict = {}
initdict["name"] = repr(node.name)
initdict["OID"] = repr(Basetypes.ObjectIdentifier(node.OID))
self.fo.write(genClass(node, Objects.NodeObject, initdict))
self._genOIDItem(node.OID, node.name)
self.fo.write("\n")
def genScalars(self):
self.fo.write("# scalars \n")
for scalar in self.smimodule.get_scalars():
if scalar.status not in \
(SMI.SMI_STATUS_DEPRECATED,
SMI.SMI_STATUS_CURRENT,
SMI.SMI_STATUS_MANDATORY):
continue # do not expose optional or obsolete objects
initdict = {}
initdict["syntaxobject"] = so = self._getSyntax(scalar)
if so.find("Enumeration") >= 0:
initdict["enumerations"] = scalar.syntax.enumerations
initdict["OID"] = repr(Basetypes.ObjectIdentifier(scalar.OID))
self.fo.write(genClass(scalar, Objects.ScalarObject, initdict))
self.fo.write("\n")
self._genOIDItem(scalar.OID, scalar.name)
def genColumns(self):
self.fo.write("# columns\n")
for col in self.smimodule.get_columns():
initdict = {}
initdict["syntaxobject"] = so = self._getSyntax(col)
if so.find("Enumeration") >= 0:
initdict["enumerations"] = col.syntax.enumerations
initdict["OID"] = repr(Basetypes.ObjectIdentifier(col.OID))
self.fo.write(genClass(col, Objects.ColumnObject, initdict))
self.fo.write("\n")
self._genOIDItem(col.OID, col.name)
def genRows(self):
self.fo.write("# rows \n")
for row in self.smimodule.get_rows():
if row.status not in (SMI.SMI_STATUS_DEPRECATED,
SMI.SMI_STATUS_CURRENT,
SMI.SMI_STATUS_MANDATORY):
continue
initdict = {}
columns = "{%s}" % ", ".join(["%r: %s" % (s, s) for s in self._get_colnames(row)])
initdict["columns"] = columns
initdict["index"] = self._genIndexObjects(row)
rowstatus = row.rowstatus
if rowstatus:
initdict["rowstatus"] = row.rowstatus.name
initdict["OID"] = repr(Basetypes.ObjectIdentifier(row.OID))
self.fo.write(genClass(row, Objects.RowObject, initdict))
self.fo.write("\n")
def genMacros(self):
self.fo.write("# macros\n")
for node in self.smimodule.get_macros():
self.fo.write(genClass(node, Objects.MacroObject))
self.fo.write("\n")
def genNotifications(self):
self.fo.write("# notifications (traps) \n")
for notif in self.smimodule.get_notifications():
initdict = {"OID": repr(Basetypes.ObjectIdentifier(notif.OID))}
self.fo.write(genClass(notif, Objects.NotificationObject, initdict))
self._genOIDItem(notif.OID, notif.name)
def genGroups(self):
self.fo.write("# groups \n")
for group in self.smimodule.get_groups():
if group.status not in (SMI.SMI_STATUS_CURRENT,
SMI.SMI_STATUS_DEPRECATED,
SMI.SMI_STATUS_MANDATORY):
continue
initdict = {}
initdict["OID"] = repr(Basetypes.ObjectIdentifier(group.OID))
grouplist = []
for el in group.get_elements():
n = el.get_node()
grouplist.append(n.name)
initdict["group"] = "[%s]" % ", ".join(grouplist)
self.fo.write(genClass(group, Objects.GroupObject, initdict))
self._genOIDItem(group.OID, group.name)
def genCompliances(self):
self.fo.write("# compliances \n")
for comp in self.smimodule.get_compliances():
if comp.status not in (SMI.SMI_STATUS_CURRENT,
SMI.SMI_STATUS_DEPRECATED,
SMI.SMI_STATUS_MANDATORY):
continue
initdict = {}
mandlist = ListGenerator()
for el in comp.get_elements():
mandlist.append(el.get_node().name)
initdict["mandatory_group"] = mandlist
refs = ListGenerator()
for ref in comp.get_refinements(): # XXX
if ref.syntax:
n = ref.get_node()
refs.append(self._getSyntax(ref)) # XXX
initdict["refinements"] = repr(refs)
self.fo.write(genClass(comp, Objects.Compliance, initdict))
self.fo.write("\n")
def genCapabilities(self):
self.fo.write("# capabilities \n")
for cap in self.smimodule.get_capabilities():
if cap.status not in (SMI.SMI_STATUS_CURRENT,
SMI.SMI_STATUS_DEPRECATED,
SMI.SMI_STATUS_MANDATORY):
continue
initdict = {}
# XXX
self.fo.write(genClass(cap, Objects.Capability, initdict))
self.fo.write("\n")
# utility methods
def _get_colnames(self, row):
rv = []
for c in row.get_children():
if c.nodekind == SMI.SMI_NODEKIND_COLUMN:
rv.append(c.name)
return rv
def _genOIDItem(self, OID, classname):
self.oidfo.write('%r: %s.%s,\n' % (str(OID), self.pymodname, convert_name(classname)))
def _genIndexObjects(self, smirow):
index = smirow.get_index()
if index is None: # old, old v1 MIBS with no index
return
gen = IndexGenerator(implied=index.implied)
for n in index:
gen.append(n.name)
if smirow.indexkind == SMI.SMI_INDEX_AUGMENT:
for node in index:
mod = node.get_module()
self.fo.write("from %s import %s\n" % (convert_name(mod.name), node.name))
return repr(gen)
def _getSyntax(self, node):
syntax = node.syntax
if syntax is None:
print ("***** unable to get SYNTAX for node %s" % (node.name))
return "UNKNOWN"
if not syntax.name:
syntax = syntax.get_parent()
syntaxname = syntax.name
if not syntaxname:
syntaxname = syntax.snmptype
if hasattr(Objects, syntaxname):
cl = getattr(Objects, syntaxname)
return "%s.%s" % (cl.__module__, cl.__name__)
elif hasattr(Basetypes, syntaxname):
cl = getattr(Basetypes, syntaxname)
return "%s.%s" % (cl.__module__, cl.__name__)
# else must be a locally defined type.
return syntaxname
def genAll(self):
self.genImports()
self.genModule()
self.genNodes()
self.genMacros()
self.genTypes()
self.genScalars()
self.genColumns()
self.genRows()
self.genNotifications()
self.genGroups()
#self.genCompliances()
self.genCapabilities()
self.finalize()
# some modules require special handling. Crude, hopefully temporary, hack
def handle_specials(fo, smimodule):
fo.write("\n# special additions\n")
handler = {'SNMPv2-SMI': _handle_smi,
'SNMPv2-TC': _handle_tc}.get(smimodule.name, _handle_default)
handler(fo, smimodule)
def _handle_smi(fo, mod):
fo.write("\n")
for name in ("ObjectSyntax", "SimpleSyntax", "ApplicationSyntax"):
fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name))
def _handle_tc(fo, mod):
fo.write("\n")
for name in ("Bits", "BITS"):
fo.write("%s = pycopia.SMI.Basetypes.%s\n" % (name, name))
def _handle_default(fo, mod):
pass
def _compile_module(smimodule):
if not smimodule.name:
return # unnamed from where?
fname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+".py")
oidfname = os.path.join(USERMIBPATH, convert_name(smimodule.name)+"_OID.py")
if not os.path.exists(fname):
print ("Compiling module", smimodule.name)
fd = open(fname, "w")
oidfd = open(oidfname, "w")
generator = ObjectSourceGenerator(fd, oidfd, smimodule)
generator.genAll()
fd.close()
try:
py_compile.compile(fname)
except Exception as err:
print ("***", err)
else:
print (" +++ file %r exists, skipping." % (fname, ))
def compile_module(modname, preload=None, all=False):
if preload:
for pm in preload:
SMI.load_module(pm)
smimodule = SMI.get_module(modname)
if not smimodule:
print ("Could not load module", modname)
return
if all:
for dep in _get_dependents(smimodule):
_compile_module(SMI.get_module(dep))
_compile_module(smimodule)
def _get_dependents(module, hash=None):
h = hash or {}
for imp in module.get_imports():
h[imp.module] = True
_get_dependents(SMI.get_module(imp.module), h)
return h.keys()
def compile_everything(all=False):
count = 0
paths = SMI.get_path().split(":")
for dir in paths:
print ("Looking in", dir)
for modname in os.listdir(dir):
modpath = os.path.join(dir, modname)
if os.path.isfile(modpath):
print ("Found module", modname, "compiling...")
try:
compile_module(modname, None, all)
except SMI.SmiError as err:
print ("***[", err, "]***")
count += 1
            SMI.clear() # clear out memory
SMI.init()
print ("Found and compiled %d MIBS." % (count, ))
if __name__ == "__main__":
from pycopia import autodebug
compile_everything(True)
| 35.135371 | 108 | 0.59222 | 1,881 | 16,092 | 4.945774 | 0.213184 | 0.031603 | 0.043749 | 0.019349 | 0.193486 | 0.168118 | 0.127916 | 0.091153 | 0.06987 | 0.06987 | 0 | 0.001731 | 0.282066 | 16,092 | 457 | 109 | 35.212254 | 0.803514 | 0.106078 | 0 | 0.149296 | 0 | 0 | 0.114955 | 0.017857 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101408 | false | 0.005634 | 0.076056 | 0.005634 | 0.23662 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aed93f1001f0b20c9235af1ae0433ce9cdab764 | 960 | py | Python | burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py | bpedersen2/indico-plugins-cern | c4f06d11d981c316fc8de2892758484deb58e2f5 | [
"MIT"
] | null | null | null | burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py | bpedersen2/indico-plugins-cern | c4f06d11d981c316fc8de2892758484deb58e2f5 | [
"MIT"
] | null | null | null | burotel/indico_burotel/migrations/20211029_1202_564d660d4ddb_create_count_weekdays_function.py | bpedersen2/indico-plugins-cern | c4f06d11d981c316fc8de2892758484deb58e2f5 | [
"MIT"
] | null | null | null | """Create count_weekdays function
Revision ID: 564d660d4ddb
Revises:
Create Date: 2021-10-29 12:02:59.409012
"""
import textwrap
from alembic import op
from sqlalchemy.sql.ddl import CreateSchema, DropSchema
# revision identifiers, used by Alembic.
revision = '564d660d4ddb'
down_revision = None
branch_labels = None
depends_on = None
SQL_FUNCTION_COUNT_WEEKDAYS = textwrap.dedent('''
CREATE FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date)
RETURNS bigint
AS $$
SELECT COUNT(*)
FROM generate_series(from_date, to_date, '1 day'::interval) d
WHERE extract('dow' FROM d) NOT IN (0, 6)
$$
LANGUAGE SQL IMMUTABLE STRICT;
''')
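# count_weekdays(from_date, to_date) counts the days in the inclusive range
# whose day-of-week is Monday through Friday; extract('dow') yields 0 for
# Sunday and 6 for Saturday in PostgreSQL, so both are excluded.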
def upgrade():
op.execute(CreateSchema('plugin_burotel'))
op.execute(SQL_FUNCTION_COUNT_WEEKDAYS)
def downgrade():
op.execute('DROP FUNCTION plugin_burotel.count_weekdays(from_date date, to_date date)')
op.execute(DropSchema('plugin_burotel'))
| 23.414634 | 91 | 0.730208 | 127 | 960 | 5.338583 | 0.503937 | 0.09587 | 0.044248 | 0.070796 | 0.165192 | 0.165192 | 0.165192 | 0.165192 | 0.165192 | 0.165192 | 0 | 0.046482 | 0.170833 | 960 | 40 | 92 | 24 | 0.805276 | 0.152083 | 0 | 0 | 0 | 0 | 0.511772 | 0.128872 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7aef5d40168535dab3efb9c32c0b60a29e7ae107 | 2,327 | py | Python | models/utils.py | ZephyrII/competitive_colaboration | a557d1e23ef2c0b8e3794f085a79bfffb860f9df | [
"MIT"
] | 357 | 2019-03-12T07:17:32.000Z | 2022-03-24T14:13:24.000Z | models/utils.py | DevLooptt/SJTU-CS386-2021Fall-DIP-Project | 2167e089be80ca01911ba55c07b83c9f26f147e7 | [
"MIT"
] | 27 | 2019-03-11T19:16:11.000Z | 2021-05-30T13:30:19.000Z | models/utils.py | DevLooptt/SJTU-CS386-2021Fall-DIP-Project | 2167e089be80ca01911ba55c07b83c9f26f147e7 | [
"MIT"
] | 66 | 2019-03-27T14:16:22.000Z | 2021-11-11T12:40:33.000Z | from __future__ import division
import math
import torch
import torch.nn as nn
def conv(in_planes, out_planes, stride=1, batch_norm=False):
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes, eps=1e-3),
nn.ReLU(inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True),
nn.ReLU(inplace=True)
)
def deconv(in_planes, out_planes, batch_norm=False):
if batch_norm:
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_planes, eps=1e-3),
nn.ReLU(inplace=True)
)
else:
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=True),
nn.ReLU(inplace=True)
)
def predict_depth(in_planes, with_confidence):
return nn.Conv2d(in_planes, 2 if with_confidence else 1, kernel_size=3, stride=1, padding=1, bias=True)
def post_process_depth(depth, activation_function=None, clamp=False):
if activation_function is not None:
depth = activation_function(depth)
if clamp:
depth = depth.clamp(10, 80)
return depth[:,0]
def adaptative_cat(out_conv, out_deconv, out_depth_up):
out_deconv = out_deconv[:, :, :out_conv.size(2), :out_conv.size(3)]
out_depth_up = out_depth_up[:, :, :out_conv.size(2), :out_conv.size(3)]
return torch.cat((out_conv, out_deconv, out_depth_up), 1)
def init_modules(net):
for m in net.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2/n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
| 34.220588 | 107 | 0.644177 | 341 | 2,327 | 4.190616 | 0.211144 | 0.075577 | 0.083975 | 0.071379 | 0.550035 | 0.550035 | 0.550035 | 0.550035 | 0.475857 | 0.418474 | 0 | 0.029809 | 0.235926 | 2,327 | 67 | 108 | 34.731343 | 0.773903 | 0 | 0 | 0.346154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.076923 | 0.019231 | 0.326923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7af19d3043c8b6728ea0912a528bf1eada7c558c | 2,806 | py | Python | scripts/simulator_experiments/real_government_strategy_experiments.py | alfred100p/PandemicSimulator | 2cb22c4b5c55d54a420fd104c74918d76189feb9 | [
"Apache-2.0"
] | null | null | null | scripts/simulator_experiments/real_government_strategy_experiments.py | alfred100p/PandemicSimulator | 2cb22c4b5c55d54a420fd104c74918d76189feb9 | [
"Apache-2.0"
] | null | null | null | scripts/simulator_experiments/real_government_strategy_experiments.py | alfred100p/PandemicSimulator | 2cb22c4b5c55d54a420fd104c74918d76189feb9 | [
"Apache-2.0"
] | null | null | null | # Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
from matplotlib import pyplot as plt
import pandemic_simulator as ps
def eval_government_strategies(experiment_name: str, opts: ps.sh.EvaluationOpts) -> None:
data_saver = ps.data.H5DataSaver(experiment_name, path=opts.data_saver_path)
print('Running Swedish strategy')
ps.sh.experiment_main(sim_config=opts.default_sim_config,
sim_opts=ps.env.PandemicSimOpts(),
data_saver=data_saver,
pandemic_regulations=ps.sh.swedish_regulations,
stages_to_execute=swedish_strategy,
num_random_seeds=opts.num_seeds,
max_episode_length=opts.max_episode_length,
exp_id=0)
print('Running Italian strategy')
ps.sh.experiment_main(sim_config=opts.default_sim_config,
sim_opts=ps.env.PandemicSimOpts(),
data_saver=data_saver,
pandemic_regulations=ps.sh.italian_regulations,
stages_to_execute=italian_strategy,
num_random_seeds=opts.num_seeds,
max_episode_length=opts.max_episode_length,
exp_id=1)
if __name__ == '__main__':
swedish_strategy = [ps.data.StageSchedule(stage=0, end_day=3),
ps.data.StageSchedule(stage=1, end_day=None)]
italian_strategy = [ps.data.StageSchedule(stage=0, end_day=3),
ps.data.StageSchedule(stage=1, end_day=8),
ps.data.StageSchedule(stage=2, end_day=13),
ps.data.StageSchedule(stage=3, end_day=25),
ps.data.StageSchedule(stage=4, end_day=59),
ps.data.StageSchedule(stage=3, end_day=79),
ps.data.StageSchedule(stage=2, end_day=None)]
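    # Each StageSchedule applies the given regulation stage until end_day
    # (end_day=None keeps that stage for the rest of the episode); the two
    # lists approximate the historical Swedish and Italian responses.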
opts = ps.sh.EvaluationOpts(
num_seeds=30,
max_episode_length=180,
enable_warm_up=False
)
exp_name = 'swedish_italian_strategies'
try:
eval_government_strategies(exp_name, opts)
except ValueError:
# Expect a value error because we are reusing the same directory.
pass
ps.sh.make_evaluation_plots(exp_name=exp_name,
data_saver_path=opts.data_saver_path,
param_labels=['SWE', 'ITA'],
bar_plot_xlabel='Real Government Strategies',
annotate_stages=True,
show_cumulative_reward=False,
show_time_to_peak=False, show_pandemic_duration=True)
plt.show()
| 45.258065 | 89 | 0.580542 | 309 | 2,806 | 4.957929 | 0.36246 | 0.039164 | 0.111619 | 0.140992 | 0.45953 | 0.432115 | 0.432115 | 0.351175 | 0.351175 | 0.351175 | 0 | 0.017307 | 0.341055 | 2,806 | 61 | 90 | 46 | 0.811249 | 0.050962 | 0 | 0.2 | 0 | 0 | 0.042857 | 0.009774 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02 | false | 0.02 | 0.04 | 0 | 0.06 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7af7571d71981a894e8403a4dbebd261f79a20cf | 486 | py | Python | code/gits_version.py | ineelshah/GITS2.1-I.R.I.S | 12c4455ea55920e0de94a76f45b26e3e43cbcced | [
"MIT"
] | 1 | 2021-11-28T12:18:43.000Z | 2021-11-28T12:18:43.000Z | code/gits_version.py | ineelshah/GITS2.1-I.R.I.S | 12c4455ea55920e0de94a76f45b26e3e43cbcced | [
"MIT"
] | 20 | 2021-11-26T17:59:00.000Z | 2022-01-29T10:44:15.000Z | code/gits_version.py | jayrshah98/GITS2.1-I.R.I.S | 2891ba27b3309bbc7e8ff25ed221d3f1c78fb9d3 | [
"MIT"
] | 3 | 2021-11-28T21:48:50.000Z | 2022-01-05T15:44:06.000Z | import subprocess
from subprocess import PIPE
def gits_version(args):
try:
        cmd = ["git", "--version"]
        process1 = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = process1.communicate()
print(stdout.decode("UTF-8"))
except Exception as e:
print("ERROR: gits version command caught an exception")
print("ERROR: {}".format(str(e)))
return False
return True
| 24.3 | 66 | 0.615226 | 57 | 486 | 5.22807 | 0.614035 | 0.073826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008403 | 0.265432 | 486 | 19 | 67 | 25.578947 | 0.826331 | 0 | 0 | 0 | 0 | 0 | 0.150206 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.333333 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7af95ca39bf7740aabcc3f766de8108b97e29b27 | 921 | py | Python | sourcecode/src/vx/spix/Util.py | ivarvb/SPIX | 6c757b69c266f738d66164fa643a09f77721880d | [
"MIT"
] | null | null | null | sourcecode/src/vx/spix/Util.py | ivarvb/SPIX | 6c757b69c266f738d66164fa643a09f77721880d | [
"MIT"
] | null | null | null | sourcecode/src/vx/spix/Util.py | ivarvb/SPIX | 6c757b69c266f738d66164fa643a09f77721880d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import ujson
from datetime import datetime
class Util:
@staticmethod
def write(file, obj):
with open(file, "w") as filef:
filef.write(ujson.dumps(obj))
@staticmethod
def read(file):
data = {}
with open(file,"r") as filef:
data = (ujson.load(filef))
return data
@staticmethod
def now():
return datetime.now().strftime("%Y%m%d%H%M%S")
@staticmethod
def makedir(ndir):
if not os.path.exists(ndir):
os.makedirs(ndir)
@staticmethod
def splitname(filef):
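        # e.g. "/data/img.png" -> ("img", ".png")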
base = os.path.basename(filef)
base = os.path.splitext(base)
#name = base[0]
return base
@staticmethod
def split(filef,separator):
return filef.split(separator) | 21.418605 | 54 | 0.591748 | 117 | 921 | 4.65812 | 0.504274 | 0.165138 | 0.044037 | 0.055046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004594 | 0.290988 | 921 | 43 | 55 | 21.418605 | 0.830015 | 0.061889 | 0 | 0.1875 | 0 | 0 | 0.016241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.1875 | 0.0625 | 0.53125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7afb55eccc74a4c6a98c9ac3dd757cd060175686 | 1,005 | py | Python | nmea_date_fix.py | hsur/NmeaDateFix | 4585dc9541bb2ae87dfc0160cc5006960813a0d3 | [
"BSD-2-Clause"
] | null | null | null | nmea_date_fix.py | hsur/NmeaDateFix | 4585dc9541bb2ae87dfc0160cc5006960813a0d3 | [
"BSD-2-Clause"
] | null | null | null | nmea_date_fix.py | hsur/NmeaDateFix | 4585dc9541bb2ae87dfc0160cc5006960813a0d3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# vim:fenc=utf-8 ff=unix ft=python ts=4 sw=4 sts=4 noet :
import os
import sys
import datetime
from pynmea2 import NMEASentence, ParseError
if len(sys.argv) != 2:
print("Usage: %s nmeafile.nmea" % sys.argv[0] )
sys.exit(1)
file_path = os.path.abspath(sys.argv[1])
tmp_path = os.path.dirname(file_path) + "/updated_" + os.path.basename(file_path)
start_datetime = None
with open(file_path) as f:
with open(tmp_path, mode='w',newline="\r\n") as t:
for line in f:
try:
nmea = NMEASentence.parse(line)
if hasattr(nmea, 'datestamp'):
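                    # GPS week-number rollover fix: shift the reported date
                    # forward by one 1024-week epoch (~19.6 years).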
nmea.datestamp = (nmea.datestamp + datetime.timedelta(weeks=1024)).strftime("%d%m%y")
                    if start_datetime is None:
start_datetime = nmea.datetime.strftime("%Y%m%d%H%M%S")
t.write(str(nmea))
t.write("\n")
#print(str(nmea))
except ParseError as e:
t.write(e.args[0][1])
#print(str(nmea))
os.rename(tmp_path, os.path.dirname(tmp_path) + "/%s.nma" % start_datetime)
| 29.558824 | 91 | 0.649751 | 163 | 1,005 | 3.92638 | 0.478528 | 0.05 | 0.046875 | 0.040625 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019536 | 0.185075 | 1,005 | 33 | 92 | 30.454545 | 0.761905 | 0.108458 | 0 | 0 | 0 | 0 | 0.084983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7afc056cd047372b424519a16da1dae8715cba34 | 1,103 | py | Python | coinkit/keyspace.py | mflaxman/coink | 8ce28ac4ff56e2320bf452d0559b83baf40b2b51 | [
"MIT"
] | 5 | 2017-09-06T11:59:50.000Z | 2019-02-17T21:02:47.000Z | coinkit/keyspace.py | shea256/coinkit | 81e86f4ea3dbf6622953c085016445fb4121fb44 | [
"MIT"
] | null | null | null | coinkit/keyspace.py | shea256/coinkit | 81e86f4ea3dbf6622953c085016445fb4121fb44 | [
"MIT"
] | 2 | 2015-01-23T03:10:25.000Z | 2021-11-18T01:58:31.000Z | # -*- coding: utf-8 -*-
"""
Coinkit
~~~~~
:copyright: (c) 2013 by Halfmoon Labs
:license: MIT, see LICENSE for more details.
"""
import re
def int_to_hex(i):
return re.sub(r'^0x|L$', '', hex(i))
def int_to_string(integer, keyspace_chars):
""" Turn a positive integer into a string. """
if not integer > 0:
raise ValueError('integer must be > 0')
output = ""
while integer > 0:
integer, digit = divmod(integer, len(keyspace_chars))
output += keyspace_chars[digit]
return output[::-1]
def string_to_int(string, keyspace_chars):
""" Turn a string into a positive integer. """
output = 0
for char in string:
output = output * len(keyspace_chars) + keyspace_chars.index(char)
return output
def change_keyspace(string, original_keyspace, target_keyspace):
""" Convert a string from one keyspace to another. """
assert isinstance(string, str)
intermediate_integer = string_to_int(string, original_keyspace)
output_string = int_to_string(intermediate_integer, target_keyspace)
return output_string | 29.810811 | 74 | 0.668178 | 145 | 1,103 | 4.910345 | 0.434483 | 0.109551 | 0.022472 | 0.050562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012717 | 0.215775 | 1,103 | 37 | 75 | 29.810811 | 0.810405 | 0.224841 | 0 | 0 | 0 | 0 | 0.030713 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.190476 | false | 0 | 0.047619 | 0.047619 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
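# Example: change_keyspace("ff", "0123456789abcdef", "01") re-encodes the hex
# string "ff" (value 255) as its binary representation, "11111111".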
7afcbfcd3c3962e9a985188562c99110c3140f93 | 18,907 | py | Python | cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z | cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z | cinder/zonemanager/drivers/brocade/brcd_rest_fc_zone_client.py | lightsey/cinder | e03d68e42e57a63f8d0f3e177fb4287290612b24 | [
"Apache-2.0"
] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z | # (c) Copyright 2019 Brocade, a Broadcom Company
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Brocade south bound connector to communicate with switch using
REST over HTTP or HTTPS protocol.
"""
import json
from oslo_log import log as logging
from oslo_serialization import base64
import requests
import six
from cinder.i18n import _
from cinder.zonemanager.drivers.brocade import exception
from cinder.zonemanager.drivers.brocade import fc_zone_constants
from cinder.zonemanager.drivers.brocade import rest_constants
LOG = logging.getLogger(__name__)
class BrcdRestFCZoneClient(object):
def __init__(self, ipaddress, username,
password, port, vfid, protocol):
"""Initializing the client with the parameters passed.
:param ipaddress: IP Address of the device.
:param username: User id to login.
:param password: User password.
:param port: Device Communication port
:param vfid: Virtual Fabric ID.
:param protocol: Communication Protocol.
"""
self.sw_ip = ipaddress
self.sw_user = username
self.sw_pwd = password
self.protocol = protocol
self.vfid = vfid
self.status_code = ''
self.session = None
self._login()
def is_supported_firmware(self):
is_supported_firmware = False
fw_version = self._get_firmware_version()
ver = fw_version.split(".")
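        # FOS reports versions like "v8.2.1b": strip the leading "v" from the
        # major field and any trailing letter from the patch field so the
        # numeric comparison below works.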
if len(ver[0]) > 1:
major_ver = ver[0]
ver[0] = major_ver[1]
if len(ver[2]) > 1:
patch_ver = ver[2]
ver[2] = patch_ver[0]
LOG.debug("Firmware version: %(version)s.", {'version': ver})
if int(ver[0] + ver[1] + ver[2]) > 820:
is_supported_firmware = True
return is_supported_firmware
def get_active_zone_set(self):
active_zone_set, checksum = self._get_effective_zone_set()
return active_zone_set
def get_nameserver_info(self):
return self._get_name_server()
def add_zones(self, add_zone_map, activate, active_zone_set=None):
self._add_zones(add_zone_map, activate)
def update_zones(self, update_zone_map, activate, operation,
active_zone_set=None):
self._update_zones(update_zone_map, activate, operation)
def delete_zones(self, zone_names_to_delete, activate,
active_zone_set=None):
self._delete_zones(zone_names_to_delete, activate)
def cleanup(self):
self._logout()
def _login(self):
if self.protocol == fc_zone_constants.REST_HTTPS:
self.protocol = fc_zone_constants.HTTPS
else:
self.protocol = fc_zone_constants.HTTP
if self.session is None:
self.session = requests.Session()
adapter = requests.adapters.HTTPAdapter(pool_connections=1,
pool_maxsize=1)
self.session.mount(self.protocol + '://', adapter)
credentials = base64.encode_as_text('%s:%s' % (self.sw_user,
self.sw_pwd)).replace('\n', '')
self.session.headers = {rest_constants.USER_AGENT:
rest_constants.ZONE_DRIVER,
rest_constants.ACCEPT: rest_constants.YANG,
rest_constants.AUTHORIZATION:
"Basic %s" % credentials}
response = self.session.post(self._build_url(rest_constants.LOGIN))
if response.status_code == 200:
auth = response.headers.get('Authorization')
LOG.info("REST login success, setting auth: %s", auth)
self.session.headers = {rest_constants.USER_AGENT:
rest_constants.ZONE_DRIVER,
rest_constants.ACCEPT: rest_constants.YANG,
rest_constants.CONTENT_TYPE:
rest_constants.YANG,
rest_constants.AUTHORIZATION: auth}
else:
msg = (_("REST login failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
return response.status_code
def _logout(self):
response = self.session.post(self._build_url(rest_constants.LOGOUT))
if response.status_code == 204:
LOG.info("REST logout success")
else:
msg = (_("REST logout failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
def _get_firmware_version(self):
response = self.session.get(self._build_url(rest_constants.GET_SWITCH))
firmware_version = ''
if response.status_code == 200:
data = response.json()
json_response = data[rest_constants.RESPONSE]
switch = json_response[rest_constants.SWITCH]
firmware_version = switch[rest_constants.FIRMWARE_VERSION]
LOG.info("REST firmware version: %s", firmware_version)
else:
msg = (_("REST get switch fw version failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
return firmware_version
def _get_name_server(self):
port_names = []
url = self._build_url(rest_constants.GET_NAMESERVER)
response = self.session.get(url)
if response.status_code == 200:
data = response.json()
json_response = data[rest_constants.RESPONSE]
nsinfos = json_response[rest_constants.FC_NAME_SERVER]
            for nsinfo in nsinfos:
                port_names.append(nsinfo[rest_constants.PORT_NAME])
else:
msg = (_("REST get NS info failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
return port_names
def _get_effective_zone_set(self):
active_zone_set = {}
zones_map = {}
url = self._build_url(rest_constants.GET_ACTIVE_ZONE_CFG)
response = self.session.get(url)
checksum = ''
active_cfg_name = ''
if response.status_code == 200:
data = response.json()
json_response = data[rest_constants.RESPONSE]
effective_cfg = json_response[rest_constants.EFFECTIVE_CFG]
checksum = effective_cfg[rest_constants.CHECKSUM]
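            # The checksum tracks the zone database revision; it must be sent
            # back when saving or activating configuration changes.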
try:
active_cfg_name = effective_cfg[rest_constants.CFG_NAME]
zones = effective_cfg[rest_constants.ENABLED_ZONE]
                if isinstance(zones, list):
                    for zone in zones:
                        zones_map.update({zone[rest_constants.ZONE_NAME]:
                                          zone[rest_constants.MEMBER_ENTRY]
                                          [rest_constants.ENTRY_NAME]})
                else:
                    zones_map.update({zones[rest_constants.ZONE_NAME]:
                                      zones[rest_constants.MEMBER_ENTRY]
                                      [rest_constants.ENTRY_NAME]})
except Exception:
active_cfg_name = ''
LOG.info("REST get effective zoneset success: "
"active cfg: %(cfg_name)s, checksum: %(chksum)s",
{'cfg_name': active_cfg_name, 'chksum': checksum})
else:
msg = (_("REST get effective zoneset failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
active_zone_set = {"active_zone_config": active_cfg_name,
"zones": zones_map}
return active_zone_set, checksum
def _add_zones(self, add_zone_map, activate):
active_zone_set, checksum = self._get_effective_zone_set()
        # get the zones already configured in the active cfg so that
        # existing zones are not recreated
        zones_in_active_cfg = active_zone_set.get("zones", {})
# for each new zone, create a zone entry in defined zone db
for zone_name, members in add_zone_map.items():
if zone_name not in zones_in_active_cfg:
body = {rest_constants.MEMBER_ENTRY:
{rest_constants.ENTRY_NAME:
add_zone_map.get(zone_name)}}
json_str = json.dumps(body)
url = self._build_url(rest_constants.POST_ZONE + zone_name)
response = self.session.post(url, data=json_str)
if response.status_code == 201:
LOG.info("REST create zone success: %s", zone_name)
else:
msg = (_("REST create zone failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
# update the cfg with the new zones
active_cfg_name = active_zone_set.get("active_zone_config")
active_zones = active_zone_set.get("zones")
        active_zone_names = list(active_zones.keys())
        active_zone_names.extend(add_zone_map.keys())
body = {rest_constants.MEMBER_ZONE:
{rest_constants.ZONE_NAME: active_zone_names}}
json_str = json.dumps(body)
if active_cfg_name == '':
active_cfg_name = fc_zone_constants.CFG_NAME
url = self._build_url(rest_constants.POST_CFG + active_cfg_name)
response = self.session.post(url, data=json_str)
if response.status_code == 201:
LOG.info("REST cfg create success: %s", active_cfg_name)
self._save_and_activate_cfg(checksum, activate,
active_cfg_name)
else:
msg = (_("REST cfg create failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
else:
url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name)
response = self.session.patch(url, data=json_str)
# if update successful, save the configuration changes
if response.status_code == 204:
LOG.info("REST cfg update success: %s", active_cfg_name)
self._save_and_activate_cfg(checksum, activate,
active_cfg_name)
else:
msg = (_("REST cfg update failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
def _update_zones(self, update_zone_map, activate, operation):
active_zone_set, checksum = self._get_effective_zone_set()
active_cfg_name = active_zone_set.get("active_zone_config")
active_zones = active_zone_set.get("zones")
# for each zone, update the zone members in defined zone db
for zone_name, members in update_zone_map.items():
            current_members = active_zones.get(zone_name, [])
if operation == "ADD":
new_members = set(members).difference(set(current_members))
if new_members:
update_zone_map.update({zone_name: new_members})
elif operation == "REMOVE":
new_members = set(current_members).difference(set(members))
if new_members:
update_zone_map.update({zone_name: new_members})
# for each zone to be updated, make REST PATCH call to update
for zone in update_zone_map.keys():
body = {rest_constants.MEMBER_ENTRY:
{rest_constants.ENTRY_NAME: update_zone_map.get(zone)}}
json_str = json.dumps(body)
url = self._build_url(rest_constants.POST_ZONE + zone)
response = self.session.patch(url, data=json_str)
if response.status_code == 204:
LOG.info("REST zone update success: %s", zone)
else:
msg = (_("REST zone update failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
# save and activate the config changes
self._save_and_activate_cfg(checksum, activate, active_cfg_name)
def _delete_zones(self, zone_names_to_delete, activate):
zone_names_to_delete = zone_names_to_delete.split(";")
active_zone_set, checksum = self._get_effective_zone_set()
# for each zone name, make REST DELETE call
for zone in zone_names_to_delete:
url = self._build_url(rest_constants.DELETE_ZONE + zone)
response = self.session.delete(url)
if response.status_code == 204:
LOG.info("REST delete zone success: %s", zone)
else:
msg = (_("REST delete zone failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
# update the cfg removing the deleted zones
active_cfg_name = active_zone_set.get("active_zone_config")
active_zones = active_zone_set.get("zones")
        active_zone_names = list(active_zones.keys())
if len(active_zone_names) == len(zone_names_to_delete):
# disable the cfg
url = self._build_url(rest_constants.PATCH_CFG_DISABLE)
body = {"checksum": checksum}
json_str = json.dumps(body)
response = self.session.patch(url, data=json_str)
if response.status_code == 204:
LOG.info("REST cfg disable success")
else:
msg = (_("REST cfg disable failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
# delete the cfg
url = self._build_url(rest_constants.DELETE_CFG + active_cfg_name)
response = self.session.delete(url)
if response.status_code == 204:
LOG.info("REST cfg delete success: %s", active_cfg_name)
else:
msg = (_("REST cfg delete failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
checksum = self._get_checksum()
self._save_and_activate_cfg(checksum, False, active_cfg_name)
else:
# update the cfg by removing the deleted zones
zone_names_in_cfg = list(set(active_zone_names)
.difference(set(zone_names_to_delete)))
body = {rest_constants.MEMBER_ZONE:
{rest_constants.ZONE_NAME: zone_names_in_cfg}}
json_str = json.dumps(body)
url = self._build_url(rest_constants.PATCH_CFG + active_cfg_name)
response = self.session.patch(url, data=json_str)
# if update successful, save the configuration changes
if response.status_code == 204:
LOG.info("REST cfg update success: %s", active_cfg_name)
self._save_and_activate_cfg(checksum, activate,
active_cfg_name)
else:
msg = (_("REST cfg update failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
def _save_and_activate_cfg(self, checksum, activate, active_cfg_name):
body = {"checksum": checksum}
json_str = json.dumps(body)
url = self._build_url(rest_constants.PATCH_CFG_SAVE)
response = self.session.patch(url, data=json_str)
if response.status_code == 204:
LOG.info("REST cfg save success")
else:
msg = (_("REST cfg save failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
# if activate=true, then enable the cfg changes to effective cfg
if activate:
checksum = self._get_checksum()
body = {"checksum": checksum}
json_str = json.dumps(body)
url = self._build_url(rest_constants.PATCH_CFG_ENABLE
+ active_cfg_name)
response = self.session.patch(url, data=json_str)
if response.status_code == 204:
LOG.info("REST cfg activate success: %s", active_cfg_name)
else:
msg = (_("REST cfg activate failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
def _get_checksum(self):
url = self._build_url(rest_constants.GET_CHECKSUM)
response = self.session.get(url)
checksum = ''
if response.status_code == 200:
data = response.json()
json_response = data[rest_constants.RESPONSE]
effective_cfg = json_response[rest_constants.EFFECTIVE_CFG]
checksum = effective_cfg[rest_constants.CHECKSUM]
LOG.info("REST get checksum success: %s", checksum)
else:
msg = (_("REST get checksum failed: %s")
% six.text_type(response.text))
LOG.error(msg)
raise exception.BrocadeZoningRestException(reason=msg)
return checksum
def _build_url(self, path):
url = '%s://%s%s' % (self.protocol, self.sw_ip, path)
if self.vfid is not None:
url = '%s?vf-id=%s' % (url, self.vfid)
return url
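
    # Added illustration: each _get_* method above repeats the same
    # GET -> check status -> log/raise -> unwrap-Response sequence. A minimal
    # sketch of that shared pattern follows; the method name is hypothetical
    # and nothing in the driver calls it.
    def _example_get_json(self, path):
        response = self.session.get(self._build_url(path))
        if response.status_code != 200:
            msg = (_("REST GET failed: %s") % six.text_type(response.text))
            LOG.error(msg)
            raise exception.BrocadeZoningRestException(reason=msg)
        return response.json()[rest_constants.RESPONSE]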
| 45.779661 | 79 | 0.587825 | 2,140 | 18,907 | 4.93271 | 0.11729 | 0.070197 | 0.03202 | 0.024252 | 0.60648 | 0.566029 | 0.526904 | 0.503221 | 0.480296 | 0.41673 | 0 | 0.006436 | 0.326123 | 18,907 | 412 | 80 | 45.890777 | 0.82207 | 0.086582 | 0 | 0.491279 | 0 | 0 | 0.065634 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055233 | false | 0.005814 | 0.026163 | 0.002907 | 0.110465 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb00617057f78b6193060f769f38f73ce61320ea | 1,232 | py | Python | eas_lookup/__init__.py | SFDigitalServices/address-microservice-fn-py | 37bba144df2cc5a95822ba79c90de48c9dc7beb1 | [
"MIT"
] | null | null | null | eas_lookup/__init__.py | SFDigitalServices/address-microservice-fn-py | 37bba144df2cc5a95822ba79c90de48c9dc7beb1 | [
"MIT"
] | null | null | null | eas_lookup/__init__.py | SFDigitalServices/address-microservice-fn-py | 37bba144df2cc5a95822ba79c90de48c9dc7beb1 | [
"MIT"
] | null | null | null | """ eas/lookup init file """
import os
import json
import logging
import requests
import azure.functions as func
from shared_code.common import func_json_response
def main(req: func.HttpRequest) -> func.HttpResponse:
""" main function for eas/lookup """
logging.info('EAS Lookup processed a request.')
try:
params = req.params.copy()
        if params.get('search'):
params['$where'] = \
"address like upper('{}%') AND parcel_number IS NOT NULL"\
.format(params['search'])
del params['search']
response = requests.get(
os.getenv('EAS_API_URL'),
params=params,
headers={'X-App-Token': os.getenv('EAS_APP_TOKEN')}
)
headers = {
"Cache-Control": "s-maxage=1, stale-while-revalidate, max-age={}"\
.format(os.getenv('EAS_CACHE_MAX_AGE')),
"Access-Control-Allow-Origin": "*"
}
return func_json_response(response, headers)
#pylint: disable=broad-except
except Exception as err:
logging.error("EAS Lookup error occurred: %s", err)
return func.HttpResponse(f"This endpoint encountered an error. {err}", status_code=500)
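
# Added illustration: the SoQL filter built inside main(), shown standalone.
# For search="123 Main" this returns
# "address like upper('123 Main%') AND parcel_number IS NOT NULL".
def _example_where_clause(search):
    return ("address like upper('{}%') AND parcel_number IS NOT NULL"
            .format(search))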
| 30.8 | 95 | 0.602273 | 145 | 1,232 | 5.02069 | 0.572414 | 0.049451 | 0.04533 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004444 | 0.269481 | 1,232 | 39 | 96 | 31.589744 | 0.804444 | 0.064935 | 0 | 0 | 0 | 0 | 0.28007 | 0.043898 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.206897 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb006a911bddb51da85b674ee0ef5ef33b604be7 | 2,228 | py | Python | recipes-bsp/polyos-setup/polyos-setup/polyaudio.py | PolyVection/meta-polyvection | a24fb91c4144e4d6e5fcaa73f456f805e30b751b | [
"MIT"
] | null | null | null | recipes-bsp/polyos-setup/polyos-setup/polyaudio.py | PolyVection/meta-polyvection | a24fb91c4144e4d6e5fcaa73f456f805e30b751b | [
"MIT"
] | null | null | null | recipes-bsp/polyos-setup/polyos-setup/polyaudio.py | PolyVection/meta-polyvection | a24fb91c4144e4d6e5fcaa73f456f805e30b751b | [
"MIT"
] | 1 | 2018-04-13T22:32:38.000Z | 2018-04-13T22:32:38.000Z | #!/usr/bin/python3
# Copyright (c) 2017, PolyVection UG.
#
# Based on configure-edison, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# aplay -l | awk -F \: '/,/{print $2}' | awk '{print $1}' | uniq
import os
import sys
from sys import stdout
import time
import termios
import fcntl
import subprocess
import polyterminal
def selectSPDIF():
f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w')
f.write("ctl.!default {\n")
f.write("type hw\n")
f.write("card pcm5121\n")
f.write("}\n")
f.write("pcm.!default {\n")
f.write("type hw\n")
f.write("card imxspdif\n")
f.write("}\n")
f.close()
def selectLINE():
f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w')
f.write("ctl.!default {\n")
f.write("type hw\n")
f.write("card pcm5121\n")
f.write("}\n")
f.write("pcm.!default {\n")
f.write("type hw\n")
f.write("card pcm5121\n")
f.write("}\n")
f.close()
def selectAMP1():
f = open("/mnt/data/settings/audio/alsa/asound.conf", 'w')
f.write("ctl.!default {\n")
f.write("type hw\n")
f.write("card is31ap2121\n")
f.write("}\n")
f.write("pcm.!default {\n")
f.write("type hw\n")
f.write("card is31ap2121\n")
f.write("}\n")
f.close()
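
# Added illustration: the three select* functions above differ only in the
# card names they write. A condensed helper (hypothetical; chooseFTS below
# does not call it) would be:
def _example_write_asound(pcm_card, ctl_card="pcm5121"):
    with open("/mnt/data/settings/audio/alsa/asound.conf", 'w') as f:
        f.write("ctl.!default {\ntype hw\ncard %s\n}\n" % ctl_card)
        f.write("pcm.!default {\ntype hw\ncard %s\n}\n" % pcm_card)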
def chooseFTS():
polyterminal.reset("PolyOS - Audio Setup")
print("")
print("Please select the audio output:")
print("-----------------------------------------")
print("")
print("0 -\t TOSLINK \t(ZERO)")
print("1 -\t ANALOG \t(ZERO)")
print("2 -\t AMPLIFIER\t(AMP1)")
print("")
    user = input("Enter 0, 1 or 2 to configure audio output: ")
    if user == "0":
        selectSPDIF()
    elif user == "1":
        selectLINE()
    elif user == "2":
        selectAMP1()
    else:
        selectSPDIF()
| 27.170732 | 76 | 0.606373 | 325 | 2,228 | 4.156923 | 0.387692 | 0.106588 | 0.108808 | 0.062176 | 0.399704 | 0.399704 | 0.352332 | 0.327905 | 0.327905 | 0.327905 | 0 | 0.024915 | 0.207361 | 2,228 | 81 | 77 | 27.506173 | 0.740091 | 0.270197 | 0 | 0.566667 | 0 | 0 | 0.368715 | 0.1018 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.2 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb015fa5e2c4d8430f9e89af39073bf40a1f5fc0 | 6,168 | py | Python | 3dcdrl/create_rollout_videos.py | NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA | 8e0a405589a9b0bd3bd543dda72bf2325ebc9126 | [
"MIT"
] | null | null | null | 3dcdrl/create_rollout_videos.py | NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA | 8e0a405589a9b0bd3bd543dda72bf2325ebc9126 | [
"MIT"
] | null | null | null | 3dcdrl/create_rollout_videos.py | NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA | 8e0a405589a9b0bd3bd543dda72bf2325ebc9126 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 09:19:33 2018
@author: anonymous
"""
import os
import torch
import numpy as np
from arguments import parse_a2c_args
from multi_env import MultiEnv
from models import CNNPolicy
from a2c_agent import *
from utils import initialize_logging
from doom_environment import DoomEnvironment
import cv2
import pickle
from moviepy.editor import ImageSequenceClip
from PIL import Image
def batch_from_obs(obs, batch_size=32):
"""Converts an obs (C,H,W) to a batch (B,C,H,W) of given size"""
if isinstance(obs, torch.Tensor):
if len(obs.shape)==3:
obs = obs.unsqueeze(0)
return obs.repeat(batch_size, 1, 1, 1)
if len(obs.shape)==3:
obs = np.expand_dims(obs, axis=0)
return np.repeat(obs, repeats=batch_size, axis=0)
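
def _example_batch_from_obs():
    """Added illustration: tile a single observation into a batch so the
    self-supervised TTA head gets enough samples for one update. The
    (3, 64, 112) shape is this script's observation shape."""
    obs = np.zeros((3, 64, 112), dtype=np.float32)
    batch = batch_from_obs(obs, batch_size=16)
    assert batch.shape == (16, 3, 64, 112)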
def make_movie(policy, env, filename, args, n_runs=50, use_tta=False,
use_rot=False, use_gray=False, name='', view=None, txt_pos=None):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
time_taken = []
losses = []
for i in range(n_runs):
if use_tta:
if use_rot:
path = 'policy.pth.tar'
else:
path='policy_TTA_GRAY.pth.tar'
checkpoint = torch.load('tta_models/'+path, map_location=device)
policy = CNNPolicy((3,64,112), args).to(device)
policy.load_state_dict(checkpoint['model'])
policy.eval()
if use_rot or use_gray:
tta_agent = TTAAgent(use_rot=use_rot,obs_shape=(3,64,112), hidden_size=128)
tta_agent.load()
tta_agent.copy_conv_weights(policy.conv_head)
state = torch.zeros(1, args.hidden_size)
mask = torch.ones(1,1)
obss = []
pos_list = []
obs = env.reset().astype(np.float32)
done = False
while not done:
#Gamma correction
obs = 255*np.power(obs/255.0, args.gamma_val)
#Inverse image
if args.inverse:
obs = 255 - obs
obss.append(obs)
with torch.no_grad():
result = policy(torch.from_numpy(obs).unsqueeze(0), state, mask)
action = result['actions']
state = result['states']
obs, reward, done, _ = env.step(action.item())
            if view is not None and txt_pos is not None:
x, y, _ = env.get_player_position()
pos_list.append([x, y])
if use_tta and (use_rot or use_gray):
batch_next_obs = batch_from_obs(torch.Tensor(obs).to(device), batch_size=16)
# Adapt using rotation prediction
losses.append(tta_agent.update_tta(batch_next_obs))
obs = obs.astype(np.float32)
time_taken.append(len(obss)/int(30/args.frame_skip))
if use_tta:
if use_rot:
tta_type='rotation'
elif use_gray:
tta_type='grayscale'
else:
tta_type = 'tta_OFF'
else:
tta_type='baseline'
    with open(f'TTA_videos/{tta_type}/{name}.pkl', 'wb') as pkl_file:
        pickle.dump(time_taken, pkl_file)
print(len(obss))
print(f'Average time taken: {np.mean(time_taken):.2f}s')
print(f'TTA mean loss: {np.mean(losses):.3f}')
observations = [o.transpose(1,2,0) for o in obss]
clip = ImageSequenceClip(observations, fps=int(30/args.frame_skip))
clip.write_videofile(filename)
    if view is not None and txt_pos is not None:
        # saving the view of the agent and the position
        # of the last run
        with open(txt_pos, "w+") as pos_txt:
            for p in pos_list:
                pos_txt.write("%d,%d\r\n" % (p[0], p[1]))
for c, o in enumerate(observations):
im = Image.fromarray(o.astype(np.uint8))
fig_name = str(c) + ".png"
im.save(view + fig_name)
def evaluate_saved_model():
args = parse_a2c_args()
USE_TTA = args.use_tta
USE_ROT = args.use_rot
USE_GRAY = args.use_gray
exp_name = args.experiment_name
SV_VW_POS = args.save_view_position
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
obs_shape = (3, args.screen_height, args.screen_width)
policy = CNNPolicy(obs_shape, args).to(device)
#Load Agent
if USE_TTA:
if USE_ROT:
path = 'policy.pth.tar'
else:
path='policy_TTA_GRAY.pth.tar'
checkpoint = torch.load('tta_models/'+path, map_location=device)
else:
path = 'saved_models/labyrinth_9_checkpoint_0198658048.pth.tar'
checkpoint = torch.load(path, map_location=device)
policy.load_state_dict(checkpoint['model'])
policy.eval()
assert args.model_checkpoint, 'No model checkpoint found'
assert os.path.isfile(args.model_checkpoint), 'The model could not be loaded'
for i in range(args.num_mazes_test):
#env = MultiEnv(args.simulator, args.num_environments, args, is_train=True)
env = DoomEnvironment(args, idx=i, is_train=True, use_shaping=args.use_shaping, fixed_scenario=True)
name='False'
if USE_TTA:
if USE_ROT:
tta_type='rotation'
elif USE_GRAY:
tta_type='grayscale'
else:
tta_type = 'tta_OFF'
else:
tta_type = 'baseline'
print(tta_type)
if SV_VW_POS:
view_name = f'map_creation/TTA_view/{tta_type}/'
txt_pos_track_name = f'map_creation/TTA_position/{tta_type}/{exp_name}.txt'
print('Saving view and positions of the agent.')
else:
view_name = None
txt_pos_track_name = None
movie_name = f'TTA_videos/{tta_type}/{exp_name}.mp4'
print('Creating movie {}'.format(movie_name))
make_movie(policy, env, movie_name, args, n_runs=100,
use_tta=USE_TTA, use_rot=USE_ROT, use_gray=USE_GRAY, name=exp_name, view=view_name, txt_pos=txt_pos_track_name)
if __name__ == '__main__':
evaluate_saved_model()
| 33.521739 | 130 | 0.600681 | 850 | 6,168 | 4.137647 | 0.296471 | 0.022178 | 0.011373 | 0.011373 | 0.246801 | 0.194484 | 0.184817 | 0.171737 | 0.171737 | 0.143304 | 0 | 0.02116 | 0.287451 | 6,168 | 183 | 131 | 33.704918 | 0.779067 | 0.059176 | 0 | 0.289855 | 0 | 0 | 0.108094 | 0.051712 | 0 | 0 | 0 | 0 | 0.014493 | 1 | 0.021739 | false | 0 | 0.094203 | 0 | 0.130435 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb021a30b50060269f34e2949685cc9a7971c175 | 6,706 | py | Python | napi/tests.py | abakan-zz/napi | 314da65bd78e2c716b7efb6deaf3816d8f38f7fd | [
"MIT"
] | null | null | null | napi/tests.py | abakan-zz/napi | 314da65bd78e2c716b7efb6deaf3816d8f38f7fd | [
"MIT"
] | 1 | 2015-08-03T00:41:59.000Z | 2015-08-07T06:37:26.000Z | napi/tests.py | abakan/napi | 314da65bd78e2c716b7efb6deaf3816d8f38f7fd | [
"MIT"
] | null | null | null | from nose.tools import raises
import numpy as np
from napi import neval
from napi.transformers import NapiTransformer, LazyTransformer
# short_circuit_or is assumed to be exported alongside short_circuit_and
from napi.transformers import short_circuit_and, short_circuit_or
TRANSFORMERS = [NapiTransformer]#, LazyTransformer]
randbools = lambda *n: np.random.randn(*n) < 0
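
def _example_neval():
    """Added illustration: napi's neval evaluates `and`/`or` elementwise
    over arrays instead of raising the usual truth-value ambiguity error."""
    a, b = np.arange(4), randbools(4)
    assert np.all(neval('a and b', {'a': a, 'b': b}) == np.logical_and(a, b))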
def short_circuit_and_(arrays, shape):
assert np.all(short_circuit_and(list(arrays), shape) == np.all(arrays, 0))
def short_circuit_or_(arrays, shape):
    assert np.all(short_circuit_or(list(arrays), shape) == np.any(arrays, 0))
def test_short_circuit_and():
for func in [short_circuit_and_, short_circuit_or_]:
for shape in [(10,), (10, 3), (1, 10, 1, 4)]:
yield func, [randbools(*shape), randbools(*shape),
randbools(*shape)], shape
def check_napi_magic_configuration(func, line):
assert func(line) is None
def test_napi_magic_configuration():
from napi.magics import NapiMagics
magic = NapiMagics(None)
magic._remove = magic._append = lambda: None
func = magic.napi
for line in ['', '', 'on', 'off', '1', '0', 'sq', 'sq', 'sc', 'sc',
'sq on', 'sq off', 'sq 1', 'sq 0',
'sc 0', 'sc 10000']:
yield check_napi_magic_configuration, func, line
def check_logicops_of_python_types(source, debug=False, trans=None):
result, expect = neval(source, debug=debug, transformer=trans), eval(source)
assert result == expect, '{} != {}'.format(result, expect)
def test_logicops_of_python_types(debug=False):
for t in TRANSFORMERS:
for src in [
'1 and True', 'True and 1', '[] and True', 'True and []', '1 and [1]',
'0 or True', 'False or 1', '[] or True', 'True or []', 'False or [1]',
'True and [1] and 1 and {1: 1}',
'True and [1] and 1 and {1: 1} and {}',
'True and [1] and 0 or {} and {1: 1}',]:
yield check_logicops_of_python_types, src, debug, t
def check_logicops_of_arrays(source, expect, ns, debug=False, sc=10000):
result = neval(source, ns, debug=debug)
assert np.all(result == expect), '{} != {}'.format(result, expect)
def test_logicops_of_arrays(debug=False):
a = np.arange(10)
b = randbools(10)
bo = np.ones(10, bool)
bz = np.zeros(10, bool)
ns = locals()
for src, res in [
('a and a', np.logical_and(a, a)),
('b and b', np.logical_and(b, b)),
('b and b and b', np.logical_and(b, b)),
('a and b', np.logical_and(a, b)),
('a or a', np.logical_or(a, a)),
('b or b', np.logical_or(b, b)),
('a or b', np.logical_or(a, b)),
('a or b or b', np.logical_or(a, b)),
('not a', np.logical_not(a)),
('not b', np.logical_not(b)),
('a and not a', bz),
('b and not b', bz),
('b and True', b),
('(a or b) and False', bz),]:
yield check_logicops_of_arrays, src, res, ns, debug
def test_array_squeezing(debug=False):
b = randbools(10)
b2d = randbools(1, 10)
b3d = randbools(1, 10, 1)
b5d = randbools(2, 1, 5, 1, 10)
b6d = randbools(1, 2, 1, 5, 1, 10, 1)
ns = locals()
for src, res in [
('b or b2d', np.logical_or(b, b2d.squeeze())),
('b or b2d and b3d', np.logical_or(b,
np.logical_and(b2d.squeeze(), b3d.squeeze()))),
('b5d and b6d', np.logical_and(b5d.squeeze(), b6d.squeeze())),
]:
yield check_logicops_of_arrays, src, res, ns, debug
def test_logicops_with_arithmetics_and_comparisons(debug=False):
a = np.arange(10)
b = randbools(10)
ns = locals()
for src, res in [
('a >= 0 and a + 1', np.logical_and(a >= 0, a + 1)),
('-a <= 0 and a**2 + 1', np.logical_and(-a <= 0, a**2 + 1)),
('---a - 1 <= 0 and b', np.logical_and(---a - 1<= 0, b)),
]:
yield check_logicops_of_arrays, src, res, ns, debug
def test_short_circuiting(debug=False):
arr = [randbools(10000) for i in range(5)]
a, b, c, d, e = arr
ns = locals()
for sc in (False, 10000):
for src, res in [
('a and a', np.logical_and(a, a)),
('b and b', np.logical_and(b, b)),
('a and b', np.logical_and(a, b)),
('a or a', np.logical_or(a, a)),
('b or b', np.logical_or(b, b)),
('a or b', np.logical_or(a, b)),
('a and b and c and d and e', np.all(arr, 0)),
('a or b or c or d or e', np.any(arr, 0)),
('a and b or c or d and e',
np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)),
]:
yield check_logicops_of_arrays, src, res, ns, debug, sc
def test_multidim_short_circuiting(debug=False):
arr = [randbools(10, 100, 10) for i in range(5)]
a, b, c, d, e = arr
ns = locals()
for sc in (False, 10000):
for src, res in [
('a and a', np.logical_and(a, a)),
('b and b', np.logical_and(b, b)),
('a and b', np.logical_and(a, b)),
('a or a', np.logical_or(a, a)),
('b or b', np.logical_or(b, b)),
('a or b', np.logical_or(a, b)),
('a and b and c and d and e', np.all(arr, 0)),
('a or b or c or d or e', np.any(arr, 0)),
('a and b or c or d and e',
np.any([np.logical_and(a, b), c, np.logical_and(d, e)], 0)),
]:
yield check_logicops_of_arrays, src, res, ns, debug, sc
def test_comparison_chaining(debug=False):
"""`a < b < c < d`"""
a = np.arange(10) - 4
b, c, d = a * 2, a * 3, a * 4
ns = locals()
for src, res in [
('a < b < c < d', np.all([a < b, b < c, c < d], 0)),
('a == b == c == d', np.all([a == b, b == c, c == d], 0)),
('0 == a == 0 == b', np.all([a == 0, b == 0,], 0)),
]:
yield check_logicops_of_arrays, src, res, ns, debug
@raises(ValueError)
def check_array_problems(source, ns, debug=False):
neval(source, ns, debug=debug)
def test_array_problems(debug=False):
a5 = randbools(5)
a9 = randbools(9)
a9by5 = randbools(9, 5)
ns = locals()
for src in [
'a5 and a9',
'a9 or a5',
'a9 or a9by5',
]:
yield check_array_problems, src, ns, debug
@raises(NameError)
def test_name_problem(debug=False):
neval('a and b', {}, debug=debug)
'''
def test_or_not(debug=False):
a = booleans(10)
assert all(eval('a or not a', locals(), debug=debug) ==
any([a, invert(a)], 0))
def test_equal(debug=False):
a = arange(10)
assert all(eval('a == 1 and a', locals(), debug=debug) ==
all([a == 1, a], 0))
''' | 27.710744 | 82 | 0.53892 | 1,061 | 6,706 | 3.287465 | 0.111216 | 0.085149 | 0.065367 | 0.040998 | 0.502007 | 0.461009 | 0.409117 | 0.372133 | 0.372133 | 0.316227 | 0 | 0.036967 | 0.290039 | 6,706 | 242 | 83 | 27.710744 | 0.695652 | 0.00507 | 0 | 0.423841 | 0 | 0 | 0.121598 | 0 | 0 | 0 | 0 | 0 | 0.033113 | 1 | 0.112583 | false | 0 | 0.039735 | 0 | 0.152318 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb037b1a6312e7d97680adcc0f7b31cd176631f1 | 8,621 | py | Python | src/api/impl/review.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | src/api/impl/review.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | src/api/impl/review.py | fekblom/critic | a6b60c9053e13d4c878d50531860d7389568626d | [
"Apache-2.0"
] | null | null | null | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2014 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import api
import api.impl.filters
class Review(object):
def __init__(self, review_id, repository_id, branch_id, state, summary,
description):
self.id = review_id
self.__repository_id = repository_id
self.__branch_id = branch_id
self.state = state
self.summary = summary
self.description = description
self.__owners_ids = None
self.__reviewers_ids = None
self.__watchers_ids = None
self.__filters = None
self.__commits = None
self.__rebases = None
def getRepository(self, critic):
return api.repository.fetch(critic, repository_id=self.__repository_id)
def getBranch(self, critic):
return api.branch.fetch(critic, branch_id=self.__branch_id)
def __fetchOwners(self, critic):
if self.__owners_ids is None:
cursor = critic.getDatabaseCursor()
cursor.execute("""SELECT uid
FROM reviewusers
WHERE review=%s
AND owner""",
(self.id,))
self.__owners_ids = frozenset(user_id for (user_id,) in cursor)
def getOwners(self, critic):
self.__fetchOwners(critic)
return frozenset(api.user.fetch(critic, user_id=user_id)
for user_id in self.__owners_ids)
def __fetchReviewers(self, critic):
if self.__reviewers_ids is None:
cursor = critic.getDatabaseCursor()
cursor.execute("""SELECT DISTINCT uid
FROM reviewuserfiles
JOIN reviewfiles ON (reviewfiles.id=reviewuserfiles.file)
WHERE reviewfiles.review=%s""",
(self.id,))
assigned_reviewers = frozenset(user_id for (user_id,) in cursor)
cursor.execute("""SELECT DISTINCT uid
FROM reviewfilechanges
JOIN reviewfiles ON (reviewfiles.id=reviewfilechanges.file)
WHERE reviewfiles.review=%s""",
(self.id,))
actual_reviewers = frozenset(user_id for (user_id,) in cursor)
self.__reviewers_ids = assigned_reviewers | actual_reviewers
def getReviewers(self, critic):
self.__fetchReviewers(critic)
return frozenset(api.user.fetch(critic, user_id=user_id)
for user_id in self.__reviewers_ids)
def __fetchWatchers(self, critic):
if self.__watchers_ids is None:
cursor = critic.getDatabaseCursor()
cursor.execute("""SELECT uid
FROM reviewusers
WHERE review=%s""",
(self.id,))
associated_users = frozenset(user_id for (user_id,) in cursor)
self.__fetchOwners(critic)
self.__fetchReviewers(critic)
non_watchers = self.__owners_ids | self.__reviewers_ids
self.__watchers_ids = associated_users - non_watchers
def getWatchers(self, critic):
self.__fetchWatchers(critic)
return frozenset(api.user.fetch(critic, user_id=user_id)
for user_id in self.__watchers_ids)
def getFilters(self, critic):
if self.__filters is None:
cursor = critic.getDatabaseCursor()
cursor.execute("""SELECT uid, type, path, id, review, creator
FROM reviewfilters
WHERE review=%s""",
(self.id,))
impls = [api.impl.filters.ReviewFilter(*row) for row in cursor]
self.__filters = [api.filters.ReviewFilter(critic, impl)
for impl in impls]
return self.__filters
def getCommits(self, critic):
if self.__commits is None:
cursor = critic.getDatabaseCursor()
# Direct changesets: no merges, no rebase changes.
cursor.execute(
"""SELECT DISTINCT commits.id, commits.sha1
FROM commits
JOIN changesets ON (changesets.child=commits.id)
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
WHERE reviewchangesets.review=%s
AND changesets.type='direct'""",
(self.id,))
commit_ids_sha1s = set(cursor)
# Merge changesets, excluding those added by move rebases.
cursor.execute(
"""SELECT DISTINCT commits.id, commits.sha1
FROM commits
JOIN changesets ON (changesets.child=commits.id)
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
LEFT OUTER JOIN reviewrebases ON (reviewrebases.review=%s
AND reviewrebases.equivalent_merge=commits.id)
WHERE reviewchangesets.review=%s
AND changesets.type='merge'
AND reviewrebases.id IS NULL""",
(self.id, self.id))
commit_ids_sha1s.update(cursor)
repository = self.getRepository(critic)
commits = [api.commit.fetch(repository, commit_id, sha1)
for commit_id, sha1 in commit_ids_sha1s]
self.__commits = api.commitset.create(critic, commits)
return self.__commits
def getRebases(self, wrapper):
return api.log.rebase.fetchAll(wrapper.critic, wrapper)
def wrap(self, critic):
return api.review.Review(critic, self)
def make(critic, args):
for (review_id, repository_id, branch_id,
state, summary, description) in args:
def callback():
return Review(review_id, repository_id, branch_id,
state, summary, description).wrap(critic)
yield critic._impl.cached(api.review.Review, review_id, callback)
def fetch(critic, review_id, branch):
cursor = critic.getDatabaseCursor()
if review_id is not None:
cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
state, summary, description
FROM reviews
JOIN branches ON (branches.id=reviews.branch)
WHERE reviews.id=%s""",
(review_id,))
else:
cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
state, summary, description
FROM reviews
JOIN branches ON (branches.id=reviews.branch)
WHERE branches.id=%s""",
(int(branch),))
row = cursor.fetchone()
if not row:
if review_id is not None:
raise api.review.InvalidReviewId(review_id)
else:
raise api.review.InvalidReviewBranch(branch)
return next(make(critic, [row]))
def fetchAll(critic, repository, state):
cursor = critic.getDatabaseCursor()
conditions = ["TRUE"]
values = []
if repository is not None:
conditions.append("branches.repository=%s")
values.append(repository.id)
if state is not None:
conditions.append("reviews.state IN (%s)"
% ", ".join(["%s"] * len(state)))
values.extend(state)
cursor.execute("""SELECT reviews.id, branches.repository, branches.id,
state, summary, description
FROM reviews
JOIN branches ON (branches.id=reviews.branch)
WHERE """ + " AND ".join(conditions) + """
ORDER BY reviews.id""",
values)
return list(make(critic, cursor))
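
# Added illustration (hypothetical names): typical use of the fetchers above
# through the api layer, where `critic` is an api session object and `repo`
# an api.repository.Repository:
#
#   review = fetch(critic, review_id=1234, branch=None)
#   open_reviews = fetchAll(critic, repo, state=["open"])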
| 43.540404 | 91 | 0.570931 | 884 | 8,621 | 5.401584 | 0.211538 | 0.021361 | 0.039791 | 0.019058 | 0.409843 | 0.366073 | 0.347853 | 0.334031 | 0.308063 | 0.226597 | 0 | 0.002834 | 0.345088 | 8,621 | 197 | 92 | 43.761421 | 0.842898 | 0.083981 | 0 | 0.32 | 0 | 0 | 0.248283 | 0.032055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113333 | false | 0 | 0.013333 | 0.033333 | 0.213333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb03c2e16d10a68817b5577a1dbdeba0377e167e | 8,833 | py | Python | runner/runner/main.py | BinalModi/reproserver | 2c1f86b67ba57473b507217a3289d92697a09665 | [
"BSD-3-Clause"
] | null | null | null | runner/runner/main.py | BinalModi/reproserver | 2c1f86b67ba57473b507217a3289d92697a09665 | [
"BSD-3-Clause"
] | null | null | null | runner/runner/main.py | BinalModi/reproserver | 2c1f86b67ba57473b507217a3289d92697a09665 | [
"BSD-3-Clause"
] | null | null | null | from common import database
from common import TaskQueues, get_object_store
from common.utils import setup_logging, shell_escape
from hashlib import sha256
import logging
import os
import shutil
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import functions
import subprocess
import tempfile
SQLSession = None
object_store = None
# IP as understood by Docker daemon, not this container
DOCKER_REGISTRY = os.environ.get('REGISTRY', 'localhost:5000')
def run_cmd_and_log(session, run_id, cmd):
proc = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.stdin.close()
for line in iter(proc.stdout.readline, ''):
logging.info("> %s", line)
session.add(database.RunLogLine(
run_id=run_id,
line=line.rstrip()))
session.commit()
return proc.wait()
def run_request(channel, method, _properties, body):
"""Process a run task.
Lookup a run in the database, get the input files from S3, then do the run
from the Docker image, upload the log and the output files.
"""
logging.info("Run request received: %r", body)
# Look up the run in the database
session = SQLSession()
exp = joinedload(database.Run.experiment)
run = (session.query(database.Run)
.options(joinedload(database.Run.parameter_values),
joinedload(database.Run.input_files),
exp.joinedload(database.Experiment.parameters),
exp.joinedload(database.Experiment.paths))
.get(int(body)))
if not run:
logging.error("Got a run request but couldn't get the run from the "
"database (body=%r)", body)
# ACK anyway
channel.basic_ack(delivery_tag=method.delivery_tag)
return
# Update status in database
if run.started:
logging.warning("Starting run which has already been started")
else:
run.started = functions.now()
session.commit()
# Remove previous info
run.log[:] = []
run.output_files[:] = []
def set_error(msg):
logging.warning("Got error: %s", msg)
run.done = functions.now()
session.add(database.RunLogLine(run_id=run.id, line=msg))
session.commit()
channel.basic_ack(delivery_tag=method.delivery_tag)
if run.experiment.status != database.Status.BUILT:
return set_error("Experiment to run is not BUILT")
# Make build directory
directory = tempfile.mkdtemp('build_%s' % run.experiment_hash)
container = None
fq_image_name = '%s/%s' % (DOCKER_REGISTRY, run.experiment.docker_image)
try:
# Get list of parameters
params = {}
params_unset = set()
for param in run.experiment.parameters:
if not param.optional:
params_unset.add(param.name)
params[param.name] = param.default
# Get parameter values
for param in run.parameter_values:
if param.name in params:
logging.info("Param: %s=%r", param.name, param.value)
params[param.name] = param.value
params_unset.discard(param.name)
else:
return set_error("Got parameter value for parameter %s which "
"does not exist" % param.name)
if params_unset:
return set_error("Missing value for parameters: %s" %
", ".join(params_unset))
# Get paths
paths = {}
for path in run.experiment.paths:
paths[path.name] = path.path
# Get input files
inputs = []
for input_file in run.input_files:
if input_file.name not in paths:
return set_error("Got an unknown input file %s" %
input_file.name)
inputs.append((input_file,
paths[input_file.name]))
logging.info("Using %d input files: %s", len(inputs),
", ".join(f.name for f, p in inputs))
# Create container
container = 'run_%s' % body
logging.info("Creating container %s with image %s",
container, run.experiment.docker_image)
# Turn parameters into a command-line
cmdline = []
for k, v in params.iteritems():
if k.startswith('cmdline_'):
i = k[8:]
cmdline.extend(['cmd', v, 'run', i])
cmdline = ['docker', 'create', '-i', '--name', container,
'--', fq_image_name] + cmdline
logging.info('$ %s', ' '.join(shell_escape(a) for a in cmdline))
subprocess.check_call(cmdline)
for input_file, path in inputs:
local_path = os.path.join(directory, 'input_%s' % input_file.hash)
# Download file from S3
logging.info("Downloading input file: %s, %s, %d bytes",
input_file.name, input_file.hash, input_file.size)
object_store.download_file('inputs', input_file.hash, local_path)
# Put file in container
logging.info("Copying file to container")
subprocess.check_call(['docker', 'cp', '--',
local_path,
'%s:%s' % (container, path)])
# Remove local file
os.remove(local_path)
# Start container using parameters
logging.info("Starting container")
try:
ret = run_cmd_and_log(session, run.id,
['docker', 'start', '-ai', '--', container])
except IOError:
return set_error("Got IOError running experiment")
if ret != 0:
return set_error("Error: Docker returned %d" % ret)
run.done = functions.now()
# Get output files
for path in run.experiment.paths:
if path.is_output:
local_path = os.path.join(directory, 'output_%s' % path.name)
# Copy file out of container
logging.info("Getting output file %s", path.name)
ret = subprocess.call(['docker', 'cp', '--',
'%s:%s' % (container, path.path),
local_path])
if ret != 0:
logging.warning("Couldn't get output %s", path.name)
session.add(database.RunLogLine(
run_id=run.id,
line="Couldn't get output %s" % path.name))
continue
with open(local_path, 'rb') as fp:
# Hash it
hasher = sha256()
chunk = fp.read(4096)
while chunk:
hasher.update(chunk)
chunk = fp.read(4096)
filehash = hasher.hexdigest()
# Rewind it
filesize = fp.tell()
fp.seek(0, 0)
# Upload file to S3
logging.info("Uploading file, size: %d bytes" % filesize)
object_store.upload_fileobj('outputs', filehash, fp)
# Add OutputFile to database
run.output_files.append(
database.OutputFile(hash=filehash, name=path.name,
size=filesize))
# Remove local file
os.remove(local_path)
# ACK
session.commit()
channel.basic_ack(delivery_tag=method.delivery_tag)
logging.info("Done!")
except Exception:
logging.exception("Error processing run!")
if True:
set_error("Internal error!")
else:
# Set database status back to QUEUED
run.status = database.Status.QUEUED
session.commit()
# NACK the task in RabbitMQ
channel.basic_nack(delivery_tag=method.delivery_tag)
finally:
# Remove container if created
if container is not None:
subprocess.call(['docker', 'rm', '-f', '--', container])
# Remove image
subprocess.call(['docker', 'rmi', '--', fq_image_name])
# Remove build directory
shutil.rmtree(directory)
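
def _example_hash_stream(path):
    """Added illustration: the chunked SHA-256 pattern run_request applies
    to output files, as a standalone helper (not called by the service)."""
    hasher = sha256()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(4096), b''):
            hasher.update(chunk)
    return hasher.hexdigest()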
def main():
setup_logging('REPROSERVER-RUNNER')
# SQL database
global SQLSession
engine, SQLSession = database.connect()
# AMQP
tasks = TaskQueues()
# Object storage
global object_store
object_store = get_object_store()
logging.info("Ready, listening for requests")
tasks.consume_run_tasks(run_request)
| 35.051587 | 78 | 0.551681 | 975 | 8,833 | 4.895385 | 0.246154 | 0.02996 | 0.017599 | 0.020951 | 0.132202 | 0.11586 | 0.092814 | 0.058873 | 0.049864 | 0.023465 | 0 | 0.004518 | 0.348466 | 8,833 | 251 | 79 | 35.191235 | 0.824848 | 0.093513 | 0 | 0.143678 | 0 | 0 | 0.11072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022989 | false | 0 | 0.063218 | 0 | 0.132184 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb085c85e7782e1a789b1bec7a2c6a71e68ad8d9 | 4,574 | py | Python | tests/parser_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | 1 | 2021-04-01T20:22:36.000Z | 2021-04-01T20:22:36.000Z | tests/parser_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | 1 | 2020-11-20T22:24:38.000Z | 2020-11-20T22:24:38.000Z | tests/parser_test.py | OtavioHenrique/yalul | ce99e32365ed5607527b9f2f39705ad5d9e20ba2 | [
"MIT"
] | null | null | null | from yalul.parser import Parser
from yalul.parsers.ast.nodes.statements.expressions.binary import Binary
from yalul.lex.token import Token
from yalul.lex.token_type import TokenType
from yalul.parsers.ast.nodes.statements.expressions.values.integer import Integer
class TestParserBinary:
"""Test parser generating binary operations expressions"""
def test_parser_run_generates_correct_ast_complex_binary_expression_with_multi_precedence(self):
"""
        Validates that the parser generates a correct AST for a binary expression with multiple precedence levels, like 39 * 2 + 42
"""
tokens = [
Token(TokenType.INTEGER, 39),
Token(TokenType.MULTIPLY, "*"),
Token(TokenType.INTEGER, 2),
Token(TokenType.SUM, '+'),
Token(TokenType.INTEGER, 42),
Token(TokenType.END_STATEMENT, 'End of Statement'),
Token(TokenType.EOF, 'End of File')
]
parser_response = Parser(tokens).parse()
asts = parser_response.ast.statements
ast = asts[0]
assert type(ast) is Binary
assert ast.operator.type is TokenType.SUM
assert type(ast.left) is Binary
assert ast.left.operator.type is TokenType.MULTIPLY
assert type(ast.left.left) is Integer
assert ast.left.left.value == 39
assert type(ast.left.right) is Integer
assert ast.left.right.value == 2
assert type(ast.right) is Integer
assert ast.right.value == 42
class TestParserGenerateErrors:
"""Test parser generating correct parser errors"""
def test_parser_run_generates_correct_parser_errors(self):
"""
        Validates that the parser generates correct parser errors
"""
tokens = [
Token(TokenType.INTEGER, 39),
Token(TokenType.MULTIPLY, '*'),
Token(TokenType.LEFT_PAREN, 'Left Paren'),
Token(TokenType.INTEGER, 41),
Token(TokenType.SUM, '+'),
Token(TokenType.INTEGER, 1),
Token(TokenType.END_STATEMENT, 'End of Statement'),
Token(TokenType.EOF, 'End of File')
]
parser_response = Parser(tokens).parse()
errors = parser_response.errors()
assert errors[0] == 'Expected a RIGHT PAREN ) after expression'
class TestParserGenerateUnfinishedExpressionErrors:
"""Test parser generating correct parser errors"""
def test_parse_run_generates_correct_error_unfinished_expression(self):
"""
        Validates that the parser generates a correct error for unfinished expressions
"""
tokens = [
Token(TokenType.INTEGER, 39),
Token(TokenType.MULTIPLY, '*'),
Token(TokenType.INTEGER, 41),
Token(TokenType.SUM, '+'),
Token(TokenType.END_STATEMENT, 'End of Statement'),
Token(TokenType.EOF, 'End of File')
]
parser_response = Parser(tokens).parse()
errors = parser_response.errors()
assert errors[0] == 'Expect Expression after TokenType.SUM, Value: +'
class TestParserGenerateUnopenedOperatorError:
"""Test parser generating correct parser errors"""
def test_parse_run_generates_correct_error_unopened_operators_right_paren(self):
"""
        Validates that the parser generates a correct error for unopened operators
"""
tokens = [
Token(TokenType.INTEGER, 39),
Token(TokenType.MULTIPLY, '*'),
Token(TokenType.RIGHT_PAREN, ')'),
Token(TokenType.INTEGER, 41),
Token(TokenType.END_STATEMENT, 'End of Statement'),
Token(TokenType.EOF, 'End of File')
]
parser_response = Parser(tokens).parse()
errors = parser_response.errors()
assert errors[0] == 'Expect a open operator for TokenType.RIGHT_PAREN, Value: )'
def test_parse_run_generates_correct_error_unopened_operators_right_brace(self):
"""
        Validates that the parser generates a correct error for unopened operators
"""
tokens = [
Token(TokenType.INTEGER, 39),
Token(TokenType.MULTIPLY, '*'),
Token(TokenType.RIGHT_BRACE, '}'),
Token(TokenType.INTEGER, 41),
Token(TokenType.END_STATEMENT, 'End of Statement'),
Token(TokenType.EOF, 'End of File')
]
parser_response = Parser(tokens).parse()
errors = parser_response.errors()
assert errors[0] == 'Expect a open operator for TokenType.RIGHT_BRACE, Value: }'
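
def _example_parse_sum():
    """Added illustration (not an original test case): the token-list ->
    Parser -> AST pattern the tests above share, in its smallest form."""
    tokens = [
        Token(TokenType.INTEGER, 1),
        Token(TokenType.SUM, '+'),
        Token(TokenType.INTEGER, 2),
        Token(TokenType.END_STATEMENT, 'End of Statement'),
        Token(TokenType.EOF, 'End of File'),
    ]
    ast = Parser(tokens).parse().ast.statements[0]
    assert type(ast) is Binary
    assert ast.operator.type is TokenType.SUM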
| 34.134328 | 119 | 0.636205 | 496 | 4,574 | 5.739919 | 0.149194 | 0.162276 | 0.088514 | 0.036881 | 0.700386 | 0.675097 | 0.639269 | 0.604145 | 0.559185 | 0.517035 | 0 | 0.011022 | 0.266069 | 4,574 | 133 | 120 | 34.390977 | 0.837057 | 0.124836 | 0 | 0.487805 | 0 | 0 | 0.092837 | 0.011378 | 0 | 0 | 0 | 0 | 0.170732 | 1 | 0.060976 | false | 0 | 0.060976 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0a2166fba41078615dd96cf7ca319832633515 | 4,036 | py | Python | tests/core/daemon/test_daemon_alerts.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | 1 | 2021-05-28T01:38:23.000Z | 2021-05-28T01:38:23.000Z | tests/core/daemon/test_daemon_alerts.py | hoffmang9/chia-blockchain | edc5f1dfe57ecd81d00b5ba4477024309b1231de | [
"Apache-2.0"
] | null | null | null | tests/core/daemon/test_daemon_alerts.py | hoffmang9/chia-blockchain | edc5f1dfe57ecd81d00b5ba4477024309b1231de | [
"Apache-2.0"
] | null | null | null | import dataclasses
import pytest
from blspy import PrivateKey
from src.server.outbound_message import NodeType
from src.types.peer_info import PeerInfo
from src.util.block_tools import BlockTools
from src.util.hash import std_hash
from src.util.ints import uint16
from src.util.validate_alert import create_alert_file, create_not_ready_alert_file
from tests.core.full_node.test_full_sync import node_height_at_least
from tests.setup_nodes import self_hostname, setup_daemon, setup_full_system
from tests.simulation.test_simulation import test_constants_modified
from tests.time_out_assert import time_out_assert, time_out_assert_custom_interval
from tests.util.alert_server import AlertServer
no_genesis = dataclasses.replace(test_constants_modified, GENESIS_CHALLENGE=None)
b_tools = BlockTools(constants=no_genesis)
b_tools_1 = BlockTools(constants=no_genesis)
master_int = 5399117110774477986698372024995405256382522670366369834617409486544348441851
master_sk: PrivateKey = PrivateKey.from_bytes(master_int.to_bytes(32, "big"))
pubkey_alert = bytes(master_sk.get_g1()).hex()
alert_url = "http://127.0.0.1:59000/status"
new_config = b_tools._config
new_config["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config["ALERTS_URL"] = alert_url
new_config["daemon_port"] = 55401
new_config["network_overrides"]["constants"][new_config["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools.change_config(new_config)
new_config_1 = b_tools_1._config
new_config_1["CHIA_ALERTS_PUBKEY"] = pubkey_alert
new_config_1["ALERTS_URL"] = alert_url
new_config_1["daemon_port"] = 55402
new_config_1["network_overrides"]["constants"][new_config_1["selected_network"]]["GENESIS_CHALLENGE"] = None
b_tools_1.change_config(new_config_1)
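
def _example_alert_config(btools, daemon_port):
    """Added illustration: the two config blocks above differ only in
    daemon_port; this condensed helper (not used by the test) captures the
    shared pattern."""
    cfg = btools._config
    cfg["CHIA_ALERTS_PUBKEY"] = pubkey_alert
    cfg["ALERTS_URL"] = alert_url
    cfg["daemon_port"] = daemon_port
    cfg["network_overrides"]["constants"][cfg["selected_network"]]["GENESIS_CHALLENGE"] = None
    btools.change_config(cfg)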
class TestDaemonAlerts:
@pytest.fixture(scope="function")
async def get_daemon(self):
async for _ in setup_daemon(btools=b_tools):
yield _
@pytest.fixture(scope="function")
async def get_daemon_1(self):
async for _ in setup_daemon(btools=b_tools_1):
yield _
@pytest.fixture(scope="function")
async def simulation(self):
async for _ in setup_full_system(b_tools_1.constants, b_tools=b_tools, b_tools_1=b_tools_1):
yield _
@pytest.mark.asyncio
async def test_daemon_alert_simulation(self, simulation, get_daemon, get_daemon_1):
node1, node2, _, _, _, _, _, _, _, server1 = simulation
await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
daemon = get_daemon
daemon_1 = get_daemon_1
alert_file_path = daemon.root_path / "alert.txt"
alert_server = await AlertServer.create_alert_server(alert_file_path, 59000)
create_not_ready_alert_file(alert_file_path, master_sk)
await alert_server.run()
selected = daemon.net_config["selected_network"]
async def num_connections():
count = len(node2.server.connection_by_type[NodeType.FULL_NODE].items())
return count
await time_out_assert_custom_interval(60, 1, num_connections, 1)
preimage = "This is test preimage!"
expected_genesis = std_hash(bytes(preimage, "utf-8")).hex()
alert_file_path.unlink()
create_alert_file(alert_file_path, master_sk, "This is test preimage!")
def check_genesis(expected):
deamon_updated = (
daemon.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
)
deamon_1_updated = (
daemon_1.net_config["network_overrides"]["constants"][selected]["GENESIS_CHALLENGE"] == expected
)
return deamon_updated and deamon_1_updated
await time_out_assert(15, check_genesis, True, expected_genesis)
def check_initialized():
return node1.full_node.initialized is True and node2.full_node.initialized is True
await time_out_assert(15, check_initialized, True)
await time_out_assert(1500, node_height_at_least, True, node2, 7)
| 39.960396 | 112 | 0.745045 | 544 | 4,036 | 5.143382 | 0.257353 | 0.045032 | 0.032523 | 0.025733 | 0.338456 | 0.233024 | 0.196569 | 0.10436 | 0.073624 | 0 | 0 | 0.046657 | 0.166254 | 4,036 | 100 | 113 | 40.36 | 0.784844 | 0 | 0 | 0.076923 | 0 | 0 | 0.102081 | 0 | 0 | 0 | 0 | 0 | 0.064103 | 1 | 0.025641 | false | 0 | 0.179487 | 0.012821 | 0.25641 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0b659ef2cc14f0a9cf674d122a6ce8c529a3af | 8,196 | py | Python | words.py | GuyKabiri/language-models | 63b368aaee74ec077abcc8f66f0c8fbd4c712a07 | [
"MIT"
] | 41 | 2018-08-25T14:00:46.000Z | 2022-01-20T05:21:03.000Z | words.py | GuyKabiri/language-models | 63b368aaee74ec077abcc8f66f0c8fbd4c712a07 | [
"MIT"
] | 5 | 2018-07-06T11:34:22.000Z | 2021-11-15T20:32:34.000Z | words.py | GuyKabiri/language-models | 63b368aaee74ec077abcc8f66f0c8fbd4c712a07 | [
"MIT"
] | 17 | 2018-07-24T19:40:19.000Z | 2022-01-25T09:14:11.000Z | import keras
import keras.backend as K
from keras.datasets import imdb
from keras.layers import LSTM, Embedding, TimeDistributed, Input, Dense
from keras.models import Model
from tensorflow.python.client import device_lib
from tqdm import tqdm
import os, random
from argparse import ArgumentParser
import numpy as np
from tensorboardX import SummaryWriter
import util
CHECK = 5
def generate_seq(model : Model, seed, size, temperature=1.0):
"""
:param model: The complete RNN language model
    :param seed: The first few words of the sequence to start generating from
:param size: The total size of the sequence to generate
:param temperature: This controls how much we follow the probabilities provided by the network. For t=1.0 we just
sample directly according to the probabilities. Lower temperatures make the high-probability words more likely
(providing more likely, but slightly boring sentences) and higher temperatures make the lower probabilities more
likely (resulting is weirder sentences). For temperature=0.0, the generation is _greedy_, i.e. the word with the
highest probability is always chosen.
:return: A list of integers representing a samples sentence
"""
ls = seed.shape[0]
# Due to the way Keras RNNs work, we feed the model a complete sequence each time. At first it's just the seed,
# zero-padded to the right length. With each iteration we sample and set the next character.
tokens = np.concatenate([seed, np.zeros(size - ls)])
for i in range(ls, size):
probs = model.predict(tokens[None,:])
# Extract the i-th probability vector and sample an index from it
next_token = util.sample_logits(probs[0, i-1, :], temperature=temperature)
tokens[i] = next_token
return [int(t) for t in tokens]
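
# Added illustration (hypothetical values): sampling 60 tokens from a
# trained model, starting from a three-word seed (1 is the start symbol):
#
#   sample = generate_seq(model, np.array([1, 42, 7]), size=60, temperature=0.9)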
def sparse_loss(y_true, y_pred):
return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
def go(options):
tbw = SummaryWriter(log_dir=options.tb_dir)
if options.seed < 0:
seed = random.randint(0, 1000000)
print('random seed: ', seed)
np.random.seed(seed)
else:
np.random.seed(options.seed)
if options.task == 'wikisimple':
        x, w2i, i2w = \
            util.load_words(util.DIR + '/datasets/wikisimple.txt', vocab_size=options.top_words, limit=options.limit)
# Finding the length of the longest sequence
x_max_len = max([len(sentence) for sentence in x])
numwords = len(i2w)
print('max sequence length ', x_max_len)
print(numwords, 'distinct words')
x = util.batch_pad(x, options.batch, add_eos=True)
elif options.task == 'file':
        x, w2i, i2w = \
            util.load_words(options.data, vocab_size=options.top_words, limit=options.limit)
# Finding the length of the longest sequence
x_max_len = max([len(sentence) for sentence in x])
numwords = len(i2w)
print('max sequence length ', x_max_len)
print(numwords, 'distinct words')
x = util.batch_pad(x, options.batch, add_eos=True)
else:
raise Exception('Task {} not recognized.'.format(options.task))
def decode(seq):
return ' '.join(i2w[id] for id in seq)
print('Finished data loading. ', sum([b.shape[0] for b in x]), ' sentences loaded')
## Define model
input = Input(shape=(None, ))
embedding = Embedding(numwords, options.lstm_capacity, input_length=None)
embedded = embedding(input)
decoder_lstm = LSTM(options.lstm_capacity, return_sequences=True)
h = decoder_lstm(embedded)
if options.extra is not None:
for _ in range(options.extra):
h = LSTM(options.lstm_capacity, return_sequences=True)(h)
fromhidden = Dense(numwords, activation='linear')
out = TimeDistributed(fromhidden)(h)
model = Model(input, out)
opt = keras.optimizers.Adam(lr=options.lr)
lss = sparse_loss
model.compile(opt, lss)
model.summary()
## Training
#- Since we have a variable batch size, we make our own training loop, and train with
# model.train_on_batch(...). It's a little more verbose, but it gives us more control.
epoch = 0
instances_seen = 0
while epoch < options.epochs:
for batch in tqdm(x):
n, l = batch.shape
batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1) # prepend start symbol
batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1) # append pad symbol
loss = model.train_on_batch(batch_shifted, batch_out[:, :, None])
instances_seen += n
tbw.add_scalar('lm/batch-loss', float(loss), instances_seen)
epoch += 1
# Show samples for some sentences from random batches
for temp in [0.0, 0.9, 1, 1.1, 1.2]:
print('### TEMP ', temp)
for i in range(CHECK):
b = random.choice(x)
if b.shape[1] > 20:
seed = b[0,:20]
else:
seed = b[0, :]
seed = np.insert(seed, 0, 1)
gen = generate_seq(model, seed, 60, temperature=temp)
print('*** [', decode(seed), '] ', decode(gen[len(seed):]))
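
# Added illustration: the teacher-forcing preparation used in the training
# loop above, as a standalone sketch (go() does not call it). Inputs get the
# start symbol (id 1) prepended; targets get a pad (id 0) appended, so the
# model learns to predict each next word.
def _example_shift_batch(batch):
    n, l = batch.shape
    batch_shifted = np.concatenate([np.ones((n, 1)), batch], axis=1)
    batch_out = np.concatenate([batch, np.zeros((n, 1))], axis=1)
    return batch_shifted, batch_out[:, :, None]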
if __name__ == "__main__":
## Parse the command line options
parser = ArgumentParser()
parser.add_argument("-e", "--epochs",
dest="epochs",
help="Number of epochs.",
default=20, type=int)
parser.add_argument("-E", "--embedding-size",
dest="embedding_size",
help="Size of the word embeddings on the input layer.",
default=300, type=int)
parser.add_argument("-o", "--output-every",
dest="out_every",
help="Output every n epochs.",
default=1, type=int)
parser.add_argument("-l", "--learn-rate",
dest="lr",
help="Learning rate",
default=0.001, type=float)
parser.add_argument("-b", "--batch-size",
dest="batch",
help="Batch size",
default=128, type=int)
parser.add_argument("-t", "--task",
dest="task",
help="Task",
default='wikisimple', type=str)
parser.add_argument("-D", "--data-directory",
dest="data",
help="Data file. Should contain one sentence per line.",
default='./data', type=str)
parser.add_argument("-L", "--lstm-hidden-size",
dest="lstm_capacity",
help="LSTM capacity",
default=256, type=int)
parser.add_argument("-m", "--max_length",
dest="max_length",
help="Max length",
default=None, type=int)
parser.add_argument("-w", "--top_words",
dest="top_words",
help="Top words",
default=10000, type=int)
parser.add_argument("-I", "--limit",
dest="limit",
help="Character cap for the corpus",
default=None, type=int)
parser.add_argument("-T", "--tb-directory",
dest="tb_dir",
help="Tensorboard directory",
default='./runs/words', type=str)
parser.add_argument("-r", "--random-seed",
dest="seed",
help="RNG seed. Negative for random (seed is printed for reproducability).",
default=-1, type=int)
parser.add_argument("-x", "--extra-layers",
dest="extra",
help="Number of extra LSTM layers.",
default=None, type=int)
options = parser.parse_args()
print('OPTIONS', options)
go(options) | 33.453061 | 120 | 0.571986 | 1,001 | 8,196 | 4.593407 | 0.298701 | 0.027403 | 0.051762 | 0.031318 | 0.193127 | 0.15659 | 0.142236 | 0.113093 | 0.094389 | 0.094389 | 0 | 0.013934 | 0.316984 | 8,196 | 245 | 121 | 33.453061 | 0.807431 | 0.171669 | 0 | 0.12 | 0 | 0 | 0.133195 | 0.003568 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.08 | 0.013333 | 0.126667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0cafc42dfe07c8f9f0ade59b4efb5599bbec9d | 979 | py | Python | synthdnm/backend.py | james-guevara/synthdnm | 8510cfd91438452da553d35894b63c5d75cdd47e | [
"MIT"
] | 6 | 2021-02-22T08:29:49.000Z | 2021-11-26T21:24:49.000Z | build/lib/synthdnm/backend.py | james-guevara/synthdnm | 8510cfd91438452da553d35894b63c5d75cdd47e | [
"MIT"
] | 1 | 2021-10-04T19:22:34.000Z | 2021-11-16T21:22:12.000Z | synthdnm/backend.py | james-guevara/synthdnm | 8510cfd91438452da553d35894b63c5d75cdd47e | [
"MIT"
] | 1 | 2020-11-06T18:57:57.000Z | 2020-11-06T18:57:57.000Z | import re
# Splits a line by spaces or tabs, returns a list object
def tokenize(line):
    linesplit = line.rstrip().split("\t")
    if len(linesplit) == 1:
        linesplit = line.rstrip().split(" ")
    return linesplit
# Returns dictionary containing information of pedigree file
def process_ped(fam_filepath):
ped_dictionary = {} # ped_dictionary[iid] = (fid, iid, father_iid, mother_iid, sex, phen)
with open(fam_filepath) as f:
for line in f:
linesplit = tokenize(line)
fid,iid,father_iid,mother_iid,sex,phen = linesplit[0],linesplit[1],linesplit[2],linesplit[3],linesplit[4],linesplit[5]
if sex != "1" and sex != "2": continue
if father_iid == "0" and mother_iid == "0": continue
ped_dictionary[iid] = (fid,iid,father_iid,mother_iid,sex,phen)
return ped_dictionary
# Convert spaces to tabs
def tabbit(line):
linesplit = re.split("\s+", line.rstrip())
return "\t".join(linesplit[1:])
| 40.791667 | 130 | 0.657814 | 138 | 979 | 4.557971 | 0.413043 | 0.082671 | 0.057234 | 0.071542 | 0.198728 | 0.198728 | 0.198728 | 0.198728 | 0.149444 | 0.149444 | 0 | 0.015484 | 0.208376 | 979 | 23 | 131 | 42.565217 | 0.796129 | 0.208376 | 0 | 0 | 0 | 0 | 0.015584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.055556 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0cd1bc006214dc6eb22b166f68a1596ea8baca | 1,925 | py | Python | ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py | dattaray-basab/RLGames | b12263fe7a4a246be02fc20ed20cfb9fda40d29b | [
"MIT"
] | null | null | null | ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py | dattaray-basab/RLGames | b12263fe7a4a246be02fc20ed20cfb9fda40d29b | [
"MIT"
] | null | null | null | ws/RLAgents/B_ValueBased/Bootstrapping/OnPolicy/sarsa/impl_mgt.py | dattaray-basab/RLGames | b12263fe7a4a246be02fc20ed20cfb9fda40d29b | [
"MIT"
] | null | null | null | from ws.RLAgents.B_ValueBased.Bootstrapping.qtable_mgt import qtable_mgt
def impl_mgt(app_info):
_env = app_info.ENV
Display = app_info.ENV.Display
fn_get_qval, fn_set_qval, fn_get_q_actions, fn_get_max_q_actions = qtable_mgt()
def _fn_update_knowledge(state, action, reward, next_state, next_action):
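        # On-policy SARSA update:
        #   Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a))
        # with alpha = LEARNING_RATE and gamma = DISCOUNT_FACTOR; unlike
        # Q-learning, the bootstrap uses the action a' actually taken next.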
current_q = fn_get_qval(state, action)
next_state_q = fn_get_qval(next_state, next_action)
new_q = (current_q + app_info.LEARNING_RATE *
(reward + app_info.DISCOUNT_FACTOR * next_state_q - current_q))
fn_set_qval(state, action, new_q)
def fn_run_sarsa():
episode_num = 0
while True:
episode_num += 1
episode_status = _fn_run_episode(Display.fn_move_cursor)
print('episode number: {} status = {}'.format(episode_num, episode_status))
if 'TEST_MODE' in app_info:
if app_info.TEST_MODE: # ONLY 1 episode needed
break
def _fn_run_episode(fn_move_cursor):
new_state = None
state = _env.fn_reset_env()
action = fn_get_max_q_actions(state, app_info.EPSILON)
Display.fn_update_qvalue(state, fn_get_q_actions(state))
continue_running = True
while continue_running:
new_state, reward, done, _ = _env.fn_take_step(action)
continue_running = reward == 0
if fn_move_cursor is not None:
fn_move_cursor(state, new_state)
new_action = fn_get_max_q_actions(new_state, app_info.EPSILON)
_fn_update_knowledge(state, action, reward, new_state, new_action)
Display.fn_update_qvalue(state, fn_get_q_actions(state))
action = new_action
state = new_state
if fn_move_cursor is not None:
fn_move_cursor(new_state)
return continue_running
return fn_run_sarsa
| 37.019231 | 89 | 0.652987 | 266 | 1,925 | 4.270677 | 0.255639 | 0.055458 | 0.06338 | 0.034331 | 0.276408 | 0.237676 | 0.139085 | 0.139085 | 0.139085 | 0.139085 | 0 | 0.002869 | 0.275844 | 1,925 | 51 | 90 | 37.745098 | 0.812052 | 0.010909 | 0 | 0.097561 | 0 | 0 | 0.021556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0.02439 | 0.02439 | 0 | 0.170732 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0e76409aeef486ffdd393f481e6ae875e7c355 | 7,338 | py | Python | test/pytest/test_model_archiver.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | 2 | 2022-03-26T05:17:45.000Z | 2022-03-26T05:44:53.000Z | test/pytest/test_model_archiver.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | null | null | null | test/pytest/test_model_archiver.py | vvekic/serve | f02a56bf1f0de1705fd9f399c1115d36e343c90c | [
"Apache-2.0"
] | 1 | 2020-09-14T08:31:34.000Z | 2020-09-14T08:31:34.000Z | import subprocess
import time
import os
import glob
import requests
import json
import test_utils
MODEL_SFILE_NAME = 'resnet18-f37072fd.pth'
def setup_module(module):
test_utils.torchserve_cleanup()
response = requests.get('https://download.pytorch.org/models/' + MODEL_SFILE_NAME, allow_redirects=True)
    with open(test_utils.MODEL_STORE + "/" + MODEL_SFILE_NAME, 'wb') as model_file:
        model_file.write(response.content)
def teardown_module(module):
test_utils.torchserve_cleanup()
def model_archiver_command_builder(model_name=None, version=None, model_file=None, serialized_file=None, handler=None, extra_files=None, force=False):
cmd = "torch-model-archiver"
if model_name:
cmd += " --model-name {0}".format(model_name)
if version:
cmd += " --version {0}".format(version)
if model_file:
cmd += " --model-file {0}".format(model_file)
if serialized_file:
cmd += " --serialized-file {0}".format(serialized_file)
if handler:
cmd += " --handler {0}".format(handler)
if extra_files:
cmd += " --extra-files {0}".format(extra_files)
if force:
cmd += " --force"
cmd += " --export-path {0}".format(test_utils.MODEL_STORE)
return cmd
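# For illustration (not part of the original tests), a call such as
#   model_archiver_command_builder("resnet-18", "1.0", handler="image_classifier")
# builds roughly:
#   torch-model-archiver --model-name resnet-18 --version 1.0 \
#       --handler image_classifier --export-path <MODEL_STORE>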
def create_resnet_archive(model_name="resnset-18", version="1.0", force=False):
cmd = model_archiver_command_builder(
model_name,
version,
"{}/examples/image_classifier/resnet_18/model.py".format(test_utils.CODEBUILD_WD),
"{}resnet18-f37072fd.pth".format(test_utils.MODEL_STORE),
"image_classifier",
"{}/examples/image_classifier/index_to_name.json".format(test_utils.CODEBUILD_WD),
force
)
print(cmd)
cmd = cmd.split(" ")
return subprocess.run(cmd).returncode
def clean_mar_file(mar_name):
path = "{}{}".format(test_utils.MODEL_STORE, mar_name)
if os.path.exists(path):
os.remove(path)
def test_multiple_model_versions_registration():
# Download resnet-18 model
create_resnet_archive("resnet-18", "1.0")
create_resnet_archive("resnet-18_v2", "2.0")
test_utils.start_torchserve(no_config_snapshots=True)
response = requests.get('http://localhost:8081/models/resnet18/all')
print(response.content)
test_utils.register_model("resnet18", "resnet-18.mar")
test_utils.register_model("resnet18", "resnet-18_v2.mar")
response = requests.get('http://localhost:8081/models/resnet18/all')
time.sleep(5)
# Verify that we can use the list models api to get all versions of resnet-18
assert len(json.loads(response.content)) == 2
def test_duplicate_model_registration_using_local_url_followed_by_http_url():
# Registration through local mar url is already complete in previous test case.
# Now try to register same model using http url in this next step
response = test_utils.register_model("resnet18", "https://torchserve.pytorch.org/mar_files/resnet-18.mar")
time.sleep(15)
if json.loads(response.content)['code'] == 500 and \
json.loads(response.content)['type'] == "InternalServerException":
assert True, "Internal Server Exception, " \
"Model file already exists!! Duplicate model registration request"
test_utils.unregister_model("resnet18")
time.sleep(10)
else:
assert False, "Something is not right!! Successfully re-registered existing model "
def test_duplicate_model_registration_using_http_url_followed_by_local_url():
# Register using http url
clean_mar_file("resnet-18.mar")
response = test_utils.register_model("resnet18", "https://torchserve.pytorch.org/mar_files/resnet-18.mar")
create_resnet_archive()
response = test_utils.register_model("resnet18", "resnet-18.mar")
if json.loads(response.content)['code'] == 409 and \
json.loads(response.content)['type'] == "ConflictStatusException":
assert True, "Conflict Status Exception, " \
"Duplicate model registration request"
response = test_utils.unregister_model("resnet18")
time.sleep(10)
else:
assert False, "Something is not right!! Successfully re-registered existing model "
def test_model_archiver_to_regenerate_model_mar_without_force():
clean_mar_file("resnet-18.mar")
response = create_resnet_archive("resnet-18", "1.0")
response = create_resnet_archive("resnet-18", "1.0")
try:
        assert (0 != response), "Mar file couldn't be created. Use the -f option"
finally:
for f in glob.glob("resnet*.mar"):
os.remove(f)
def test_model_archiver_to_regenerate_model_mar_with_force():
clean_mar_file("resnet-18.mar")
response = create_resnet_archive("resnet-18", "1.0")
response = create_resnet_archive("resnet-18", "1.0", force=True)
try:
assert (0 == response), "Successfully created Mar file by using -f option"
finally:
for f in glob.glob("resnet*.mar"):
os.remove(f)
def test_model_archiver_without_handler_flag():
cmd = model_archiver_command_builder(
"resnet-18",
"1.0",
"{}/examples/image_classifier/resnet_18/model.py".format(test_utils.CODEBUILD_WD),
"{}/resnet18-f37072fd.pth".format(test_utils.MODEL_STORE),
None,
"{}/examples/image_classifier/index_to_name.json".format(test_utils.CODEBUILD_WD)
)
cmd = cmd.split(" ")
try:
assert (0 != subprocess.run(cmd).returncode), "Mar file couldn't be created." \
"No handler specified"
finally:
for f in glob.glob("resnet*.mar"):
os.remove(f)
def test_model_archiver_without_model_name_flag():
cmd = model_archiver_command_builder(
None,
"1.0",
"{}/examples/image_classifier/resnet_18/model.py".format(test_utils.CODEBUILD_WD),
"{}/resnet18-f37072fd.pth".format(test_utils.MODEL_STORE),
"image_classifier",
"{}/examples/image_classifier/index_to_name.json".format(test_utils.CODEBUILD_WD)
)
cmd = cmd.split(" ")
assert (0 != subprocess.run(cmd).returncode), "Mar file couldn't be created." \
"No model_name specified"
def test_model_archiver_without_model_file_flag():
cmd = model_archiver_command_builder(
"resnet-18",
"1.0",
None,
"{}/resnet18-f37072fd.pth".format(test_utils.MODEL_STORE),
"image_classifier",
"{}/examples/image_classifier/index_to_name.json".format(test_utils.CODEBUILD_WD),
True
)
cmd = cmd.split(" ")
try:
assert (0 == subprocess.run(cmd).returncode)
finally:
for f in glob.glob("resnet*.mar"):
os.remove(f)
def test_model_archiver_without_serialized_flag():
cmd = model_archiver_command_builder(
"resnet-18",
"1.0",
"{}/examples/image_classifier/resnet_18/model.py".format(test_utils.CODEBUILD_WD),
None,
"image_classifier",
"{}/examples/image_classifier/index_to_name.json".format(test_utils.CODEBUILD_WD)
)
cmd = cmd.split(" ")
assert (0 != subprocess.run(cmd).returncode), "Mar file couldn't be created." \
"No serialized flag specified"
| 34.613208 | 150 | 0.661897 | 924 | 7,338 | 5.022727 | 0.175325 | 0.052359 | 0.048481 | 0.046542 | 0.632622 | 0.615385 | 0.520362 | 0.50097 | 0.471881 | 0.449041 | 0 | 0.027725 | 0.213546 | 7,338 | 211 | 151 | 34.777251 | 0.776469 | 0.03625 | 0 | 0.468354 | 0 | 0 | 0.268186 | 0.082791 | 0 | 0 | 0 | 0 | 0.06962 | 1 | 0.088608 | false | 0 | 0.044304 | 0 | 0.14557 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb0fd7461ac334e4ca39e6b98dfce5e7a36f1733 | 2,176 | py | Python | scripts/isuag/zero_daily_precip.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | 1 | 2019-10-07T17:01:24.000Z | 2019-10-07T17:01:24.000Z | scripts/isuag/zero_daily_precip.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | scripts/isuag/zero_daily_precip.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """Sometimes we need to completely zero out precip for a day
Likely due to water being dumped into the tipping bucket to clean it :/
"""
from __future__ import print_function
import sys
import datetime
import pytz
from pyiem.util import get_dbconn
def zero_hourly(station, sts, ets):
"""Zero out the hourly data"""
pgconn = get_dbconn('isuag')
cursor = pgconn.cursor()
for table in ['sm_hourly', 'sm_15minute']:
cursor.execute("""
UPDATE """ + table + """
SET rain_mm_tot_qc = 0, rain_mm_tot_f = 'Z', rain_mm_tot = 0
WHERE station = %s and valid > %s and valid <= %s
""", (station, sts, ets))
print("%s updated %s rows" % (table, cursor.rowcount))
cursor.close()
pgconn.commit()
def zero_daily(station, date):
"""Zero out the daily data"""
pgconn = get_dbconn('isuag')
cursor = pgconn.cursor()
cursor.execute("""
UPDATE sm_daily
SET rain_mm_tot_qc = 0, rain_mm_tot_f = 'Z', rain_mm_tot = 0
WHERE station = %s and valid = %s
""", (station, date))
print("sm_daily updated %s rows" % (cursor.rowcount, ))
cursor.close()
pgconn.commit()
def zero_iem(station, date):
"""Zero out the hourly data"""
pgconn = get_dbconn('iem')
cursor = pgconn.cursor()
cursor.execute("""
UPDATE summary s
SET pday = 0
FROM stations t
WHERE s.iemid = t.iemid and t.id = %s and t.network = 'ISUSM'
and day = %s
""", (station, date))
print("summary updated %s rows" % (cursor.rowcount, ))
cursor.close()
pgconn.commit()
def main(argv):
"""Go Main"""
station = argv[1]
date = datetime.date(int(argv[2]), int(argv[3]), int(argv[4]))
    # Our weather stations are in CST, so the 'daily' precip is for a 6z to 6z
    # period and not a calendar day; the hourly values are timestamped in arrears
sts = datetime.datetime(date.year, date.month, date.day, 6)
sts = sts.replace(tzinfo=pytz.utc)
ets = sts + datetime.timedelta(hours=24)
zero_hourly(station, sts, ets)
zero_daily(station, date)
zero_iem(station, date)
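    # Worked example of the 6z-to-6z window (illustrative, invented date):
    # for 2017-06-01, sts = 2017-06-01 06:00:00+00:00 and
    # ets = 2017-06-02 06:00:00+00:00, i.e. midnight to midnight in CST (UTC-6).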
if __name__ == '__main__':
main(sys.argv)
| 29.405405 | 78 | 0.619485 | 312 | 2,176 | 4.182692 | 0.346154 | 0.027586 | 0.041379 | 0.043678 | 0.451341 | 0.390038 | 0.308812 | 0.308812 | 0.168582 | 0.168582 | 0 | 0.009792 | 0.249081 | 2,176 | 73 | 79 | 29.808219 | 0.788862 | 0.160846 | 0 | 0.346154 | 0 | 0.019231 | 0.320356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.096154 | 0 | 0.173077 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb123dfd4bea8d7fa36ad8f1991ad5d0085d8387 | 15,843 | py | Python | Fibonnacci GUI VERSION.py | Nuklear-s-Team/fibonacciThing | 2b32f10a87faf87b6702e7caa20c69d8df6873e3 | [
"CC0-1.0"
] | null | null | null | Fibonnacci GUI VERSION.py | Nuklear-s-Team/fibonacciThing | 2b32f10a87faf87b6702e7caa20c69d8df6873e3 | [
"CC0-1.0"
] | null | null | null | Fibonnacci GUI VERSION.py | Nuklear-s-Team/fibonacciThing | 2b32f10a87faf87b6702e7caa20c69d8df6873e3 | [
"CC0-1.0"
] | 1 | 2020-04-08T18:32:54.000Z | 2020-04-08T18:32:54.000Z | # Copyright 2020 Khang Nguyen and Tim Merrill
# Improved by Luzgog
# Made with the same translate function as the original.
import tkinter as tkinter
from tkinter import messagebox
from tkinter import scrolledtext
from Fibonacci_Encoder import encode, decode, encodeReversed, decodeReversed, randomGen, encodeRandom, generatefromkey, decodeRandom
root = tkinter.Tk(className="Fibonacci Encoder")
root.resizable(0, 0)
help = tkinter.Toplevel(width=90, height=90)
help.withdraw()
translations = tkinter.Toplevel(width=90, height=90)
translations.withdraw()
lol = tkinter.StringVar()
mode = tkinter.StringVar()
lastTranslation = tkinter.StringVar()
remTrue = tkinter.IntVar()
key = tkinter.StringVar()
lastTranslation.set("Last Tranlsation: None")
currentTranslation = ""
#root.iconphoto(False, tkinter.PhotoImage(file="icon.png"))
decodeNames = ["de", "decode", "De", "Decode", "d", "D"]
encodeNames = ["en", "encode", "En", "Encode", "e", "E"]
def showHelp():
help.deiconify()
def closeHelp():
help.withdraw()
def error():
messagebox.showerror("Error", "Invalid Input")
def keyMissingError():
messagebox.showerror("Error", "No Key")
def translate():
global currentTranslation
task = lol.get()
message = T.get()
version = mode.get()
getKey = RandomEncode.get()
global key
T.delete(0, tkinter.END)
if task in encodeNames:
if version == "Regular":
try:
totranslate= encode(message)
except BaseException:
error()
elif version == "Reversed":
try:
totranslate = encodeReversed(message)
except BaseException:
error()
elif version == "Random":
try:
if getKey == "":
randomDict, key2 = randomGen()
key.set(key2)
key2 = ''
totranslate = encodeRandom(message, randomDict)
getKey = ""
randomDict = {}
else:
totranslate = encodeRandom(message, generatefromkey(getKey))
getKey = ""
except BaseException:
error()
elif task in decodeNames:
if version == "Regular":
try:
totranslate = decode(message)
except BaseException:
error()
elif version == "Reversed":
try:
totranslate = decodeReversed(message)
except BaseException:
error()
elif version == "Random":
if getKey == "":
keyMissingError()
else:
totranslate = decodeRandom(message, generatefromkey(getKey))
T.insert(tkinter.END, totranslate)
link.config(state=tkinter.NORMAL)
keyDisplay.config(state=tkinter.NORMAL)
link.delete(1.0, tkinter.END)
keyDisplay.delete(1.0, tkinter.END)
mode2 = mode.get()
link.insert(1.0, "Last Translation: (" + mode2 + " Mode) " + message + " <-> " + totranslate)
currentTranslation = "(" + mode2 + " Mode) " + message + " <-> " + totranslate
keyDisplay.insert(1.0, str(key.get()))
key.set("")
link.config(state=tkinter.DISABLED)
keyDisplay.config(state=tkinter.DISABLED)
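# Example flow (illustrative): with mode "Encode", keyset "Regular" and input
# "hi", translate() replaces the entry text with the encoded string, and the
# last-translation box shows "Last Translation: (Regular Mode) hi <-> <code>".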
def updateSaves():
saves.config(state=tkinter.NORMAL)
saves.delete(1.0, tkinter.END)
with open("savedTranslations.txt", "r") as file:
textIn = file.read()
saves.insert(1.0, textIn)
saves.config(state=tkinter.DISABLED)
# insert text
def saveTranslation():
global currentTranslation
with open("savedTranslations.txt", 'a') as file:
file.write("\n\n")
file.write(currentTranslation)
updateSaves()
# save file
def clearSaves():
with open("savedTranslations.txt", "w") as file:
file.write("")
updateSaves()
def openTrans():
translations.deiconify()
def closeTrans():
translations.withdraw()
def copyKey():
root.clipboard_clear()
root.clipboard_append(keyDisplay.get(1.0, tkinter.END))
def remember():
if remTrue.get() == 0:
with open("savedKey.txt", "w") as file:
file.write("")
else:
with open("savedKey.txt", "w") as file:
file.write(RandomEncode.get())
def initialize():
saveTranslation()
with open("savedKey.txt", "r") as file:
key3 = file.read()
if key3 == "":
pass
else:
RandomEncode.insert(1, key3)
Encode = tkinter.Radiobutton(root, text='Encode', variable=lol, value="Encode", indicatoron=0, width=42, selectcolor="light green").grid(row=1, column=0, sticky=tkinter.W)
Decode = tkinter.Radiobutton(root, text='Decode', variable=lol, value="Decode", indicatoron=0, width=42, selectcolor="light green").grid(row=1, column=1, sticky=tkinter.E)
Regular = tkinter.Radiobutton(root, text='Regular', variable=mode, value="Regular", indicatoron=0, width=42, selectcolor="cyan").grid(row=4, column=0, sticky=tkinter.W)
Reversed = tkinter.Radiobutton(root, text='Reversed', variable=mode, value="Reversed", indicatoron=0, width=42, selectcolor="cyan").grid(row=4, column=1, sticky=tkinter.E)
Random = tkinter.Radiobutton(root, text="Use Random Dictionary", variable=mode, value="Random", indicatoron=0, width=42, selectcolor="gold").grid(row=5, column=0, sticky=tkinter.N)
RandomEncode = tkinter.Entry(root, width=48)
RandomEncode.grid(row=5, column=1)
J = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="--------------------------------------------------------Mode--------------------------------------------------------")
J.set(100)
J.grid(row=0, sticky=tkinter.N, columnspan=2)
R = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="--------------------------------------------Key Sets--------------------------------------------")
R.set(100)
R.grid(row=3, sticky=tkinter.N, columnspan=2)
T = tkinter.Entry(root, width=100)
L = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="------------------------------------------Input Below------------------------------------------")
L.set(100)
L.grid(row=6, sticky=tkinter.N, columnspan=2)
T.grid(row=7, column=0, sticky=tkinter.W, columnspan=2)
X = tkinter.Scale(root, state=tkinter.DISABLED, length=500, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200)
X.set(100)
X.grid(row=8, sticky=tkinter.N, columnspan=2)
Translate = tkinter.Button(root, text="Translate", command=translate, width=42, activebackground="light green").grid(row=9, column=0, sticky=tkinter.W)
Quit = tkinter.Button(root, text="Quit", command=root.quit, width=85, activebackground="red").grid(row=10, columnspan=2, sticky=tkinter.E)
T.insert(tkinter.END, "")
Help = tkinter.Button(root, text="Help", command=showHelp, width=42, activebackground="blue").grid(row=9, column=1)
X = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200)
X.set(100)
X.grid(row=11, sticky=tkinter.N, columnspan=2)
link = scrolledtext.ScrolledText(root, width=65, height=5, wrap="word", font="consolas", state=tkinter.DISABLED)
link.grid(row=12, sticky=tkinter.W, columnspan=2)
uti = tkinter.Scale(root, state=tkinter.DISABLED, length=600, troughcolor="black", width=1, orient=tkinter.HORIZONTAL, showvalue=0, sliderlength=200, label="-------------------------------------------------------Utilities-------------------------------------------------------")
uti.set(100)
uti.grid(row=13, sticky=tkinter.N, columnspan=2)
save = tkinter.Button(root, text="Save this translation", command=saveTranslation, width=42, activebackground="light green").grid(row=14, column=0, sticky=tkinter.N)
copy = tkinter.Button(root, text="Copy this key", command=copyKey, width=42, activebackground="light green").grid(row=14, column=1, sticky=tkinter.N)
openTranslations = tkinter.Button(root, text="Open Saved Translations", command=openTrans, width=42, activebackground="light green").grid(row=15, column=0, sticky=tkinter.N)
rememberKey = tkinter.Checkbutton(root, text="Remember my key", var=remTrue, command=remember).grid(row=15, column=1, sticky=tkinter.N)
keyDisplay = scrolledtext.ScrolledText(root, height=0.5, width=73, wrap="word", state=tkinter.DISABLED)
keyDisplay.grid(row=16, sticky=tkinter.W, columnspan=2)
info = tkinter.Label(root, text="Created by Khang Nguyen and Luzgog. Github link: https://github.com/PG-Development/Fibonacci-Encoder")
info.grid(row=17, sticky=tkinter.E, columnspan=2)
# help window below
title = tkinter.Label(help, text="Help Menu").grid(row=0, sticky=tkinter.N)
helpText = scrolledtext.ScrolledText(help, width=65, height=20, wrap="word")
helpText.grid(row=1, sticky=tkinter.N)
helpText.insert(1.0, "Thanks for downloading this encoder! My team and I have worked hard on it. \n \n"
"Important: When you want to close this, do NOT press the x at the top right. Press the exit help menu button at the bottom.\n \n"
"When you download this repository, you should have gotten 2 .py files: the Fibonacci_Encoder, and the Fibonacci GUI Version file. "
"Make sure they are in the same folder. The GUI Version is the much more convenient version of this program, but it uses the same functions. "
"When you open up the GUI file, you should see a small window pop up on your screen. This is the main application window, built with tkinter. "
"You select your mode at the top, choosing from either Encode, Decode, or Decode from Random. To encode from a random, select Encode from random in the keysets. "
"Below those, you should see keysets. You have regular, reversed, and encode from random. Next to both of the random choices you see inputs. "
"Below the modes and keysets you see an input, to place your text inside, and this is where the message will come out. \n \n"
"Regular Mode\n"
"Regular mode is the base mode of this encoder. It uses a set dictionary of keys and items, to encode your message. "
"To use this mode, simply choose the \"Encode\" mode and the \"Regular\" Keyset. Once you press translate, your message will be replaced "
"by the encoded version. Below your output, there is a last translation box which shows you the original. "
"To decode, just switch your mode to decode, and input the code you received from your friend into the box. "
"It should change before your very eyes into comprehensible text.\n\n"
"Reversed Mode\n"
"Reversed mode is a separate, different keyset then the regular mode. It takes the code for a letter, and switches it around to the opposite letter. "
"For example, the code for a is now the code for z, and the code for b is now the code for y. This means the code for z is now the code for a, and so on. "
"To use this keyset, choose which mode you want, and then instead of selecting the \"Regular\" keyset, choose the \"Reversed\" keyset.\n\n"
"Random Mode\n"
"Random Mode scrambles the codes for the letters to random locations. The total number of possible dictionaries is 403,291,461,126,605,635,584,000,000, aka 403 septillion. "
"That's a lot of possible combinations! And every time you use it, it generates a random choice. Now, that's cool, but say you want to retrieve an already generated "
"dictionary. That's easy! You see, whenever you generate a new dictionary, a key will appear in the lower text box. Just press the \"Copy this key\" buttton "
"to copy the key.\n\n"
"To encode using this mode, you first select the \"Encode\" Button. Then select the \"Use Random Dictionary\" choice. If you already have a key, put it in the "
"entry box next to the button. If you do not have a key, simply leave the box blank. Then press translate. You should get a result and a key. If you want to now encode "
"more messages using the same key, just copy the key and put it in the box. When you send messages to someone else, send them the key privately, so then you can"
" send them the message in public and other people will get gibberish.\n\n"
"To decode using this mode, you must have a key, or else you will get an error. Put the key in the box next to \"Use Random Dictionary\". Then select \"Decode\" "
"and \"Use Random Dictionary\". Finally, put in the message in the lower entry box. When you press Translate, you should get a good message.\n\n"
"Utilities\n"
"There are 3 utilities buttons: the \"Save this translation\" button, the \"Copy this key\" button, and the \"Open Saved Translations\" button. "
"These are here to help you use the app more efficiently.\n\n"
"The \"Save This Translations\" button takes the translation you just did and puts it into another text window that you can open. This text resets everytime you "
"close the app, so keep the app open go save your translations. This feature will be improved in the future to save the translation to a text file. To open this text "
"window, just press the \"Open Saved Translations\" button. The \"Copy This Key\" button just copies the key if you have one.\n\n"
"A new feature is the \"Remember my key\" feature, which can save your key for another time. Whenever you want to save your key, just check it. To update your "
"key, you must uncheck it and then recheck it to make changes to the .txt file. If you want to use this feature, you must download the savedKey.txt file. To "
"reset your key, you can just uncheck it again.")
helpText.tag_add("important", "3.0", "3.9")
helpText.tag_add("regularTag", "7.0", "7.12", "10.0", "10.13", "13.0", "13.12", "20.0", "20.9")
helpText.tag_config("important", foreground="red", font=("Consolas", 13, "bold", "italic"))
helpText.tag_config("regularTag", foreground="blue", font=("Consolas", 12, "bold", "italic"))
helpText.config(state=tkinter.DISABLED)
closeHelpButton = tkinter.Button(help, text="Exit Help Menu", command=closeHelp, activebackground="red").grid(row=2, sticky=tkinter.N)
# saved translations below
# saved translations below
titleTrans = tkinter.Label(translations, text="Saved Translations").grid(row=0, sticky=tkinter.N)
saves = scrolledtext.ScrolledText(translations, width=65, height=20, wrap="word")
saves.grid(row=1, sticky=tkinter.N)
saves.config(state=tkinter.DISABLED)
closeTransButton = tkinter.Button(translations, text="Exit Saved Translations", command=closeTrans, activebackground="red", width=38).grid(row=2, sticky=tkinter.W)
clearSavesButton = tkinter.Button(translations, text="Clear Saves", command=clearSaves, activebackground="red", width=38).grid(row=2, sticky=tkinter.E)
initialize()
tkinter.mainloop() | 57.194946 | 279 | 0.640977 | 2,059 | 15,843 | 4.92812 | 0.211753 | 0.020696 | 0.022075 | 0.013797 | 0.249433 | 0.183601 | 0.14487 | 0.131467 | 0.13127 | 0.093032 | 0 | 0.023367 | 0.222054 | 15,843 | 277 | 280 | 57.194946 | 0.799919 | 0.016727 | 0 | 0.201794 | 0 | 0.107623 | 0.359119 | 0.034199 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058296 | false | 0.004484 | 0.03139 | 0 | 0.089686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb1432cc0316cac36dc4f14023445d368e2c8a51 | 790 | py | Python | Binary Search/081. Search in Rotated Sorted Array II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 138 | 2020-02-08T05:25:26.000Z | 2021-11-04T11:59:28.000Z | Binary Search/081. Search in Rotated Sorted Array II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | null | null | null | Binary Search/081. Search in Rotated Sorted Array II.py | beckswu/Leetcode | 480e8dc276b1f65961166d66efa5497d7ff0bdfd | [
"MIT"
] | 24 | 2021-01-02T07:18:43.000Z | 2022-03-20T08:17:54.000Z | """
81. Search in Rotated Sorted Array II
"""
class Solution:
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: bool
"""
l, r, n = 0, len(nums)-1, len(nums)
while l<=r:
while r>l and nums[l+1]==nums[l]:
l+=1
while r>l and nums[r-1] == nums[r]:
r-=1
mid = (l+r)>>1
if nums[mid] == target: return True
if target > nums[mid]:
if nums[l]>nums[mid] and nums[l]<=target: r = mid-1
else: l = mid+1
else:
if nums[mid]>nums[r] and nums[r]>=target: l = mid+1
else: r = mid-1
return False
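

if __name__ == "__main__":
    # Illustrative sanity checks (not part of the original solution)
    assert Solution().search([2, 5, 6, 0, 0, 1, 2], 0)
    assert not Solution().search([2, 5, 6, 0, 0, 1, 2], 3)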
| 29.259259 | 68 | 0.405063 | 106 | 790 | 3.018868 | 0.301887 | 0.0875 | 0.075 | 0.0625 | 0.0875 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030374 | 0.458228 | 790 | 26 | 69 | 30.384615 | 0.71729 | 0.113924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb16c061583288ca3f3f83e5792419424b095ace | 2,777 | py | Python | methods/ham.py | vickyyu90/maxnet | 38c49c39dbe77b2984d8cdb2f4087310b2220593 | [
"Apache-2.0"
] | null | null | null | methods/ham.py | vickyyu90/maxnet | 38c49c39dbe77b2984d8cdb2f4087310b2220593 | [
"Apache-2.0"
] | null | null | null | methods/ham.py | vickyyu90/maxnet | 38c49c39dbe77b2984d8cdb2f4087310b2220593 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import torch
import torch.nn.functional as F
class FeatureExtractor():
def __init__(self, model, intermediate_layers):
self.model = model
self.intermediate_layers = intermediate_layers[::-1]
self.weights = []
self.num = len(self.intermediate_layers)
self.activations = []
def save_weight(self, grad):
self.weights.append(grad)
def __call__(self, x, intermediate_layers, class_idx):
hams = []
logit, conv3, conv4, conv5 = self.model(x)
        logit = F.softmax(logit, dim=1)  # explicit class dimension (avoids the implicit-dim warning)
score = logit[:, class_idx].squeeze()
if torch.cuda.is_available():
score = score.cuda()
self.model.zero_grad()
score.backward(retain_graph=True)
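        # Each block below builds a Grad-CAM-style saliency map: the channel-wise
        # mean of activation * gradient, min-max normalised, then passed through
        # a ReLU.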
saliency_map = torch.mul(conv3, self.model.grads['conv3']).mean(dim=1, keepdim=True)
norm_saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())
hams.append(F.relu(norm_saliency_map, inplace=True))
saliency_map = torch.mul(conv4, self.model.grads['conv4']).mean(dim=1, keepdim=True)
norm_saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())
hams.append(F.relu(norm_saliency_map, inplace=True))
saliency_map = torch.mul(conv5, self.model.grads['conv5']).mean(dim=1, keepdim=True)
norm_saliency_map = (saliency_map - saliency_map.min()) / (saliency_map.max() - saliency_map.min())
hams.append(F.relu(norm_saliency_map, inplace=True))
return hams
class HAM():
def __init__(self, model, intermediate_layers):
self.model = model
self.intermediate_layers = intermediate_layers
self.extractor = FeatureExtractor(self.model, intermediate_layers)
def __call__(self, input, device, intermediate_layers, class_idx):
hams = self.extractor(input, intermediate_layers, class_idx)
for i in range(len(intermediate_layers)):
if i == 0:
aggregated_ham = hams[i]
else:
tmp = F.interpolate(hams[i], hams[0].shape[-3:], mode='trilinear', align_corners=False)
aggregated_ham = torch.mul(tmp, torch.ge(tmp, aggregated_ham)) + torch.mul(aggregated_ham, torch.ge(aggregated_ham, tmp))
B, L, C, H, W = aggregated_ham.shape
aggregated_ham = aggregated_ham.view(B, -1)
aggregated_ham -= aggregated_ham.min(dim=1, keepdim=True)[0]
aggregated_ham /= aggregated_ham.max(dim=1, keepdim=True)[0]
aggregated_ham = aggregated_ham.view(B, L, C, H, W)
aggregated_ham = F.interpolate(aggregated_ham, input.shape[-3:], mode='trilinear', align_corners=False)
return aggregated_ham | 43.390625 | 137 | 0.658985 | 358 | 2,777 | 4.882682 | 0.24581 | 0.132151 | 0.051487 | 0.075515 | 0.519451 | 0.471968 | 0.451945 | 0.39016 | 0.39016 | 0.342105 | 0 | 0.010096 | 0.21534 | 2,777 | 64 | 138 | 43.390625 | 0.792106 | 0 | 0 | 0.192308 | 0 | 0 | 0.011879 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096154 | false | 0 | 0.076923 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb177b31aa9101d899611eb71a8b7cb4ca24eeba | 1,173 | py | Python | wicket/commands/__init__.py | GeekMasher/wicket-discord-docker-bot | 5f7bfa466718018fba3babe1357115ac19a8ec18 | [
"MIT"
] | null | null | null | wicket/commands/__init__.py | GeekMasher/wicket-discord-docker-bot | 5f7bfa466718018fba3babe1357115ac19a8ec18 | [
"MIT"
] | null | null | null | wicket/commands/__init__.py | GeekMasher/wicket-discord-docker-bot | 5f7bfa466718018fba3babe1357115ac19a8ec18 | [
"MIT"
] | null | null | null | import discord
from wicket.commands.list import botListServices
from wicket.commands.docker_commands import *
async def botHelp(cxt, message: discord.Message, **kwargs):
TEXT = "Help Options:\n"
for name, command in COMMANDS.items():
TEXT += f" - `{cxt.__PREFIX__} {name}` - {command.get('description')}"
if command.get("auth"):
TEXT += " (auth required)"
TEXT += "\n"
await message.channel.send(TEXT)
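# Example of the generated help text (illustrative; assumes cxt.__PREFIX__ == "!wicket"):
#   Help Options:
#    - `!wicket help` - Get help with the bot
#    - `!wicket start` - Start a service (auth required)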
COMMANDS = {
"help": {"func": botHelp, "auth": False, "description": "Get help with the bot"},
"list": {
"func": botListServices,
"auth": False,
"description": "Get a list of services",
},
"start": {
"func": botStartServices,
"auth": True,
"description": "Start a service",
},
"restart": {
"func": botRestartServices,
"auth": True,
"description": "Restart a service",
},
"stop": {
"func": botStopServices,
"auth": True,
"description": "Stop a service",
},
"update": {
"func": botUpdateServices,
"auth": True,
"description": "Update the service",
},
}
| 24.4375 | 85 | 0.553282 | 113 | 1,173 | 5.699115 | 0.451327 | 0.049689 | 0.118012 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.287298 | 1,173 | 47 | 86 | 24.957447 | 0.770335 | 0 | 0 | 0.102564 | 0 | 0 | 0.295823 | 0.02387 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb186d9ac7e63873279e945b49b97d3ba19eab1c | 534 | py | Python | P0003.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | P0003.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | P0003.py | sebastianaldi17/ProjectEuler | 19562fba3456ec904bcc264fb786a92610e42622 | [
"MIT"
] | null | null | null | # Largest prime factor
# https://projecteuler.net/problem=3
# Sieve of Eratosthenes approach
# Still slow due to the iteration up to sqrt(n)
# Note: only prime factors up to sqrt(n) are scanned, which suffices here
# because every prime factor of 600851475143 lies below sqrt(n).
from math import sqrt
question = 600851475143
def solve(n):
ans = 1
limit = int(sqrt(n))
prime = [True for i in range(limit+1)]
p = 2
while p <= limit:
if prime[p]:
for i in range(p*2, limit+1, p):
prime[i] = False
p += 1
for i in range(2, limit+1):
if n%i==0 and prime[i]: ans = i
return ans
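# Sanity check using the worked example from the problem statement:
assert solve(13195) == 29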
print(solve(question)) | 25.428571 | 44 | 0.586142 | 87 | 534 | 3.597701 | 0.517241 | 0.038339 | 0.057508 | 0.105431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058511 | 0.29588 | 534 | 21 | 45 | 25.428571 | 0.773936 | 0.241573 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb1bdf6c26b937672babc629910c4f289f158546 | 3,893 | py | Python | test/unit/extractor/test_sticky_relations.py | willangenent/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | 8 | 2016-10-19T14:15:34.000Z | 2020-06-23T09:37:02.000Z | test/unit/extractor/test_sticky_relations.py | freewilll/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | null | null | null | test/unit/extractor/test_sticky_relations.py | freewilll/abridger | 6daa80f7360339376b38544ce60694c5addaa30f | [
"MIT"
] | null | null | null | import pytest
from abridger.extraction_model import Relation
from abridger.schema import SqliteSchema
from test.unit.extractor.base import TestExtractorBase
class TestExtractorStickyRelations(TestExtractorBase):
@pytest.fixture()
def schema_out(self):
'''
test1 -> sticky -> test3 <- test2
-> non_sticky -> test3 <- test2
'''
for stmt in [
'''
CREATE TABLE non_sticky (
id INTEGER PRIMARY KEY,
test3_id INTEGER REFERENCES test3
);
''', '''
CREATE TABLE sticky (
id INTEGER PRIMARY KEY,
test3_id INTEGER REFERENCES test3
);
''', '''
CREATE TABLE test1 (
id INTEGER PRIMARY KEY,
sticky INTEGER REFERENCES sticky,
non_sticky INTEGER REFERENCES non_sticky
);
''', '''
CREATE TABLE test2 (
id INTEGER PRIMARY KEY,
test3_id INTEGER REFERENCES test3
);
''', '''
CREATE TABLE test3 (
id INTEGER PRIMARY KEY
);
''',
]:
self.database.execute(stmt)
return SqliteSchema.create_from_conn(self.database.connection)
@pytest.fixture()
def data_out(self, schema_out):
non_sticky = schema_out.tables[0]
sticky = schema_out.tables[1]
table1 = schema_out.tables[2]
table2 = schema_out.tables[3]
table3 = schema_out.tables[4]
rows = [
(table3, (1,)),
(table3, (2,)),
(table2, (1, 1)),
(table2, (2, 2)),
(sticky, (1, 1)),
(non_sticky, (1, 2)),
(table1, (1, 1, None)),
(table1, (2, None, 1)),
]
self.database.insert_rows(rows)
self.data_everything_except_table2 = (
rows[0:2] + # table 3
rows[4:6] + # sticky and non_sticky
rows[6:8] # table 1
)
self.data_everything_except_table2_non_sticky_row = (
rows[0:2] + # table 3
rows[4:6] + # sticky and non_sticky
rows[6:8] + # table 1
rows[2:3] # table 2, row 1
)
return rows
def test1(self, schema_out, data_out):
# Check fetch without any relations, which won't grab any rows in
# table 2
table = {'table': 'test1'}
self.check_one_subject(schema_out, [table],
self.data_everything_except_table2)
def test2(self, schema_out, data_out):
# Check fetch without sticky relations, which grabs everything
table = {'table': 'test1'}
relation = {'table': 'test2', 'column': 'test3_id'}
self.check_one_subject(schema_out, [table], data_out,
relations=[relation])
def test3(self, schema_out, data_out):
# Check fetch without sticky relations, but flag test2 as sticky
# this should not fetch anything in test2 since there is no sticky
# trail
table = {'table': 'test1'}
def outgoing_sticky_rel(table, col):
return {'table': table, 'column': col, 'sticky': True,
'type': Relation.TYPE_OUTGOING}
relations = [
outgoing_sticky_rel('test1', 'sticky'),
outgoing_sticky_rel('sticky', 'test3_id'),
outgoing_sticky_rel('test2', 'test3_id'),
{'table': 'test2', 'column': 'test3_id', 'sticky': True},
]
self.check_one_subject(
schema_out, [table],
self.data_everything_except_table2_non_sticky_row,
relations=relations)
| 33.273504 | 74 | 0.512972 | 397 | 3,893 | 4.853904 | 0.224181 | 0.060716 | 0.041515 | 0.049299 | 0.371043 | 0.331604 | 0.331604 | 0.314478 | 0.267255 | 0.267255 | 0 | 0.034714 | 0.385821 | 3,893 | 116 | 75 | 33.560345 | 0.771225 | 0.113537 | 0 | 0.244186 | 0 | 0 | 0.253958 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.046512 | 0.011628 | 0.162791 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb1d0fc3b94fe211964f09e6d84f5d3d39abde23 | 6,375 | py | Python | edl/gtdb.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-05-14T09:36:36.000Z | 2022-03-30T14:32:21.000Z | edl/gtdb.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 1 | 2015-07-14T11:47:25.000Z | 2015-07-17T01:45:26.000Z | edl/gtdb.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-07-25T22:29:29.000Z | 2022-03-01T21:26:14.000Z | """
Tools for parsing GTDB headers into a taxonomy model
"""
import os
import re
import sys
import logging
from edl.taxon import TaxNode, Taxonomy
from edl.silva import writeDumpFiles
from edl.util import treeGenerator
logger = logging.getLogger(__name__)
GTDB = 'gtdb'
GTDBTAB = 'gtdb_table'
PHYLODB = 'phylodb'
def generate_taxdump(fasta=None, table=None, dump=".", **kwargs):
""" convert a GTDB faa file to ncbi style taxdumps """
if fasta is not None:
tax_file = fasta
fmt = 'fasta'
elif table is not None:
tax_file = table
fmt = 'table'
else:
raise Exception("Please supply 'fasta' or 'table' file")
tax_args = {k: v for k, v in kwargs.items() if k in ['style']}
taxonomy = parse_lineages(tax_file, fmt, **tax_args)
dump_args = {k: v for k, v in kwargs.items() if k in ['map_file_name']}
dump_taxonomy(taxonomy, dump, **dump_args)
def generate_gtdb_lineages_from_table(tax_file):
""" return acc,lineage tuple from file """
with open(tax_file) as table_h:
# skip header
try:
next(table_h)
except StopIteration:
raise Exception("Table is empty!\n" + tax_file)
for line in table_h:
org, _species, lineage = \
[x.strip()
for x in line.split('\t', 4)[:3]]
yield (org, lineage)
def generate_gtdb_lineages(fasta_file):
""" return acc,lineage tuple from file """
with open(fasta_file) as fasta_h:
for line in fasta_h:
if line.startswith(">"):
# in GTDB headers, lineage is second chunk
acc, lineage = line[1:].split(None, 2)[:2]
yield (acc, lineage)
def generate_phylodb_lineages(fasta_file):
""" return acc,lineage tuple from file """
with open(fasta_file) as fasta_h:
for line in fasta_h:
if line.startswith(">"):
# in GTDB headers, lineage is second chunk
acc, lineage = line[1:].split("\t", 2)[:2]
yield (acc, lineage)
def parse_lineages(tax_file, fmt='fasta', style=GTDB):
""" returns taxonomy object """
id_map = {}
root = TaxNode('root', None, None)
tree = {'root': root}
logger.debug("Parsing %s", tax_file)
if style == GTDB:
add_lineage_to_tree = add_gtdb_lineage_to_tree
generate_lineages = generate_gtdb_lineages
else:
add_lineage_to_tree = add_phylodb_lineage_to_tree
generate_lineages = generate_phylodb_lineages
# generate taxonomy tree
for acc, lineage in generate_lineages(tax_file):
# create TaxNode
node = add_lineage_to_tree(lineage, tree)
id_map[acc] = node
logger.debug("Adding id numbers to %d nodes", len(tree))
# assign numeric IDs
i = 0
for node in treeGenerator(root):
i += 1
node.id = i
logger.debug("Added %d id numbers", i)
return Taxonomy(id_map, None, None, tax_file, root)
RANK_LIST = ['domain', 'phylum', 'class',
'order', 'family', 'genus', 'species']
def add_phylodb_lineage_to_tree(lineage, tree):
""" parse given lineage
create new TaxNode objects as needed
assumes that there are 7 elements in lineage, one for each rank
return leaf node """
last_node = tree['root']
sub_lineage = []
if lineage.startswith('Euk'):
        # There is an extra entry in the PhyloDB Euk lineages
ranks = [RANK_LIST[0], None] + RANK_LIST[1:]
else:
ranks = RANK_LIST
for rank, taxon_string in zip(ranks, lineage.split(';')):
sub_lineage.append(taxon_string)
taxon = ';'.join(sub_lineage)
try:
last_node = tree[taxon]
except KeyError:
new_node = TaxNode(taxon, last_node.id, rank)
new_node.name = taxon_string
new_node.setParent(last_node)
tree[taxon] = new_node
last_node = new_node
return last_node
RANK_DICT = {'d': 'domain', 'p': 'phylum', 'c': 'class',
'o': 'order', 'f': 'family', 'g': 'genus', 's': 'species'}
def add_gtdb_lineage_to_tree(lineage, tree):
""" parse given lineage
create new TaxNode objects as needed
    assumes lineage names start with x__ where x is a rank abbreviation
return leaf node """
last_node = tree['root']
sub_lineage = []
for taxon_string in lineage.split(';'):
rank_char, taxon_name = taxon_string.split('__')
rank_char = re.sub(r'^_', '', rank_char)
sub_lineage.append(taxon_string)
taxon = ';'.join(sub_lineage)
try:
last_node = tree[taxon]
except KeyError:
try:
rank = RANK_DICT[rank_char]
except KeyError:
print(lineage)
print(rank_char)
exit(-1)
new_node = TaxNode(taxon, last_node.id, rank)
new_node.name = taxon_name
new_node.setParent(last_node)
tree[taxon] = new_node
last_node = new_node
return last_node
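# Example (illustrative): the GTDB lineage string
#   "d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria"
# yields the chain root -> "d__Bacteria" -> "d__Bacteria;p__Proteobacteria"
# -> "d__Bacteria;p__Proteobacteria;c__Gammaproteobacteria" with ranks
# domain, phylum and class; nodes are keyed by the full prefix so far, while
# each node's display name is the bare taxon name (e.g. "Bacteria").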
def dump_taxonomy(taxonomy, dump_path, map_file_name='gtdb.acc.to.taxid'):
""" generate nodes.dmp and names.dmp """
# Write dump files
if not os.path.exists(dump_path):
os.makedirs(dump_path)
with open(os.path.sep.join((dump_path, 'nodes.dmp')), 'w') as nodes_h:
with open(os.path.sep.join((dump_path, 'names.dmp')), 'w') as names_h:
writeDumpFiles(taxonomy.root, nodes_h, names_h)
# Write hit->tax mapping file
if map_file_name is None:
return
with open(os.path.sep.join((dump_path, map_file_name)),
'w') as acc_map_h:
for (hitid, tax_node) in taxonomy.idMap.items():
acc_map_h.write("%s\t%d\n" % (hitid, tax_node.id))
if __name__ == '__main__':
""" convert a GTDB faa file to ncbi style taxdumps
kw arguments to generate_taxdump passed as args like:
        python edl/gtdb.py fasta=/path/to/x.faa dump=/path/to/dump
for reference:
generate_taxdump(fasta=None, table=None, dump="."):
"""
kwargs = dict(w.split("=", 1) for w in sys.argv[1:])
logging.basicConfig(level=logging.DEBUG)
logger.debug("args are: %r from:\n%s", kwargs, sys.argv)
# do the work:
generate_taxdump(**kwargs)
| 30.797101 | 78 | 0.604863 | 861 | 6,375 | 4.285714 | 0.221835 | 0.026016 | 0.024661 | 0.018428 | 0.434688 | 0.38103 | 0.350136 | 0.350136 | 0.303252 | 0.249865 | 0 | 0.003491 | 0.281098 | 6,375 | 206 | 79 | 30.946602 | 0.801658 | 0.127059 | 0 | 0.290076 | 0 | 0 | 0.070352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061069 | false | 0 | 0.053435 | 0 | 0.145038 | 0.015267 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb20d40fa7052cc2042b31919fc23245949ffc78 | 1,107 | py | Python | MeetmeApp/urls.py | Susan-Kathoni/Meet-Me | 530d14e98a1cb8d14af77397515fe32e31018041 | [
"MIT"
] | null | null | null | MeetmeApp/urls.py | Susan-Kathoni/Meet-Me | 530d14e98a1cb8d14af77397515fe32e31018041 | [
"MIT"
] | null | null | null | MeetmeApp/urls.py | Susan-Kathoni/Meet-Me | 530d14e98a1cb8d14af77397515fe32e31018041 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
path('', views.home, name="home-page"),
path("home/", views.datePage, name="date-page"),
#User URL Routes
# path("register/", views.register, name="register"),
path("profile/", views.profile, name="profile-page"),
path("user/<int:pk>/", views.UserDetail.as_view(), name="user-detail"),
path("block/<int:pk>/", views.blockUser, name="block"),
#Posts URL Routes
path("like/<int:pk>/", views.likePost, name="like"),
path("new-message/<str:username>",views.WriteMessage.as_view(), name="write-message"),
path("view-messages/", views.ViewMessages, name="view-messages"),
path("mark-read/<int:pk>", views.MarkAsRead, name="mark-read"),
path("delete/<int:pk>", views.DeleteMessage.as_view(), name="delete-message")
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT) | 39.535714 | 90 | 0.666667 | 144 | 1,107 | 5.083333 | 0.347222 | 0.034153 | 0.068306 | 0.04918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154472 | 1,107 | 28 | 91 | 39.535714 | 0.782051 | 0.074977 | 0 | 0 | 0 | 0 | 0.223092 | 0.02544 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb218330c462c5abdbf828eb5bc82a026234a3e6 | 14,872 | py | Python | gui/popups.py | Kameone/katrain | 6d2f210a1997bf2f8108ca1c0678df6788867022 | [
"MIT"
] | null | null | null | gui/popups.py | Kameone/katrain | 6d2f210a1997bf2f8108ca1c0678df6788867022 | [
"MIT"
] | null | null | null | gui/popups.py | Kameone/katrain | 6d2f210a1997bf2f8108ca1c0678df6788867022 | [
"MIT"
] | null | null | null | from collections import defaultdict
from typing import Dict, List, DefaultDict, Tuple
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from core.common import OUTPUT_DEBUG, OUTPUT_ERROR
from core.engine import KataGoEngine
from core.game import Game, GameNode
from gui.kivyutils import (
BackgroundLabel,
LabelledCheckBox,
LabelledFloatInput,
LabelledIntInput,
LabelledObjectInputArea,
LabelledSpinner,
LabelledTextInput,
LightHelpLabel,
ScaledLightLabel,
StyledButton,
StyledSpinner,
)
class InputParseError(Exception):
pass
class QuickConfigGui(BoxLayout):
def __init__(self, katrain, popup: Popup, initial_values: Dict = None, **kwargs):
super().__init__(**kwargs)
self.katrain = katrain
self.popup = popup
self.orientation = "vertical"
if initial_values:
self.set_properties(self, initial_values)
@staticmethod
def type_to_widget_class(value):
if isinstance(value, float):
return LabelledFloatInput
elif isinstance(value, bool):
return LabelledCheckBox
elif isinstance(value, int):
return LabelledIntInput
if isinstance(value, dict):
return LabelledObjectInputArea
else:
return LabelledTextInput
def collect_properties(self, widget):
if isinstance(widget, (LabelledTextInput, LabelledSpinner, LabelledCheckBox)):
try:
ret = {widget.input_property: widget.input_value}
except Exception as e:
raise InputParseError(f"Could not parse value for {widget.input_property} ({widget.__class__}): {e}")
else:
ret = {}
for c in widget.children:
for k, v in self.collect_properties(c).items():
ret[k] = v
return ret
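    # Example (illustrative): for a LabelledIntInput with input_property
    # "engine/max_visits" currently showing "500", collect_properties yields
    # {"engine/max_visits": 500} merged into the result dict.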
def set_properties(self, widget, properties):
if isinstance(widget, (LabelledTextInput, LabelledSpinner)):
key = widget.input_property
if key in properties:
widget.text = str(properties[key])
for c in widget.children:
self.set_properties(c, properties)
class LoadSGFPopup(BoxLayout):
pass
class NewGamePopup(QuickConfigGui):
def __init__(self, katrain, popup: Popup, properties: Dict, **kwargs):
properties["RU"] = KataGoEngine.get_rules(katrain.game.root)
super().__init__(katrain, popup, properties, **kwargs)
self.rules_spinner.values = list(set(self.katrain.engine.RULESETS.values()))
self.rules_spinner.text = properties["RU"]
def new_game(self):
properties = self.collect_properties(self)
self.katrain.log(f"New game settings: {properties}", OUTPUT_DEBUG)
new_root = GameNode(properties={**Game.DEFAULT_PROPERTIES, **properties})
x, y = new_root.board_size
if x > 52 or y > 52:
self.info.text = "Board size too big, should be at most 52"
return
if self.restart.active:
self.katrain.log("Restarting Engine")
self.katrain.engine.restart()
self.katrain("new-game", new_root)
self.popup.dismiss()
class ConfigPopup(QuickConfigGui):
def __init__(self, katrain, popup: Popup, config: Dict, ignore_cats: Tuple = (), **kwargs):
self.config = config
self.ignore_cats = ignore_cats
self.orientation = "vertical"
super().__init__(katrain, popup, **kwargs)
Clock.schedule_once(self.build, 0)
def build(self, _):
props_in_col = [0, 0]
cols = [BoxLayout(orientation="vertical"), BoxLayout(orientation="vertical")]
for k1, all_d in sorted(self.config.items(), key=lambda tup: -len(tup[1])): # sort to make greedy bin packing work better
if k1 in self.ignore_cats:
continue
            d = {k: v for k, v in all_d.items() if isinstance(v, (int, float, str, bool)) and not k.startswith("_")}  # no lists; dicts could be supported but are hard to scale
cat = GridLayout(cols=2, rows=len(d) + 1, size_hint=(1, len(d) + 1))
cat.add_widget(Label(text=""))
cat.add_widget(ScaledLightLabel(text=f"{k1} settings", bold=True))
for k2, v in d.items():
cat.add_widget(ScaledLightLabel(text=f"{k2}:"))
cat.add_widget(self.type_to_widget_class(v)(text=str(v), input_property=f"{k1}/{k2}"))
if props_in_col[0] <= props_in_col[1]:
cols[0].add_widget(cat)
props_in_col[0] += len(d)
else:
cols[1].add_widget(cat)
props_in_col[1] += len(d)
col_container = BoxLayout(size_hint=(1, 0.9))
col_container.add_widget(cols[0])
col_container.add_widget(cols[1])
self.add_widget(col_container)
self.info_label = Label(halign="center")
self.apply_button = StyledButton(text="Apply", on_press=lambda _: self.update_config())
self.save_button = StyledButton(text="Apply and Save", on_press=lambda _: self.update_config(save_to_file=True))
btn_container = BoxLayout(orientation="horizontal", size_hint=(1, 0.1), spacing=1, padding=1)
btn_container.add_widget(self.apply_button)
btn_container.add_widget(self.info_label)
btn_container.add_widget(self.save_button)
self.add_widget(btn_container)
def update_config(self, save_to_file=False):
        updated_cat = defaultdict(list)  # type: DefaultDict[str, List[str]]
try:
for k, v in self.collect_properties(self).items():
k1, k2 = k.split("/")
if self.config[k1][k2] != v:
self.katrain.log(f"Updating setting {k} = {v}", OUTPUT_DEBUG)
updated_cat[k1].append(k2)
self.config[k1][k2] = v
self.popup.dismiss()
except InputParseError as e:
self.info_label.text = str(e)
self.katrain.log(e, OUTPUT_ERROR)
return
if save_to_file:
self.katrain.save_config()
engine_updates = updated_cat["engine"]
if "visits" in engine_updates:
self.katrain.engine.visits = engine_updates["visits"]
if {key for key in engine_updates if key not in {"max_visits", "max_time", "enable_ownership", "wide_root_noise"}}:
self.katrain.log(f"Restarting Engine after {engine_updates} settings change")
self.info_label.text = "Restarting engine\nplease wait."
self.katrain.controls.set_status(f"Restarted Engine after {engine_updates} settings change.")
def restart_engine(_dt):
old_engine = self.katrain.engine # type: KataGoEngine
old_proc = old_engine.katago_process
if old_proc:
old_engine.shutdown(finish=True)
new_engine = KataGoEngine(self.katrain, self.config["engine"])
self.katrain.engine = new_engine
self.katrain.game.engines = {"B": new_engine, "W": new_engine}
self.katrain.game.analyze_all_nodes() # old engine was possibly broken, so make sure we redo any failures
self.katrain.update_state()
Clock.schedule_once(restart_engine, 0)
self.katrain.debug_level = self.config["debug"]["level"]
self.katrain.update_state(redraw_board=True)
class ConfigAIPopup(QuickConfigGui):
def __init__(self, katrain, popup: Popup, settings):
super().__init__(katrain, popup, settings)
self.settings = settings
Clock.schedule_once(self.build, 0)
def build(self, _):
ais = list(self.settings.keys())
top_bl = BoxLayout()
top_bl.add_widget(ScaledLightLabel(text="Select AI to configure:"))
ai_spinner = StyledSpinner(values=ais, text=ais[0])
ai_spinner.fbind("text", lambda _, text: self.build_ai_options(text))
top_bl.add_widget(ai_spinner)
self.add_widget(top_bl)
self.options_grid = GridLayout(cols=2, rows=max(len(v) for v in self.settings.values()) - 1, size_hint=(1, 7.5), spacing=1) # -1 row: the two _help entries share a single row
bottom_bl = BoxLayout(spacing=2)
self.info_label = Label()
bottom_bl.add_widget(StyledButton(text=f"Apply", on_press=lambda _: self.update_config(False)))
bottom_bl.add_widget(self.info_label)
bottom_bl.add_widget(StyledButton(text=f"Apply and Save", on_press=lambda _: self.update_config(True)))
self.add_widget(self.options_grid)
self.add_widget(bottom_bl)
self.build_ai_options(ais[0])
def build_ai_options(self, mode):
mode_settings = self.settings[mode]
self.options_grid.clear_widgets()
self.options_grid.add_widget(LightHelpLabel(size_hint=(1, 4), padding=(2, 2), text=mode_settings.get("_help_left", "")))
self.options_grid.add_widget(LightHelpLabel(size_hint=(1, 4), padding=(2, 2), text=mode_settings.get("_help_right", "")))
for k, v in mode_settings.items():
if not k.startswith("_"):
self.options_grid.add_widget(ScaledLightLabel(text=f"{k}"))
self.options_grid.add_widget(ConfigPopup.type_to_widget_class(v)(text=str(v), input_property=f"{mode}/{k}"))
for _ in range(self.options_grid.rows * self.options_grid.cols - len(self.options_grid.children)):
self.options_grid.add_widget(ScaledLightLabel(text=f""))
self.set_properties(self, self.settings)
def update_config(self, save_to_file=False):
try:
for k, v in self.collect_properties(self).items():
k1, k2 = k.split("/")
if self.settings[k1][k2] != v:
self.settings[k1][k2] = v
self.katrain.log(f"Updating setting {k} = {v}", OUTPUT_DEBUG)
if save_to_file:
self.katrain.save_config()
self.popup.dismiss()
except InputParseError as e:
self.info_label.text = str(e)
self.katrain.log(e, OUTPUT_ERROR)
return
self.popup.dismiss()
class ConfigTeacherPopup(QuickConfigGui):
def __init__(self, katrain, popup, **kwargs):
self.settings = katrain.config("trainer")
self.sgf_settings = katrain.config("sgf")
self.ui_settings = katrain.config("board_ui")
super().__init__(katrain, popup, self.settings, **kwargs)
Clock.schedule_once(self.build, 0)
self.spacing = 2
def build(self, _dt):
thresholds = self.settings["eval_thresholds"]
undos = self.settings["num_undo_prompts"]
colors = self.ui_settings["eval_colors"]
thrbox = GridLayout(spacing=1, padding=2, cols=5, rows=len(thresholds) + 1)
thrbox.add_widget(ScaledLightLabel(text="Point loss greater than", bold=True))
thrbox.add_widget(ScaledLightLabel(text="Gives this many undos", bold=True))
thrbox.add_widget(ScaledLightLabel(text="Color (fixed)", bold=True))
thrbox.add_widget(ScaledLightLabel(text="Show dots", bold=True))
thrbox.add_widget(ScaledLightLabel(text="Save in SGF", bold=True))
for i, (thr, undo, color) in enumerate(zip(thresholds, undos, colors)):
thrbox.add_widget(LabelledFloatInput(text=str(thr), input_property=f"eval_thresholds::{i}"))
thrbox.add_widget(LabelledFloatInput(text=str(undo), input_property=f"num_undo_prompts::{i}"))
thrbox.add_widget(BackgroundLabel(background=color[:3]))
thrbox.add_widget(LabelledCheckBox(text=str(color[3] == 1), input_property=f"alpha::{i}"))
thrbox.add_widget(LabelledCheckBox(size_hint=(0.5, 1), text=str(self.sgf_settings["save_feedback"][i]), input_property=f"save_feedback::{i}"))
self.add_widget(thrbox)
xsettings = BoxLayout(size_hint=(1, 0.15), spacing=2)
xsettings.add_widget(ScaledLightLabel(text="Show last <n> dots"))
xsettings.add_widget(LabelledIntInput(size_hint=(0.5, 1), text=str(self.settings["eval_off_show_last"]), input_property="eval_off_show_last"))
self.add_widget(xsettings)
xsettings = BoxLayout(size_hint=(1, 0.15), spacing=2)
xsettings.add_widget(ScaledLightLabel(text="Show dots/SGF comments for AI players"))
xsettings.add_widget(LabelledCheckBox(size_hint=(0.5, 1), text=str(self.settings["eval_show_ai"]), input_property="eval_show_ai"))
self.add_widget(xsettings)
xsettings = BoxLayout(size_hint=(1, 0.15), spacing=2)
xsettings.add_widget(ScaledLightLabel(text="Disable analysis while in teach mode"))
xsettings.add_widget(LabelledCheckBox(size_hint=(0.5, 1), text=str(self.settings["lock_ai"]), input_property="lock_ai"))
self.add_widget(xsettings)
bl = BoxLayout(size_hint=(1, 0.15), spacing=2)
bl.add_widget(StyledButton(text=f"Apply", on_press=lambda _: self.update_config(False)))
self.info_label = Label()
bl.add_widget(self.info_label)
bl.add_widget(StyledButton(text=f"Apply and Save", on_press=lambda _: self.update_config(True)))
self.add_widget(bl)
def update_config(self, save_to_file=False):
try:
for k, v in self.collect_properties(self).items():
if "::" in k:
k1, i = k.split("::")
i = int(i)
if "alpha" in k1:
v = 1.0 if v else 0.0
if self.ui_settings["eval_colors"][i][3] != v:
self.katrain.log(f"Updating alpha {i} = {v}", OUTPUT_DEBUG)
self.ui_settings["eval_colors"][i][3] = v
elif "save_feedback" in k1:
if self.sgf_settings[k1][i] != v:
self.sgf_settings[k1][i] = v
self.katrain.log(f"Updating setting sgf/{k1}[{i}] = {v}", OUTPUT_DEBUG)
else:
if self.settings[k1][i] != v:
self.settings[k1][i] = v
self.katrain.log(f"Updating setting trainer/{k1}[{i}] = {v}", OUTPUT_DEBUG)
else:
if self.settings[k] != v:
self.settings[k] = v
self.katrain.log(f"Updating setting {k} = {v}", OUTPUT_DEBUG)
if save_to_file:
self.katrain.save_config()
self.popup.dismiss()
except InputParseError as e:
self.info_label.text = str(e)
self.katrain.log(e, OUTPUT_ERROR)
return
self.katrain.update_state()
self.popup.dismiss()
| 46.043344 | 172 | 0.623924 | 1,851 | 14,872 | 4.819557 | 0.155592 | 0.050443 | 0.036431 | 0.04226 | 0.407578 | 0.355453 | 0.303553 | 0.255913 | 0.22901 | 0.205807 | 0 | 0.011589 | 0.257329 | 14,872 | 322 | 173 | 46.186335 | 0.796107 | 0.015533 | 0 | 0.233216 | 0 | 0 | 0.082411 | 0.004442 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060071 | false | 0.007067 | 0.038869 | 0 | 0.159011 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb235d579c9e8335bb32b2de2493cd8f2ac94ba2 | 606 | py | Python | Chessboard.py | choyai/basic-chess | 34cf581544f63b07dba4a87d7f1a3bb19a0d102e | [
"MIT"
] | null | null | null | Chessboard.py | choyai/basic-chess | 34cf581544f63b07dba4a87d7f1a3bb19a0d102e | [
"MIT"
] | null | null | null | Chessboard.py | choyai/basic-chess | 34cf581544f63b07dba4a87d7f1a3bb19a0d102e | [
"MIT"
] | null | null | null | import numpy as np
from enum import Enum
class Square:
def __init__(self, value, x, y):
self.value = value
self.color = (x + y) % 2
self.x = x
self.y = y
def __repr__(self):
return str(self.value) + ' ' + str((self.x, self.y))
class Chessboard:
def __init__(self):
self.squares = []
self.state = np.zeros((8, 8))
for i in range(8):
for j in range(8):
square = Square(0, i, j)
self.squares.append(square)
def __repr__(self):
return self.state.__repr__() | 22.444444 | 50 | 0.4967 | 79 | 606 | 3.556962 | 0.392405 | 0.096085 | 0.078292 | 0.120996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016086 | 0.384488 | 606 | 27 | 51 | 22.444444 | 0.737265 | 0 | 0 | 0.1 | 0 | 0 | 0.001647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0.1 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb26120df60c58054886a7da4810cc127da3d62a | 3,494 | py | Python | uai/operation/pack/base_pack_op.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 38 | 2017-04-26T04:00:09.000Z | 2022-02-10T02:51:05.000Z | uai/operation/pack/base_pack_op.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 17 | 2017-11-20T20:47:09.000Z | 2022-02-09T23:48:46.000Z | uai/operation/pack/base_pack_op.py | FinchZHU/uai-sdk | 78e06bebba2d18233ce6dcb5be619e940f7a7ef3 | [
"Apache-2.0"
] | 28 | 2017-07-08T05:23:13.000Z | 2020-08-18T03:12:27.000Z | # Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from datetime import datetime
from uai.utils.logger import uai_logger
from ucloud.ufile import putufile
from uai.operation.base_operation import BaseUaiServiceOp
from uai.operation.tar.base_tar_op import UaiServiceTarOp
UFILE_INFO = './ufile_info.log'
CURRENT_PATH = os.getcwd()
class UaiServicePackOp(UaiServiceTarOp):
""" The Base Pack Tool Class with UAI
"""
def __init__(self, parser):
super(UaiServicePackOp, self).__init__(parser)
self.conf_params = {}
self.filelist = []
self.ufile_info = []
self.platform = ''
def _add_ufile_args(self, pack_parser):
ufile_parse = pack_parser.add_argument_group(
'Ufile-Params', 'Ufile Parameters, help to upload file to Ufile automatically'
)
ufile_parse.add_argument(
'--bucket',
type=str,
required=True,
help='the name of ufile bucket')
def _add_args(self):
super(UaiServicePackOp, self)._add_args()
self._add_account_args(self.parser)
self._add_ufile_args(self.parser)
def _parse_args(self, args):
super(UaiServicePackOp, self)._parse_args(args)
self._parse_account_args(args)
if "ai_arch_v" in args:
if self.platform != args["ai_arch_v"].lower().split('-')[0]:
raise RuntimeError("ai_arch_v should be one version of " + self.platform)
self.bucket = args['bucket']
def _upload_to_ufile(self):
public_key = self.public_key
private_key = self.private_key
bucket = self.bucket
uai_logger.debug('Start uploading file to bucket {0}'.format(bucket))
handler = putufile.PutUFile(public_key, private_key)
local_file = os.path.join(CURRENT_PATH, self.pack_file_path, self.upload_name)
local_file = local_file.replace('\\', '/')
key = self.upload_name
uai_logger.info('Upload >> key: {0}, local file: {1}'.format(key, local_file))
ret, resp = handler.putfile(bucket, key, local_file)
uai_logger.debug('Ufile response: {0}'.format(resp))
assert resp.status_code == 200, 'upload failed: unexpected response status'
uai_logger.debug('uploaded local file {0} to ufile key {1} successfully'.format(local_file, key))
current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
self.ufile_info.append('Upload Time: {0}\n'.format(current_time))
self.ufile_info.append('Path of Upload File: {0}\n'.format(key))
def _pack(self):
self._upload_to_ufile()
with open(UFILE_INFO, 'w') as f:
f.write(''.join(self.ufile_info))
def cmd_run(self, args):
self._parse_args(args)
super(UaiServicePackOp, self).cmd_run(args)
self._pack()
| 38.822222 | 90 | 0.647682 | 461 | 3,494 | 4.713666 | 0.357918 | 0.033134 | 0.046019 | 0.014726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007366 | 0.222954 | 3,494 | 89 | 91 | 39.258427 | 0.793002 | 0.199485 | 0 | 0 | 0 | 0 | 0.148722 | 0 | 0 | 0 | 0 | 0 | 0.016393 | 1 | 0.114754 | false | 0 | 0.098361 | 0 | 0.229508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb27a05966e2cdc151b7cb1b1d26243619e72b33 | 778 | py | Python | reader/migrations/0005_managers.py | FroshOU/manga | 60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2 | [
"MIT"
] | 58 | 2019-03-04T09:22:42.000Z | 2022-02-18T09:11:57.000Z | reader/migrations/0005_managers.py | FroshOU/manga | 60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2 | [
"MIT"
] | 21 | 2019-03-07T19:34:53.000Z | 2021-12-19T12:46:40.000Z | reader/migrations/0005_managers.py | FroshOU/manga | 60ec24a007a7e9ebe0c152cf1f2a2aa0362f17f2 | [
"MIT"
] | 14 | 2019-06-06T09:53:13.000Z | 2021-12-17T14:34:13.000Z | from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reader', '0004_aliases'),
]
operations = [
migrations.AddField(
model_name='series',
name='manager',
field=models.ForeignKey(
help_text='The person who manages this series.',
blank=False, null=True, on_delete=models.SET_NULL,
to=settings.AUTH_USER_MODEL, limit_choices_to=models.Q(
('is_superuser', True),
('groups__name', 'Scanlator'),
_connector='OR'
),
),
),
]
| 29.923077 | 71 | 0.552699 | 72 | 778 | 5.75 | 0.694444 | 0.048309 | 0.077295 | 0.101449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007843 | 0.344473 | 778 | 25 | 72 | 31.12 | 0.803922 | 0 | 0 | 0.136364 | 0 | 0 | 0.12982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb2c5307cd59ead7a7f4bf7a140973af81e47bad | 2,099 | py | Python | mapss/static/packages/arches/arches/setup.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | mapss/static/packages/arches/arches/setup.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | mapss/static/packages/arches/arches/setup.py | MPI-MAPSS/MAPSS | 3a5c0109758801717aaa8de1125ca5e98f83d3b4 | [
"CC0-1.0"
] | null | null | null | import sys
import os
import subprocess
import shutil
import urllib.request, urllib.error, urllib.parse
import zipfile
import datetime
import platform
import tarfile
from arches import settings
here = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.dirname(here)
def install():
# CHECK PYTHON VERSION
if not sys.version_info >= (3, 7):
print("ERROR: Arches requires at least Python 3.7")
sys.exit(101)
return True
def unzip_file(file_name, unzip_location):
try:
# first assume you have a .tar.gz file
tar = tarfile.open(file_name, "r:gz")
tar.extractall(path=unzip_location)
tar.close()
except tarfile.TarError:
# next assume you have a .zip file
with zipfile.ZipFile(file_name, "r") as myzip:
myzip.extractall(unzip_location)
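# Illustrative usage (paths here are hypothetical, not from the original file):
# unzip_file("arches.tar.gz", "/tmp/arches") # tries tar.gz first, falls back to zip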
def get_version(version=None):
"Returns a PEP 440-compliant version number from VERSION."
version = get_complete_version(version)
# Now build the two parts of the version number:
# major = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta and rc releases
major = get_major_version(version)
sub = ""
if version[3] != "final":
mapping = {"alpha": "a", "beta": "b", "rc": "rc"}
sub = mapping[version[3]] + str(version[4])
return str(major + sub)
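# Illustrative check, not part of the original file: exercises get_version with
# hypothetical VERSION tuples to show the PEP 440 formatting described above.
def _demo_get_version():
    assert get_version((2, 1, 0, "alpha", 1)) == "2.1.0a1"  # pre-release -> aN
    assert get_version((2, 1, 0, "final", 0)) == "2.1.0"  # final -> plain X.Y.Z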
def get_major_version(version=None):
"Returns major version from VERSION."
version = get_complete_version(version)
parts = 3
major = ".".join(str(x) for x in version[:parts])
return major
def get_complete_version(version=None):
"""Returns a tuple of the django version. If version argument is non-empty,
then checks for correctness of the tuple provided.
"""
if version is None:
from arches import VERSION as version
else:
assert len(version) == 5
assert version[3] in ("alpha", "beta", "rc", "final")
return version
if __name__ == "__main__":
install()
| 25.91358 | 80 | 0.623154 | 280 | 2,099 | 4.557143 | 0.403571 | 0.087774 | 0.04232 | 0.058777 | 0.10815 | 0.067398 | 0.067398 | 0 | 0 | 0 | 0 | 0.010478 | 0.272511 | 2,099 | 80 | 81 | 26.2375 | 0.825147 | 0.218199 | 0 | 0.078431 | 0 | 0 | 0.112064 | 0 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.098039 | false | 0.019608 | 0.215686 | 0 | 0.392157 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb2f35d72860c941a4140869fd28c700aa0e181d | 19,950 | py | Python | parser/vo_item.py | gemoku/bolinas | dbdc2417d8ae82394f3f8e5e0758bad86b9cfd5a | [
"MIT"
] | 13 | 2015-12-05T16:30:48.000Z | 2021-04-15T14:38:24.000Z | parser/vo_item.py | karlmoritz/bolinas | 7a1a78bca0a6ba1cb9d80b13d2a87ce37993507f | [
"MIT"
] | 1 | 2015-10-18T03:50:53.000Z | 2015-10-18T03:58:28.000Z | parser/vo_item.py | karlmoritz/bolinas | 7a1a78bca0a6ba1cb9d80b13d2a87ce37993507f | [
"MIT"
] | 5 | 2015-10-18T03:10:07.000Z | 2018-06-29T17:40:43.000Z | from common.hgraph.hgraph import NonterminalLabel
from common import log
import itertools
# Some general advice for reading this file:
#
# Every rule specifies some fragment of the object (graph, string or both) that
# is being parsed, as well as a visit order on the individual elements of that
# fragment (tokens or edges respectively). The number of elements already
# visited is called the "size" of this item, and an item with nothing left to
# visit is "closed". The visit order specifies an implicit binarization of the
# rule in question, by allowing the item to consume only one other object (which
# we call the "outside" of the item) at any given time.
#
# In consuming this object, we either "shift" a terminal element or "complete" a
# nonterminal (actually a closed chart item). Each of these steps produces a new
# chart item.
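#
# Illustrative sketch (not part of the original parser; every name below is
# hypothetical): a visit order turns an n-ary rule fragment into a sequence of
# unary consumption steps, yielding one "outside" element per item size.
def _visit_order_steps(elements, visit_order):
    """Yield (size, outside_element) pairs in the order an item visits them."""
    for size, index in enumerate(visit_order):
        yield size, elements[index]  # the element consumed at this item size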
class Item(object):
pass
class HergItem(Item):
"""
Chart item for a HRG parse.
"""
def __init__(self, rule, size=None, shifted=None, mapping=None, nodeset=None, nodelabels=False):
# by default start empty, with no part of the graph consumed
if size == None:
size = 0
if shifted == None:
shifted = frozenset()
if mapping == None:
mapping = dict()
if nodeset == None:
nodeset = frozenset()
self.rule = rule
self.size = size
self.shifted = shifted
self.mapping = mapping
self.nodeset = nodeset
self.rev_mapping = dict((val, key) for key, val in mapping.items())
self.nodelabels = nodelabels
# Store the nonterminal symbol and index of the previous complete
# on this item so we can rebuild the derivation easily
triples = rule.rhs1.triples(nodelabels = nodelabels)
self.outside_symbol = None
if size < len(triples):
# this item is not closed
self.outside_triple = triples[rule.rhs1_visit_order[size]]
self.outside_edge = self.outside_triple[1]
self.closed = False
self.outside_is_nonterminal = isinstance(self.outside_triple[1],
NonterminalLabel)
if self.outside_is_nonterminal:
# strip the index off of the nonterminal label
#self.outside_symbol = str(self.outside_triple[1])
#self.outside_symbol = self.outside_symbol[1:].split('[')[0]
self.outside_symbol = self.outside_triple[1].label
self.outside_nt_index = self.outside_triple[1].index
else:
# this item is closed
self.outside_triple = None
self.outside_edge = None
self.closed = True
self.outside_is_nonterminal = False
self.__cached_hash = None
def __hash__(self):
# memoize the hash function
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.rule) + 3 * self.size + \
5 * hash(self.shifted)
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, HergItem) and \
other.rule == self.rule and \
other.size == self.size and \
other.shifted == self.shifted and \
other.mapping == self.mapping
def uniq_str(self):
"""
Produces a unique string representation of this item. When representing
charts in other formats (e.g. when writing a tiburon RTG file) we have to
represent this item as a string, which we build from the rule id and list of
nodes.
"""
return 'R%d__%s' % (self.rule.rule_id, self.uniq_cover_str())
def uniq_cover_str(self):
edges = set()
for head, elabel, tail in self.shifted:
if tail:
edges.add('%s:%s' % (head[0], ':'.join([x[0] for x in tail])))
else:
edges.add(head[0])
return ','.join(sorted(list(edges)))
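# e.g. a rule with id 7 whose shifted edges are rooted at n1 (tail n3) and n2
# would serialize roughly as "R7__n1:n3,n2"; node names here are illustrative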
def __repr__(self):
return 'HergItem(%d, %d, %s, %s)' % (self.rule.rule_id, self.size, self.rule.symbol, len(self.shifted))
def __str__(self):
return '[%d, %d/%d, %s, {%s}]' % (self.rule.rule_id,
self.size,
len(self.rule.rhs1.triples()),
self.outside_symbol,
str([x for x in self.shifted]))
def can_shift(self, new_edge):
"""
Determines whether new_edge matches the outside of this item, and can be
shifted.
"""
#print "SHIFT", self, "<---", new_edge
# can't shift into a closed item
if self.closed:
return False
# can't shift an edge that is already inside this item
if new_edge in self.shifted:
return False
olabel = self.outside_triple[1]
nlabel = new_edge[1]
# make sure new_edge mathes the outside label
if olabel != nlabel:
return False
# make sure new_edge preserves a consistent mapping between the nodes of the
# graph and the nodes of the rule
if self.nodelabels:
#print o1
o1, o1_label = self.outside_triple[0]
n1, n1_label = new_edge[0]
if o1_label != n1_label:
return False
else:
o1 = self.outside_triple[0]
n1 = new_edge[0]
if o1 in self.mapping and self.mapping[o1] != n1:
return False
# if this node is not a node of this rule RHS, but of a subgraph it needs to have a mapping.
# otherwise, we can't attach.
if n1 in self.nodeset and n1 not in self.rev_mapping:
return False
if self.nodelabels:
if self.outside_triple[2]:
o2, o2_labels = zip(*self.outside_triple[2])
else: o2, o2_labels = [],[]
if new_edge[2]:
n2, n2_labels = zip(*new_edge[2])
else: n2, n2_labels = [],[]
if o2_labels != n2_labels:
return False
else:
o2 = self.outside_triple[2]
n2 = new_edge[2]
if len(o2) != len(n2):
return False
for i in range(len(o2)):
if o2[i] in self.mapping and self.mapping[o2[i]] != n2[i]:
return False
# Again, need to make sure this node is part of the rule RHS, not of a
# proper subgraph.
if n2[i] in self.nodeset and n2[i] not in self.rev_mapping:
return False
return True
def shift(self, new_edge):
"""
Creates the chart item resulting from a shift of new_edge. Assumes
can_shift returned true.
"""
olabel = self.outside_triple[1]
o1 = self.outside_triple[0][0] if self.nodelabels else self.outside_triple[0]
o2 = tuple(x[0] for x in self.outside_triple[2]) if self.nodelabels else self.outside_triple[2]
nlabel = new_edge[1]
n1 = new_edge[0][0] if self.nodelabels else new_edge[0]
n2 = tuple(x[0] for x in new_edge[2]) if self.nodelabels else new_edge[2]
new_nodeset = self.nodeset | set(n2) | set([n1])
assert len(o2) == len(n2)
new_size = self.size + 1
new_shifted = frozenset(self.shifted | set([new_edge]))
new_mapping = dict(self.mapping)
new_mapping[o1] = n1
for i in range(len(o2)):
new_mapping[o2[i]] = n2[i]
return HergItem(self.rule, new_size, new_shifted, new_mapping, new_nodeset, self.nodelabels)
def can_complete(self, new_item):
"""
Determines whether new_item matches the outside of this item (i.e. if the
nonterminals match and the node mappings agree).
"""
# can't add to a closed item
if self.closed:
#log.debug('fail bc closed')
return False
# can't shift an incomplete item
if not new_item.closed:
#log.debug('fail bc other not closed')
return False
# make sure labels agree
if not self.outside_is_nonterminal:
#log.debug('fail bc outside terminal')
return False
#Make sure items are disjoint
if any(edge in self.shifted for edge in new_item.shifted):
#log.debug('fail bc overlap')
return False
# make sure mappings agree
if self.nodelabels:
o1, o1label = self.outside_triple[0]
if self.outside_triple[2]:
o2, o2labels = zip(*self.outside_triple[2])
else:
o2, o2labels = [],[]
else:
o1 = self.outside_triple[0]
o2 = self.outside_triple[2]
if len(o2) != len(new_item.rule.rhs1.external_nodes):
#log.debug('fail bc hyperedge type mismatch')
return False
nroot = list(new_item.rule.rhs1.roots)[0]
#Check root label
if self.nodelabels and o1label != new_item.rule.rhs1.node_to_concepts[nroot]:
return False
if o1 in self.mapping and self.mapping[o1] != new_item.mapping[nroot]:
# new_item.mapping[new_item.rule.rhs1.roots[0]]:
#log.debug('fail bc mismapping')
return False
real_nroot = new_item.mapping[nroot]
real_ntail = None
for i in range(len(o2)):
otail = o2[i]
ntail = new_item.rule.rhs1.rev_external_nodes[i]
#Check tail label
if self.nodelabels and o2labels[i] != new_item.rule.rhs1.node_to_concepts[ntail]:
return False
if otail in self.mapping and self.mapping[otail] != new_item.mapping[ntail]:
#log.debug('fail bc bad mapping in tail')
return False
for node in new_item.mapping.values():
if node in self.rev_mapping:
onode = self.rev_mapping[node]
if not (onode == o1 or onode in o2):
return False
return True
def complete(self, new_item):
"""
Creates the chart item resulting from a complete of new_item. Assumes
can_shift returned true.
"""
olabel = self.outside_triple[1]
o1 = self.outside_triple[0][0] if self.nodelabels else self.outside_triple[0]
o2 = tuple(x[0] for x in self.outside_triple[2]) if self.nodelabels else self.outside_triple[2]
new_size = self.size + 1
new_shifted = frozenset(self.shifted | new_item.shifted)
new_mapping = dict(self.mapping)
new_mapping[o1] = new_item.mapping[list(new_item.rule.rhs1.roots)[0]]
for i in range(len(o2)):
otail = o2[i]
ntail = new_item.rule.rhs1.rev_external_nodes[i]
new_mapping[otail] = new_item.mapping[ntail]
new_nodeset = self.nodeset | new_item.nodeset
new = HergItem(self.rule, new_size, new_shifted, new_mapping, new_nodeset, self.nodelabels)
return new
class CfgItem(Item):
"""
Chart item for a CFG parse.
"""
def __init__(self, rule, size=None, i=None, j=None, nodelabels = False):
# until this item is associated with some span in the sentence, let i and j
# (the left and right boundaries) be -1
if size == None:
size = 0
if i == None:
i = -1
if j == None:
j = -1
self.rule = rule
self.i = i
self.j = j
self.size = size
self.shifted = []
assert len(rule.rhs1) != 0
if size == 0:
assert i == -1
assert j == -1
self.closed = False
self.outside_word = rule.rhs1[rule.rhs1_visit_order[0]]
elif size < len(rule.string):
self.closed = False
self.outside_word = rule.string[rule.rhs1_visit_order[self.size]]
else:
self.closed = True
self.outside_word = None
if self.outside_word and isinstance(self.outside_word, NonterminalLabel):
self.outside_is_nonterminal = True
self.outside_symbol = self.outside_word.label
self.outside_nt_index = self.outside_word.index
else:
self.outside_is_nonterminal = False
self.__cached_hash = None
def __hash__(self):
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.rule) + 3 * self.i + 5 * self.j
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, CfgItem) and \
other.rule == self.rule and \
other.i == self.i and \
other.j == self.j and \
other.size == self.size
def __repr__(self):
return 'CfgItem(%d, %d, %s, (%d, %d))' % (self.rule.rule_id, self.size, str(self.closed), self.i, self.j)
def __str__(self):
return '[%s, %d/%d, (%d,%d)]' % (self.rule,
self.size,
len(self.rule.rhs1),
self.i,self.j)
def uniq_str(self):
"""
Produces a unique string representation of this item (see note on uniq_str
in HergItem above).
"""
return '%d__%d_%d' % (self.rule.rule_id, self.i, self.j)
def can_shift(self, word, index):
"""
Determines whether word matches the outside of this item (i.e. is adjacent
and has the right symbol) and can be shifted.
"""
if self.closed:
return False
if self.i == -1:
return True
if index == self.i - 1:
return self.outside_word == word
elif index == self.j:
return self.outside_word == word
return False
def shift(self, word, index):
"""
Creates the chart item resulting from a shift of the word at the given
index.
"""
if self.i == -1:
return CfgItem(self.rule, self.size+1, index, index+1)
elif index == self.i - 1:
return CfgItem(self.rule, self.size+1, self.i-1, self.j)
elif index == self.j:
return CfgItem(self.rule, self.size+1, self.i, self.j+1)
assert False
def can_complete(self, new_item):
"""
Determines whether new_item matches the outside of this item.
"""
if self.closed:
return False
if not new_item.closed:
return False
if self.outside_symbol != new_item.rule.symbol:
return False
return self.i == -1 or new_item.i == self.j #or new_item.j == self.i
def complete(self, new_item):
"""
Creates the chart item resulting from a completion with the given item.
"""
if self.i == -1:
return CfgItem(self.rule, self.size+1, new_item.i, new_item.j)
elif new_item.i == self.j:
return CfgItem(self.rule, self.size+1, self.i, new_item.j)
elif new_item.j == self.i:
return CfgItem(self.rule, self.size+1, new_item.i, self.j)
assert False
class SynchronousItem(Item):
"""
Chart item for a synchronous CFG/HRG parse. (Just a wrapper for paired
CfgItem / HergItem.)
"""
def __init__(self, rule, item1class, item2class, item1 = None, item2 = None, nodelabels = False):
self.shifted = ([],[])
self.rule = rule
self.nodelabels = nodelabels
self.item1class = item1class
self.item2class = item2class
if item1:
self.item1 = item1
else:
self.item1 = item1class(rule.project_left(), nodelabels = nodelabels)
if item2:
self.item2 = item2
else:
self.item2 = item2class(rule.project_right(), nodelabels = nodelabels)
if self.item1.closed and self.item2.closed:
self.closed = True
else:
self.closed = False
# Now we potentially have two outsides---one in the graph and the other in
# the string. The visit order will guarantee that if we first consume all
# terminals in any order, the remainder of both string and graph visit
# orders will agree on the sequence in which to consume nonterminals. (See
# the Rule class.) Before consuming all terminals, it might be the case that
# one item has a terminal outside and the other a nonterminal; in that case
# we do not want an outside nonterminal associated with this item.
if item1class is CfgItem:
self.outside1_is_nonterminal = self.item1.outside_is_nonterminal
self.outside_object1 = self.item1.outside_word
else:
self.outside1_is_nonterminal = self.item1.outside_is_nonterminal
self.outside_object1= self.item1.outside_triple[1] if \
self.item1.outside_triple else None
if item2class is CfgItem:
self.outside2_is_nonterminal = self.item2.outside_is_nonterminal
self.outside_object2 = self.item2.outside_word
else:
self.outside2_is_nonterminal = self.item2.outside_is_nonterminal
self.outside_object2 = self.item2.outside_triple[1] if \
self.item2.outside_triple else None
self.outside_is_nonterminal = self.outside1_is_nonterminal and \
self.outside2_is_nonterminal
if self.outside_is_nonterminal:
assert self.outside_object1 == self.outside_object2
self.outside_symbol = self.item1.outside_symbol
self.outside_nt_index = self.item1.outside_nt_index
self.__cached_hash = None
def uniq_str(self):
"""
Produces a unique string representation of this item (see note on uniq_str
in HergItem above).
"""
if self.item1class is CfgItem:
item1cover = "%d,%d" % (self.item1.i, self.item1.j)
elif self.item1class is HergItem:
item1cover = self.item1.uniq_cover_str()
if self.item2class is CfgItem:
item2cover = "%d,%d" % (self.item2.i, self.item2.j)
elif self.item2class is HergItem:
item2cover = self.item2.uniq_cover_str()
return '%d__%s__%s' % (self.rule.rule_id, item1cover, item2cover)
def __hash__(self):
if not self.__cached_hash:
self.__cached_hash = 2 * hash(self.item1) + 7 * hash(self.item2)
return self.__cached_hash
def __eq__(self, other):
return isinstance(other, SynchronousItem) and other.item1 == self.item1 \
and other.item2 == self.item2
def __repr__(self):
return "(%s, %s, %s, %s)" % (self.item1.__repr__(), self.item2.__repr__(), str(self.item1.closed),str(self.item2.closed))
def can_shift_word1(self, word, index):
"""
Determines whether given word, index can be shifted onto the CFG item.
"""
assert isinstance(self.item1, CfgItem)
return self.item1.can_shift(word, index)
def can_shift_word2(self, word, index):
"""
Determines whether given word, index can be shifted onto the CFG item.
"""
assert isinstance(self.item2, CfgItem)
return self.item2.can_shift(word, index)
def shift_word1(self, word, index):
"""
Shifts onto the CFG item.
"""
assert isinstance(self.item1, CfgItem)
nitem = self.item1.shift(word, index)
self.shifted = (self.item1.shifted, self.item2.shifted)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem, self.item2, nodelabels = self.nodelabels)
def shift_word2(self, word, index):
"""
Shifts onto the CFG item.
"""
assert isinstance(self.item2, CfgItem)
nitem = self.item2.shift(word, index)
self.shifted = (self.item1.shifted, self.item2.shifted)
return SynchronousItem(self.rule, self.item1class, self.item2class, self.item1, nitem, nodelabels = self.nodelabels)
def can_shift_edge1(self, edge):
"""
Determines whether the given edge can be shifted onto the HERG item.
"""
assert isinstance(self.item1, HergItem)
self.shifted = (self.item1.shifted, self.item2.shifted)
return self.item1.can_shift(edge)
def can_shift_edge2(self, edge):
"""
Determines whether the given edge can be shifted onto the HERG item.
"""
assert isinstance(self.item2, HergItem)
self.shifted = (self.item1.shifted, self.item2.shifted)
return self.item2.can_shift(edge)
def shift_edge1(self, edge):
"""
Shifts onto the HERG item.
"""
nitem = self.item1.shift(edge)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem, self.item2, nodelabels = self.nodelabels)
def shift_edge2(self, edge):
"""
Shifts onto the HERG item.
"""
nitem = self.item2.shift(edge)
return SynchronousItem(self.rule, self.item1class, self.item2class, self.item1, nitem, nodelabels = self.nodelabels)
def can_complete(self, new_item):
"""
Determines whether given item can complete both sides.
"""
if not (self.item1.can_complete(new_item.item1) and
self.item2.can_complete(new_item.item2)):
return False
return True
def complete(self, new_item):
"""
Performs the synchronous completion, and gives back a new item.
"""
nitem1 = self.item1.complete(new_item.item1)
nitem2 = self.item2.complete(new_item.item2)
return SynchronousItem(self.rule, self.item1class, self.item2class, nitem1, nitem2, nodelabels = self.nodelabels)
| 33.473154 | 125 | 0.646516 | 2,842 | 19,950 | 4.406756 | 0.110486 | 0.05709 | 0.038007 | 0.014372 | 0.49984 | 0.411769 | 0.36434 | 0.314756 | 0.301821 | 0.274752 | 0 | 0.02013 | 0.250476 | 19,950 | 595 | 126 | 33.529412 | 0.817428 | 0.225915 | 0 | 0.453297 | 0 | 0 | 0.010206 | 0 | 0 | 0 | 0 | 0 | 0.035714 | 1 | 0.098901 | false | 0.002747 | 0.008242 | 0.021978 | 0.296703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb2f5ca3954e0ac0cd30c52e249acffaf9cb26a5 | 7,120 | py | Python | inc_cg_reporter/excel_writer.py | edwinsteele/inc-cg-reporter | 7bf63a9a0492b474526f6da54ea39b31a1a8d4fd | [
"MIT"
] | null | null | null | inc_cg_reporter/excel_writer.py | edwinsteele/inc-cg-reporter | 7bf63a9a0492b474526f6da54ea39b31a1a8d4fd | [
"MIT"
] | null | null | null | inc_cg_reporter/excel_writer.py | edwinsteele/inc-cg-reporter | 7bf63a9a0492b474526f6da54ea39b31a1a8d4fd | [
"MIT"
] | null | null | null | import datetime
import pathlib
from tempfile import NamedTemporaryFile
from typing import List, Dict
from backports.zoneinfo import ZoneInfo
from more_itertools import first
from openpyxl import Workbook
from openpyxl.styles import Alignment, Border, Side
from openpyxl.utils import get_column_letter
from openpyxl.worksheet.page import PrintPageSetup
from openpyxl.worksheet.worksheet import Worksheet
from inc_cg_reporter.connect_group import (
ConnectGroup,
ConnectGroupMembershipManager,
Person,
)
from inc_cg_reporter.field_definition import PERSONAL_ATTRIBUTE_NAME
class ConnectGroupWorksheetGenerator:
"""Creates a well formatted worksheet for a Connect Group"""
DATE_TYPE_COLUMN_WIDTH = 15
FIRST_COLUMN_WIDTH = 22
FIRST_ROW_HEIGHT = 2
HEADER_ROW_HEIGHT = 30
THIN_BORDER = Border(
left=Side(border_style="thin"),
right=Side(border_style="thin"),
top=Side(border_style="thin"),
bottom=Side(border_style="thin"),
)
def __init__(self, field_list: List[str]):
# column indexes start from 1 and enumerate uses zero-based counting,
# so we need to bump our column number by one
self._column_locations = {
col_name: col_number + 1 for col_number, col_name in (enumerate(field_list))
}
def person_as_row_values(self, person: Person) -> Dict[int, str]:
row = {}
# XXX this generator shouldn't need to know where to find the personal
# attributes on the person.
for column_name, value in person.personal_attributes.items():
row[self._column_locations[column_name]] = value
return row
def populate(self, ws: Worksheet, cg: ConnectGroup):
ws.title = cg.name
self.create_column_headers(ws)
for person in sorted(
cg.members, key=lambda p: p.personal_attributes[PERSONAL_ATTRIBUTE_NAME]
):
ws.append(self.person_as_row_values(person))
def create_column_headers(self, ws: Worksheet):
ws.cell(row=1, column=1, value="Name")
for col_name, col_index in self._column_locations.items():
ws.cell(row=1, column=col_index, value=col_name)
def insert_heading(self, ws: Worksheet):
# This can only be run after the table has been populated and styled
# given it prepends rows to the sheet. Ugh.
ws.insert_rows(0)
ws["A1"] = ws.title
ws.merge_cells("A1:{}1".format(get_column_letter(ws.max_column)))
# Style merged cells using the top left cell reference
ws["A1"].style = "Headline 1"
# alignment is overwritten by style, so set it afterwards
ws["A1"].alignment = Alignment(horizontal="center", vertical="center")
no_border = Side(border_style=None)
ws["A1"].border = Border(
left=no_border, right=no_border, top=no_border, outline=False
)
def style(self, ws: Worksheet):
# Give columns a fixed width so each sheet can print onto a single
# A4 in landscape mode.
for col_name, col_index in self._column_locations.items():
# column_dimensions requires a column name, not an index
ws.column_dimensions[
get_column_letter(col_index)
].width = self.DATE_TYPE_COLUMN_WIDTH
# Then override the first column width (it's like a header)
ws.column_dimensions["A"].width = self.FIRST_COLUMN_WIDTH
# Style the first column in a header-like way
for cell in first(ws.columns):
cell.style = "40 % - Accent1"
# Style header row (note the overlap with the name column... we're
# intentionally overwriting the style of A1 to be what is below)
for cell in first(ws.rows):
cell.style = "Accent1"
cell.alignment = Alignment(wrap_text=True, horizontal="center")
# Intended to be double height, with text wrap set in the loop below
ws.row_dimensions[1].height = self.HEADER_ROW_HEIGHT
# Style the data cells (non-header cells)
for row in ws.iter_rows(min_row=2, min_col=2):
for cell in row:
cell.alignment = Alignment(horizontal="center")
cell.border = self.THIN_BORDER
def setup_print_page_setup(self, ws):
# fitToWidth isn't recognised by Apple Numbers. Hardcoding a scale is ghastly,
# but it works and can be updated if it proves inappropriate
ws.page_setup = PrintPageSetup(orientation="landscape", scale=75)
class ConnectGroupWorkbookManager:
"""An excel workbook, with sheets per connect group and a summary sheet"""
OUTPUT_FILENAME = "inc_cg.xlsx"
def __init__(
self,
membership_manager: ConnectGroupMembershipManager,
worksheet_generator: ConnectGroupWorksheetGenerator,
):
self._membership_manager = membership_manager
self._worksheet_generator = worksheet_generator
self._workbook = Workbook()
def insert_title_sheet(self) -> None:
about_sheet = self._workbook.create_sheet("About", 0)
about_sheet.title = "About"
about_sheet.column_dimensions["A"].width = 20
about_sheet.column_dimensions["B"].width = 40
now_au = datetime.datetime.now(tz=ZoneInfo("Australia/Sydney"))
about_sheet.append({"A": "Created:", "B": now_au.ctime()})
about_sheet.append(
{
"A": "Connect Group Count:",
"B": self._membership_manager.connect_groups_count,
}
)
about_sheet.append(
{
"A": "Connect Group Total Member Count:",
"B": self._membership_manager.connect_groups_member_count,
}
)
# Show a list of ConnectGroups
if self._membership_manager.connect_groups_member_count > 0:
about_sheet.append(
{"A": "Connect Group List:", "B": self._workbook.worksheets[1].title}
)
# Ignore the zeroth worksheet (this about page), and the first worksheet
# that we printed in the line about
for ws in self._workbook.worksheets[2:]:
about_sheet.append({"B": ws.title})
def create(self) -> None:
for connect_group in sorted(
self._membership_manager.connect_groups.values(), key=lambda x: x.name
):
ws = self._workbook.create_sheet()
self._worksheet_generator.populate(ws, connect_group)
self._worksheet_generator.style(ws)
self._worksheet_generator.insert_heading(ws)
self._worksheet_generator.setup_print_page_setup(ws)
# Remove the blank sheet that's created initially
self._workbook.remove(self._workbook.worksheets[0])
self.insert_title_sheet()
def save(self) -> pathlib.Path:
output_file = NamedTemporaryFile(delete=False)
self._workbook.save(output_file.name)
output_file.close()
output_path = pathlib.Path(output_file.name)
return output_path.rename(output_path.with_name(self.OUTPUT_FILENAME))
| 39.776536 | 88 | 0.658287 | 901 | 7,120 | 5.00222 | 0.283019 | 0.019969 | 0.027957 | 0.016863 | 0.090748 | 0.069004 | 0.0497 | 0.019525 | 0.019525 | 0.019525 | 0 | 0.00754 | 0.254916 | 7,120 | 178 | 89 | 40 | 0.842036 | 0.188062 | 0 | 0.0625 | 0 | 0 | 0.039492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085938 | false | 0 | 0.101563 | 0 | 0.265625 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb30048a187fc956b10e024a3231a64c928e715b | 1,969 | py | Python | position.py | wenwen0319/HIT_test | feba0c7dac4074ca3e602e499d3d946a6abce995 | [
"MIT"
] | null | null | null | position.py | wenwen0319/HIT_test | feba0c7dac4074ca3e602e499d3d946a6abce995 | [
"MIT"
] | null | null | null | position.py | wenwen0319/HIT_test | feba0c7dac4074ca3e602e499d3d946a6abce995 | [
"MIT"
] | null | null | null | # from numba import jit
import numpy as np
import logging
numba_logger = logging.getLogger('numba')
numba_logger.setLevel(logging.WARNING)
# @jit(nopython=True)
def nodets2key(batch: int, node: int, ts: float):
key = '-'.join([str(batch), str(node), float2str(ts)])
return key
# @jit(nopython=True)
def float2str(ts):
return str(int(round(ts)))
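# Illustrative only (not in the original module): nodets2key joins the batch,
# node and a rounded integer timestamp with dashes.
def _demo_nodets2key():
    assert nodets2key(0, 42, 3.7) == "0-42-4"  # float2str rounds 3.7 to "4"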
def make_batched_keys(node_record, t_record):
batch = node_record.shape[0]
support = node_record.shape[1]
batched_keys = make_batched_keys_l(node_record, t_record, batch, support)
batched_keys = np.array(batched_keys).reshape((batch, support))
# batched_keys = np.array([nodets2key(b, n, t) for b, n, t in zip(batch_matrix.ravel(), node_record.ravel(), t_record.ravel())]).reshape(batch, support)
return batched_keys
# @jit(nopython=True)
def make_batched_keys_l(node_record, t_record, batch, support):
batch_matrix = np.arange(batch).repeat(support).reshape((-1, support))
# batch_matrix = np.tile(np.expand_dims(np.arange(batch), 1), (1, support))
batched_keys = []
for i in range(batch):
for j in range(support):
b = batch_matrix[i, j]
n = node_record[i, j]
t = t_record[i, j]
batched_keys.append(nodets2key(b, n, t))
return batched_keys
# @jit(nopython=True)
def anonymize(node_records, batch, M, walk_len):
new_node_records = np.zeros_like(node_records)
for i in range(batch):
for j in range(M):
seen_nodes = []
for w in range(walk_len):
index = list_index(seen_nodes, node_records[i, j, w])
if index == len(seen_nodes):
seen_nodes.append(node_records[i, j, w])
new_node_records[i, j, w] = index
return new_node_records
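# Illustrative sketch (not part of the original file): anonymize relabels each
# node by its first-seen position within a walk, so repeated nodes share an id.
def _demo_anonymize():
    walks = np.array([[[7, 3, 7, 9]]])  # batch=1, M=1, walk_len=4
    assert anonymize(walks, 1, 1, 4).tolist() == [[[0, 1, 0, 2]]]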
# @jit(nopython=True)
def list_index(arr, item):
count = 0
for e in arr:
if e == item:
return count
count += 1
return count | 31.253968 | 156 | 0.638395 | 288 | 1,969 | 4.177083 | 0.253472 | 0.100582 | 0.062344 | 0.074813 | 0.270989 | 0.217789 | 0.177889 | 0.119701 | 0.119701 | 0.074813 | 0 | 0.007963 | 0.234637 | 1,969 | 63 | 157 | 31.253968 | 0.790312 | 0.175724 | 0 | 0.139535 | 0 | 0 | 0.003715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.139535 | false | 0 | 0.046512 | 0.023256 | 0.348837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb34d4e88ec1cd6671f5adf6f293a9c837cc85a0 | 9,244 | py | Python | omdrivers/lifecycle/iDRAC/iDRACLicense.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 61 | 2018-02-21T00:02:20.000Z | 2022-01-26T03:47:19.000Z | omdrivers/lifecycle/iDRAC/iDRACLicense.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 31 | 2018-03-24T05:43:39.000Z | 2022-03-16T07:10:37.000Z | omdrivers/lifecycle/iDRAC/iDRACLicense.py | DanielFroehlich/omsdk | 475d925e4033104957fdc64480fe8f9af0ab6b8a | [
"Apache-2.0"
] | 25 | 2018-03-13T10:06:12.000Z | 2022-01-26T03:47:21.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#
# Copyright © 2018 Dell Inc. or its subsidiaries. All rights reserved.
# Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
# Other trademarks may be trademarks of their respective owners.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Vaideeswaran Ganesan
#
import os
import re
import time
import xml.etree.ElementTree as ET
from enum import Enum
from datetime import datetime
from omsdk.sdkprint import PrettyPrint
from omsdk.sdkcenum import EnumWrapper, TypeHelper
from omsdk.lifecycle.sdklicenseapi import iBaseLicenseApi
from omdrivers.lifecycle.iDRAC.iDRACConfig import LicenseApiOptionsEnum
import base64
import sys
import logging
logger = logging.getLogger(__name__)
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
try:
from pysnmp.hlapi import *
from pysnmp.smi import *
PySnmpPresent = True
except ImportError:
PySnmpPresent = False
from omdrivers.enums.iDRAC.iDRACEnums import *
class iDRACLicense(iBaseLicenseApi):
def __init__(self, entity):
if PY2:
super(iDRACLicense, self).__init__(entity)
else:
super().__init__(entity)
self._job_mgr = entity.job_mgr
self._config_mgr = entity.config_mgr
self._license_fqdds = []
def _get_license_json(self):
if not hasattr(self, 'license') or "License" not in self.license:
self.license = {}
self.entity._get_entries(self.license, iDRACLicenseEnum)
if "LicensableDevice" in self.license:
entries = self.license["LicensableDevice"]
if isinstance(entries, dict):
entries = [entries]
for entry in entries:
self._license_fqdds.append(entry["FQDD"])
return self.license
def _get_license_text(self, entitlementId):
retVal = self.entity._export_license(id=entitlementId)
ltext = self.entity._get_field_from_action(retVal,
"Data", "ExportLicense_OUTPUT", "LicenseFile")
if ltext:
retVal['License'] = base64.b64decode(ltext).decode("utf-8")
return retVal
def _save_license_text(self, entitlementId, folder):
retVal = self._get_license_text(entitlementId)
with open(os.path.join(folder, entitlementId), "wb") as output:
output.write(retVal['License'].encode('UTF-8'))
output.flush()
return os.path.join(folder, entitlementId)
def export_license(self, folder):
expLic = []
if not os.path.exists(folder):
os.makedirs(folder)
elif not os.path.isdir(folder):
# replace with exception
return []
self._get_license_json()
if not "License" in self.license:
# replace with exception
return []
llist = self.license["License"]
if isinstance(self.license["License"], dict):
llist = [llist]
for i in llist:
entitlementId = i["EntitlementID"]
expLic.append(self._save_license_text(entitlementId, folder))
return expLic
def export_license_share(self, license_share_path):
self._get_license_json()
if not "License" in self.license:
return {"l": False}
llist = self.license["License"]
if isinstance(self.license["License"], dict):
llist = [llist]
retval = {'Status': 'Success', 'Exported': 0, 'Failed to Export': 0}
for i in llist:
entitlementId = i["EntitlementID"]
rjson = self.entity._export_license_share(share=license_share_path,
creds=license_share_path.creds, id=entitlementId)
rjson = self._job_mgr._job_wait(rjson['Message'], rjson)
if rjson['Status'] == 'Success':
retval['Exported'] += 1
else:
retval['Failed to Export'] += 1
if retval['Exported'] == 0 and retval['Failed to Export'] > 0:
retval['Status'] = 'Failed'
return retval
def _import_license_fqdd(self, license_file, fqdd="iDRAC.Embedded.1", options=LicenseApiOptionsEnum.NoOptions):
if not os.path.exists(license_file) or not os.path.isfile(license_file):
logger.debug(license_file + " is not a file!")
return False
content = ''
with open(license_file, 'rb') as f:
content = f.read()
content = bytearray(base64.b64encode(content))
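# wrap the base64 payload with a newline roughly every 64 characters (MIME-style); the range end is padded because each insertion grows the buffer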
for i in range(0, len(content) + 65, 65):
content[i:i] = '\n'.encode()
return self.entity._import_license(fqdd=fqdd,
options=options, file=content.decode())
def _import_license_share_fqdd(self, license_share_path, fqdd="iDRAC.Embedded.1",
options=LicenseApiOptionsEnum.NoOptions):
self._get_license_json()
if not "License" in self.license:
return False
llist = self.license["License"]
if isinstance(self.license["License"], dict):
llist = [llist]
retval = {'Status': 'Success', 'Imported': 0, 'Failed to Import': 0}
for i in llist:
entitlementId = i["EntitlementID"]
rjson = self.entity._import_license_share(share=license_share_path,
creds=license_share_path.creds, name="Import",
fqdd=fqdd, options=options)
rjson = self._job_mgr._job_wait(rjson['Message'], rjson)
logger.debug(rjson)
if rjson['Status'] == 'Success':
retval['Imported'] += 1
else:
retval['Failed to Import'] += 1
if retval['Imported'] == 0 and retval['Failed to Import'] > 0:
retval['Status'] = 'Failed'
return retval
def _replace_license_fqdd(self, license_file, entitlementId, fqdd="iDRAC.Embedded.1",
options=LicenseApiOptionsEnum.NoOptions):
if not os.path.exists(license_file) or not os.path.isfile(license_file):
logger.debug(license_file + " is not a file!")
return False
content = ''
with open(license_file) as f:
content = f.read()
return self.entity._replace_license(id=entitlementId,
fqdd=fqdd, options=options, file=content)
def _delete_license_fqdd(self, entitlementId, fqdd="iDRAC.Embedded.1", options=LicenseApiOptionsEnum.NoOptions):
return self.entity._delete_license(id=entitlementId,
fqdd=fqdd, options=options)
@property
def LicensableDeviceFQDDs(self):
self._get_license_json()
return self._license_fqdds
@property
def LicensableDevices(self):
self._get_license_json()
return list(self._config_mgr._fqdd_to_comp(self._license_fqdds))
@property
def Licenses(self):
self._get_license_json()
return self.license["License"]
def import_license(self, license_file, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
return self._import_license_fqdd(license_file, fqdd=fqddlist[0], options=options)
def import_license_share(self, license_share_path, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
return self._import_license_share_fqdd(license_share_path, fqdd=fqddlist[0], options=options)
def replace_license(self, license_file, entitlementId, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
return self._replace_license_fqdd(license_file, entitlementId, fqdd=fqddlist[0], options=options)
def delete_license(self, entitlementId, component="iDRAC", options=LicenseApiOptionsEnum.NoOptions):
fqddlist = self._config_mgr._comp_to_fqdd(self.LicensableDeviceFQDDs, component, default=[component])
return self._delete_license_fqdd(entitlementId, fqdd=fqddlist[0], options=options)
| 42.599078 | 120 | 0.623107 | 1,018 | 9,244 | 5.479371 | 0.220039 | 0.055217 | 0.022947 | 0.019362 | 0.491215 | 0.433489 | 0.373252 | 0.328612 | 0.300108 | 0.286124 | 0 | 0.008122 | 0.280723 | 9,244 | 216 | 121 | 42.796296 | 0.830651 | 0.092276 | 0 | 0.359756 | 0 | 0 | 0.071034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103659 | false | 0 | 0.182927 | 0.006098 | 0.426829 | 0.006098 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb36ec70c07ffa6bf2c091910c913a445feb66eb | 266 | py | Python | main.py | MustafaTheCoder/Basic-Discord-Bot | 7dcbd3f9746a1e1bbef45c34903cff0e23899411 | [
"MIT"
] | null | null | null | main.py | MustafaTheCoder/Basic-Discord-Bot | 7dcbd3f9746a1e1bbef45c34903cff0e23899411 | [
"MIT"
] | null | null | null | main.py | MustafaTheCoder/Basic-Discord-Bot | 7dcbd3f9746a1e1bbef45c34903cff0e23899411 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
client = commands.Bot(command_prefix="-")
@client.event
async def on_ready():
print("Bot is ready!")
@client.command()
async def Hello(ctx):
await ctx.send("Hi")
client.run("BOT TOKEN")
| 14 | 42 | 0.654135 | 36 | 266 | 4.777778 | 0.638889 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.218045 | 266 | 18 | 43 | 14.777778 | 0.826923 | 0 | 0 | 0 | 0 | 0 | 0.101626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb39289b3f68ac6a3b7e2e131415a89dd368fbbf | 1,121 | py | Python | qoffeeapi/setup.py | foehlija/Qoffee-Maker | efad3af54b466c3ff9867e9105eceb2b3b08f592 | [
"Apache-2.0"
] | null | null | null | qoffeeapi/setup.py | foehlija/Qoffee-Maker | efad3af54b466c3ff9867e9105eceb2b3b08f592 | [
"Apache-2.0"
] | 3 | 2021-11-07T19:14:32.000Z | 2022-03-23T09:40:46.000Z | qoffeeapi/setup.py | foehlija/Qoffee-Maker | efad3af54b466c3ff9867e9105eceb2b3b08f592 | [
"Apache-2.0"
] | 2 | 2021-11-07T17:19:17.000Z | 2021-11-16T14:56:16.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from glob import glob
import os
from os.path import join as pjoin
from setuptools import setup, find_packages
HERE = os.path.dirname(os.path.abspath(__file__))
# The name of the project
name = 'qoffeeapi'
# Get the version
version_info = (0, 1, 0, 'dev')
version = ".".join(map(str, version_info))
setup_args = dict(
name = name,
version = version,
scripts = glob(pjoin('scripts', '*')),
packages = find_packages(),
author = 'Max Simon',
author_email = 'max.simon@ibm.com',
platforms = "Linux, Mac OS X, Windows",
include_package_data = True,
python_requires=">=3.6",
install_requires = [
'python-dotenv>=0.19.1'
],
extras_require = {
'test': [
'pytest>=4.6',
'pytest-cov',
'nbval',
]
},
entry_points = {
},
)
if __name__ == '__main__':
setup(**setup_args)
| 21.557692 | 58 | 0.592328 | 136 | 1,121 | 4.661765 | 0.625 | 0.028391 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01487 | 0.280107 | 1,121 | 51 | 59 | 21.980392 | 0.770756 | 0.152542 | 0 | 0 | 0 | 0 | 0.143008 | 0.022246 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.147059 | 0 | 0.147059 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb393599c47f4953a5b9b24677e38c55c3bc05b5 | 2,325 | py | Python | db_etl/processors/match_area_names.py | publichealthengland/coronavirus-dashboard-pipeline-etl | efcf5c77091afe8a0f7edce2e5934fe8c1f6dc1c | [
"MIT"
] | 7 | 2021-02-14T12:42:56.000Z | 2022-03-02T09:14:22.000Z | db_etl/processors/match_area_names.py | publichealthengland/coronavirus-dashboard-pipeline-etl | efcf5c77091afe8a0f7edce2e5934fe8c1f6dc1c | [
"MIT"
] | 19 | 2021-11-03T09:21:00.000Z | 2022-03-07T09:26:47.000Z | db_etl/processors/match_area_names.py | publichealthengland/coronavirus-dashboard-pipeline-etl | efcf5c77091afe8a0f7edce2e5934fe8c1f6dc1c | [
"MIT"
] | 2 | 2021-03-03T16:52:51.000Z | 2022-02-28T16:22:33.000Z | #!/usr/bin/env python3
"""
<Description of the programme>
Author: Pouria Hadjibagheri <pouria.hadjibagheri@phe.gov.uk>
Created: 16 Dec 2020
License: MIT
Contributors: Pouria Hadjibagheri
"""
# Imports
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python:
from io import StringIO
# 3rd party:
from pandas import DataFrame, read_csv, MultiIndex
# Internal:
try:
from __app__.utilities import get_storage_file
from __app__.utilities import func_logger
except ImportError:
from utilities import get_storage_file
from utilities import func_logger
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Header
__author__ = "Pouria Hadjibagheri"
__copyright__ = "Copyright (c) 2020, Public Health England"
__license__ = "MIT"
__version__ = "0.0.1"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__all__ = [
'match_area_names'
]
AREA_TYPE_NAMES = {
'nations': 'nation',
'nhsTrusts': 'nhsTrust',
'utlas': 'utla',
'ltlas': 'ltla',
'nhsRegions': 'nhsRegion',
'regions': 'region',
'uk': 'overview'
}
@func_logger("match area names")
def match_area_names(d: DataFrame, area_type_repls):
ref_names_io = StringIO(get_storage_file("pipeline", "assets/geoglist.csv"))
ref_names = read_csv(ref_names_io, usecols=["areaType", "areaCode", "areaName"])
ref_names.replace(area_type_repls, inplace=True)
ref_names = ref_names.loc[ref_names.areaCode.isin(d.areaCode), :]
ref_names.set_index(["areaType", "areaCode"], inplace=True)
# print(ref_names.areaType)
d = d.drop(columns=['areaType', 'areaName'])
print("post", d.shape)
result = (
d
# .drop(columns=['areaName'])
.join(ref_names, on=["areaType", "areaCode"], how="left")
)
return result
if __name__ == "__main__":
df = read_csv(
"/Users/pouria/Documents/Projects/coronavirus-data-etl/db_etl/test/v2/archive/processed_20201216-1545.csv",
usecols=["areaCode", "areaType", "date", "areaName", "newCasesBySpecimenDate"],
low_memory=False
).replace(AREA_TYPE_NAMES)
print(df.shape)
df = match_area_names(df, AREA_TYPE_NAMES)
print(df.shape)
print(df.tail(10).to_string())
| 28.353659 | 115 | 0.60172 | 252 | 2,325 | 5.230159 | 0.5 | 0.060698 | 0.042489 | 0.033384 | 0.088012 | 0.088012 | 0 | 0 | 0 | 0 | 0 | 0.0154 | 0.162151 | 2,325 | 81 | 116 | 28.703704 | 0.661191 | 0.242581 | 0 | 0.041667 | 0 | 0.020833 | 0.263339 | 0.072289 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.145833 | 0 | 0.1875 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb39acda6eb9959537beae46dae372efee98d23a | 478 | py | Python | Source/RemovePunctuation.py | jj-style/Cryptobreaker | c99b9cc35114e55c87b073ee2467c06bd95835a5 | [
"MIT"
] | null | null | null | Source/RemovePunctuation.py | jj-style/Cryptobreaker | c99b9cc35114e55c87b073ee2467c06bd95835a5 | [
"MIT"
] | null | null | null | Source/RemovePunctuation.py | jj-style/Cryptobreaker | c99b9cc35114e55c87b073ee2467c06bd95835a5 | [
"MIT"
] | null | null | null | import string
punctuation = string.punctuation + "’" + '“' + "‘" + "—"
def RemovePunctuation(text, remove_spaces=True, to_lower=True):
    # Drop newlines (equivalent to the original list-based removal loop).
    text = text.replace("\n", "")
if remove_spaces:
text = text.replace(" ","")
if to_lower:
text=text.lower()
for letter in text:
if letter in punctuation:
text = text.replace(letter,"")
text = text.strip("\n")
return text
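# Examples (illustrative, not part of the original module):
#   RemovePunctuation("Hello, World!\n")                  -> "helloworld"
#   RemovePunctuation("It’s fine.", remove_spaces=False)  -> "its fine"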
| 25.157895 | 61 | 0.573222 | 58 | 478 | 4.672414 | 0.396552 | 0.147601 | 0.110701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.269874 | 478 | 18 | 62 | 26.555556 | 0.773639 | 0 | 0 | 0 | 0 | 0 | 0.023013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb3a778fd080a8f387a45d5d5f462f403e83a6b3 | 399 | py | Python | month06/Machine_learning/day08/demo/02_img_read_save.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | null | null | null | month06/Machine_learning/day08/demo/02_img_read_save.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | null | null | null | month06/Machine_learning/day08/demo/02_img_read_save.py | chaofan-zheng/python_learning_code | 5d05848911d55aa49eaee4afd7ffd80536fad7aa | [
"Apache-2.0"
] | null | null | null | # 01_img_read_save.py
# Read, display, and save an image
import cv2
im = cv2.imread("../test_img/Linus.png",  # image path
                1)  # 1 = color image, 0 = grayscale image
print(type(im))  # print the type: ndarray
print(im.shape)  # print the image shape
# Display
cv2.imshow("im",  # window name (a repeated name overwrites the earlier window)
           im)  # image data, the return value of imread
# Save
cv2.imwrite("Linus_new.png",  # path of the saved image
            im)  # image data
cv2.waitKey()  # wait for a key press (blocking call)
cv2.destroyAllWindows()  # destroy all created windows | 23.470588 | 47 | 0.62406 | 58 | 399 | 4.206897 | 0.689655 | 0.04918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035144 | 0.215539 | 399 | 17 | 48 | 23.470588 | 0.744409 | 0.380952 | 0 | 0.181818 | 0 | 0 | 0.154506 | 0.090129 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb3c72279c5fcf27f702b631f5d1c82f5eec0b49 | 728 | py | Python | ch9_1_dogs.py | codeplinth/python_crash_course | 865f326936dcfab12e77747be73bfcc632d4bde3 | [
"MIT"
] | null | null | null | ch9_1_dogs.py | codeplinth/python_crash_course | 865f326936dcfab12e77747be73bfcc632d4bde3 | [
"MIT"
] | null | null | null | ch9_1_dogs.py | codeplinth/python_crash_course | 865f326936dcfab12e77747be73bfcc632d4bde3 | [
"MIT"
] | null | null | null | class Dog:
"""Simple attempt to model a dog"""
def __init__(self,name,age):
"""Initialize name and age attributes"""
self.name = name
self.age = age
def sit(self):
"""Simulate a dog sitting in response to a command"""
print(f"{self.name} is now sitting !")
def roll_over(self):
"""Simulate rolling over in response to a command"""
print(f"{self.name} rolled over !")
my_dog = Dog('Willie',6)
your_dog = Dog('Lucy',2)
print(f"My dog's name is {my_dog.name}.")
print(f"My dog's age is {my_dog.age} years.")
my_dog.sit()
my_dog.roll_over()
print(f"Your dog's name is {my_dog.name}.")
print(f"Your dog's age is {my_dog.age} years.")
your_dog.sit()
| 24.266667 | 61 | 0.616758 | 123 | 728 | 3.528455 | 0.300813 | 0.103687 | 0.064516 | 0.059908 | 0.428571 | 0.373272 | 0.373272 | 0.373272 | 0.271889 | 0 | 0 | 0.003559 | 0.228022 | 728 | 29 | 62 | 25.103448 | 0.768683 | 0.218407 | 0 | 0 | 0 | 0 | 0.363803 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0 | 0 | 0.235294 | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb403acdcf0fd7c25a1e41c78341852ffd2414a4 | 3,461 | py | Python | folding/minimizer.py | bigict/ProFOLD | 8fac3e18d25348fc9d1fbb17552a3ce3ae1cb953 | [
"MIT"
] | 1 | 2021-08-02T07:36:03.000Z | 2021-08-02T07:36:03.000Z | folding/minimizer.py | bigict/ProFOLD | 8fac3e18d25348fc9d1fbb17552a3ce3ae1cb953 | [
"MIT"
] | null | null | null | folding/minimizer.py | bigict/ProFOLD | 8fac3e18d25348fc9d1fbb17552a3ce3ae1cb953 | [
"MIT"
] | null | null | null | import os
import threading
import queue
import numpy as np
from pyrosetta import (
pose_from_sequence,
MoveMap,
rosetta,
create_score_function,
SwitchResidueTypeSetMover,
)
from pyrosetta.rosetta.protocols.minimization_packing import MinMover
from score import score_it
def _random_dihedral():
phi = [-140, -72, -122, -82, -61, 57]
psi = [153, 145, 117, -14, -41, 39]
w = [0.135, 0.155, 0.073, 0.122, 0.497, 0.018]
p = np.random.choice(range(6), p=w)
return phi[p], psi[p]
def _set_random_dihedral(pose):
for i in range(1, pose.total_residue()):
phi, psi = _random_dihedral()
pose.set_phi(i, phi)
pose.set_psi(i, psi)
pose.set_omega(i, 180)
def _random_pose(seq, constraint):
pose = pose_from_sequence(seq, "centroid")
_set_random_dihedral(pose)
constraint.apply(pose)
return pose
def _add_noise(pose):
for i in range(1, pose.total_residue()):
phi = pose.phi(i) + np.random.normal(0, 60)
psi = pose.psi(i) + np.random.normal(0, 60)
pose.set_phi(i, phi)
pose.set_psi(i, psi)
def _minimize_step(sf, pose):
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(False)
mmap.set_jump(True)
min_mover = MinMover(mmap, sf, "lbfgs_armijo_nonmonotone", 0.0001, True)
min_mover.max_iter(1000)
min_mover.apply(pose)
def _worker(seq, constraint, sf, run_dir, pose_pool, pool_size, task_queue, mutex):
while True:
try:
idx = task_queue.get(block=False)
print("Start minimize %i ................." % idx)
mutex.acquire()
if len(pose_pool) < pool_size or np.random.random() < 0.1:
pose = _random_pose(seq, constraint)
else:
p = np.random.randint(len(pose_pool))
pose = pose_pool[p].clone()
_add_noise(pose)
mutex.release()
_minimize_step(sf, pose)
mutex.acquire()
pose_pool.append(pose)
if len(pose_pool) > pool_size:
pose_pool.sort(key=lambda x: score_it(sf, x))
del pose_pool[-1]
print("Score %i: %f" % (idx, score_it(sf, pose)))
mutex.release()
except queue.Empty:
break
def repeat_minimize(seq, constraints, sf, run_dir, n_workers, n_structs, n_iter):
pose_pool = []
mutex = threading.Lock()
q = queue.Queue()
for i in range(1, n_iter + 1):
q.put(i)
threads = []
for i in range(n_workers):
thread = threading.Thread(
target=_worker,
args=(seq, constraints, sf, run_dir, pose_pool, n_structs, q, mutex),
)
thread.start()
threads.append(thread)
for x in threads:
x.join()
pose_pool.sort(key=lambda x: score_it(sf, x))
return pose_pool
def relax(pose):
sf = create_score_function("ref2015")
sf.set_weight(rosetta.core.scoring.atom_pair_constraint, 5)
sf.set_weight(rosetta.core.scoring.dihedral_constraint, 1)
sf.set_weight(rosetta.core.scoring.angle_constraint, 1)
mmap = MoveMap()
mmap.set_bb(True)
mmap.set_chi(True)
mmap.set_jump(True)
    # Renamed from `relax` to avoid shadowing this function's own name.
    fast_relax = rosetta.protocols.relax.FastRelax()
    fast_relax.set_scorefxn(sf)
    fast_relax.max_iter(200)
    fast_relax.dualspace(True)
    fast_relax.set_movemap(mmap)
    switch = SwitchResidueTypeSetMover("fa_standard")
    switch.apply(pose)
    fast_relax.apply(pose)
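# Usage sketch (illustrative; assumes pyrosetta has been initialised and a
# centroid score function plus a constraint mover were built elsewhere):
#
#   pool = repeat_minimize(seq, constraints, sf, run_dir="out",
#                          n_workers=4, n_structs=20, n_iter=100)
#   relax(pool[0])  # full-atom refinement of the best-scoring pose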
| 27.91129 | 83 | 0.613118 | 480 | 3,461 | 4.225 | 0.30625 | 0.047337 | 0.011834 | 0.021696 | 0.248028 | 0.20858 | 0.127219 | 0.127219 | 0.127219 | 0.093688 | 0 | 0.034834 | 0.261774 | 3,461 | 123 | 84 | 28.138211 | 0.758904 | 0 | 0 | 0.176471 | 0 | 0 | 0.028027 | 0.006934 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078431 | false | 0 | 0.068627 | 0 | 0.176471 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4056293343dbab19d3e2d5e53479051955458c | 883 | py | Python | sendfile.py | ponpoko094/Colorful-MHX3gx | a795266f6872eee9eb2ea3dac2bc567cbb35dcfb | [
"MIT"
] | 4 | 2021-12-12T02:20:11.000Z | 2021-12-12T02:20:16.000Z | sendfile.py | ponpoko094/Colorful-MHX3gx | a795266f6872eee9eb2ea3dac2bc567cbb35dcfb | [
"MIT"
] | 2 | 2021-12-04T04:02:41.000Z | 2021-12-07T04:40:57.000Z | sendfile.py | ponpoko094/MHX3gx | 9abd1f9758013c81622334c88c799482cf79e4b2 | [
"MIT"
] | 2 | 2021-11-12T00:21:32.000Z | 2021-11-12T07:08:51.000Z | # -*- coding: utf-8 -*-
""" What is sendfile.py
This module is send 3gx to 3ds via FTP.
このモジュールはFTPで3gxを3dsに送信します。
"""
import ftplib
from ftplib import FTP
print("--------------------------")
print("Trying to Send the Plugin over FTP...")
get_ftp = FTP()
HOST_ADDRESS = "192.168.0.50"
PORT = 5000
TIME_OUT = 30.0
try:
get_ftp.connect(HOST_ADDRESS, PORT, TIME_OUT)
except ftplib.all_errors:
print("Failed to Connect on " + HOST_ADDRESS + " : " + str(PORT))
PATH = "luma/plugins/0004000000155400"
PLUGIN = "/Colorful-MHX3gx.3gx"
try:
print("Successfully Logged in " + HOST_ADDRESS + "\n")
print("Response : " + get_ftp.getwelcome())
get_ftp.login()
get_ftp.storbinary("STOR " + PATH + PLUGIN,
open(PLUGIN.replace("/", ""), "rb"))
print("Sending Plugin to " + PATH + "\n")
except ftplib.all_errors:
print("Login Failed!\n")
| 24.527778 | 69 | 0.628539 | 117 | 883 | 4.632479 | 0.538462 | 0.055351 | 0.055351 | 0.077491 | 0.095941 | 0 | 0 | 0 | 0 | 0 | 0 | 0.054622 | 0.191393 | 883 | 35 | 70 | 25.228571 | 0.704482 | 0.123443 | 0 | 0.173913 | 0 | 0 | 0.296345 | 0.071802 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.304348 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb41c4d99b6843964899cf16c40a4d079031229f | 5,972 | py | Python | UnstructuredMesh/DataLoaderUnstructuredMesh.py | MaximeRedstone/UnstructuredCAE-DA | b54bd53540c11aa1b70e5160751905141f463217 | [
"MIT"
] | null | null | null | UnstructuredMesh/DataLoaderUnstructuredMesh.py | MaximeRedstone/UnstructuredCAE-DA | b54bd53540c11aa1b70e5160751905141f463217 | [
"MIT"
] | null | null | null | UnstructuredMesh/DataLoaderUnstructuredMesh.py | MaximeRedstone/UnstructuredCAE-DA | b54bd53540c11aa1b70e5160751905141f463217 | [
"MIT"
] | null | null | null |
""" Data Loader for CAEs on Unstructured Meshes """
import sys, os, pickle, argparse
import numpy as np
from tabulate import tabulate
from datetime import datetime
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import DBSCAN
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
from UnstructuredCAEDA.train import TrainAE
from UnstructuredCAEDA.VarDA import BatchDA
from UnstructuredCAEDA.data import GetData
from UnstructuredCAEDA.settings.models.CLIC import CLIC
from UnstructuredCAEDA.fluidity import vtktools
from UnstructuredCAEDA.UnstructuredMesh.Localisation import *
from UnstructuredCAEDA.UnstructuredMesh.HelpersUnstructuredMesh import *
class DataLoaderUnstructuredMesh(GetData):
""" Class to load data from data files (.vtu) for AEs Training or Data Assimilation
Abreviations: fp = file path
v = vertices
f = faces
ug = unstructured grid """
def __init__(self):
pass
def get_X(self, settings):
""" Args: settings: (CLICUnstructuredMesh class)
Returns: np.array of dimensions time steps x localised points """
fps = DataLoaderUnstructuredMesh.get_sorted_fps_U(settings.getDataDir())
filepath = getXFilePath(settings)
filepathIdxMatch = getMeshToNetworkIdxMatchingLocations(settings)
filepathIdxAll = getMeshToNetworkIdxLocations(settings)
filepathIdxOverlap = getIdxOverlap(settings)
if os.path.isfile(filepath):
print("Reading pickle file: ", filepath)
with open(filepath, "rb") as f:
X = pickle.load(f)
if settings.CLIC_UNSTRUCTURED: #Only read if localisation took place first
with open(filepathIdxMatch, "rb") as f:
idxDict = pickle.load(f)
settings.setMatchIdxMeshToIdxNetwork(idxDict)
with open(filepathIdxAll, "rb") as f:
idxDict = pickle.load(f)
settings.setIdxMeshToIdxNetwork(idxDict)
with open(filepathIdxOverlap, "rb") as f:
idxOverlap = pickle.load(f)
settings.setOverlappingRegions(idxOverlap)
else:
print("Creating pickle file for percentage: ", settings.getPercentOfVertices())
X = Localiser.createX(fps, settings)
outfile = open(filepath, 'wb')
pickle.dump(X, outfile)
outfile.close()
print("Shape read X: ", np.shape(X))
        networkInput = DataLoaderUnstructuredMesh.toNetworkSpace(X, settings)
return networkInput
@staticmethod
def get_sorted_fps_U(data_dir):
""" Creates and returns list of .vtu filepaths sorted according
to timestamp in name.
Input files in data_dir must be of the
form LSBU_<TIMESTEP INDEX>_<SUBDOMAIN>.vtu """
fps = [f for f in os.listdir(data_dir) if not f.startswith('.')]
#Extract Subdomain number from data_dir
_, subdomain = data_dir.split("_") #split returns ["LSBU", "<Subdomain>"]
subdomain.replace("/", "")
#Extract index of timestep from file name
idx_fps = []
for fp in fps:
if not fp.startswith("."):
lsbu, timestep, extension = fp.split("_")
idx = int(timestep)
idx_fps.append(idx)
#sort by timestep
assert len(idx_fps) == len(fps)
zipped_pairs = zip(idx_fps, fps)
fps_sorted = [x for _, x in sorted(zipped_pairs)]
#add absolute path
fps_sorted = [data_dir + x for x in fps_sorted]
return fps_sorted
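    # Example (illustrative): for data_dir "LSBU_0/" containing
    # "LSBU_2_0.vtu" and "LSBU_10_0.vtu", this returns
    #   ["LSBU_0/LSBU_2_0.vtu", "LSBU_0/LSBU_10_0.vtu"]
    # i.e. sorted numerically by timestep rather than lexicographically.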
@staticmethod
def toNetworkSpace(X, settings):
""" Convert X to appropriate shape for AEs """
if settings.getDim() == 1:
networkInput = X
elif settings.getDim() == 2:
networkInput = DataLoaderUnstructuredMesh.reshape(X)
else:
raise NotImplementedError("Converting to network space only works for specific cases (see function description)")
return networkInput
# DEPRECATED CODE
# Below are functions used when experimenting with Tucodec 2D model that required minimum dimensions
# of 90 x 92 for size of convolution kernels to be smaller than data input size at all layers
@staticmethod
def reshape(X):
""" Reshape X of shape Timesteps x Scalars to Timesteps x nx x ny """
nbTimeSteps, nbVertices = np.shape(X)[0], np.shape(X)[1]
newshape, idxEnd = DataLoaderUnstructuredMesh.setNetworkSpace(nbVertices, np.ndim(X))
print("X entering network reshaping len = {} when will become = {}".format(nbVertices, idxEnd))
Xidx = []
for timestep in X:
timestep = timestep[:idxEnd]
Xidx.append(timestep)
for idx in range(nbTimeSteps):
oneTimestep = Xidx[idx]
oneTimestepReshaped = np.reshape(oneTimestep, newshape, order='F')
if idx == 0:
#fix length of vectors and initialize the output array:
nsize = np.shape(oneTimestepReshaped)
size = (nbTimeSteps,) + nsize
output = np.zeros(size)
output[idx] = oneTimestepReshaped
print("In network space, Xreshaped = ", np.shape(output))
return output
@staticmethod
def setNetworkSpace(n, dim):
if dim not in [2]:
raise NotImplementedError("function can only reshape 2D inputs")
nbOfVertices = int(n - (n % 10))
x, y = 92, 90
x, y = DataLoaderUnstructuredMesh.getXfromY(nbOfVertices, y)
newshape = x, y
idxEnd = x * y
return newshape, idxEnd
@staticmethod
def getXfromY(n, ymin):
xres = n // ymin
if (xres % 2 != 0):
xres = xres - 1
return xres, ymin | 39.03268 | 125 | 0.627595 | 649 | 5,972 | 5.719569 | 0.379045 | 0.039601 | 0.005388 | 0.015356 | 0.016703 | 0.016703 | 0.016703 | 0.016703 | 0 | 0 | 0 | 0.005436 | 0.291527 | 5,972 | 153 | 126 | 39.03268 | 0.871898 | 0.179672 | 0 | 0.101852 | 0 | 0 | 0.061654 | 0 | 0 | 0 | 0 | 0 | 0.009259 | 1 | 0.064815 | false | 0.009259 | 0.148148 | 0 | 0.277778 | 0.046296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb41f1bb72ae99e6bb69d3bb195a661a2dbb73cb | 18,832 | py | Python | projects/text_classification/dataset/utils_glue.py | khloe-zhang/libai | 1787a6d920b09d3aed8b04cecb84535612f388b8 | [
"Apache-2.0"
] | 55 | 2021-12-10T08:47:06.000Z | 2022-03-28T09:02:15.000Z | projects/text_classification/dataset/utils_glue.py | khloe-zhang/libai | 1787a6d920b09d3aed8b04cecb84535612f388b8 | [
"Apache-2.0"
] | 106 | 2021-11-03T05:16:45.000Z | 2022-03-31T06:16:23.000Z | projects/text_classification/dataset/utils_glue.py | khloe-zhang/libai | 1787a6d920b09d3aed8b04cecb84535612f388b8 | [
"Apache-2.0"
] | 13 | 2021-12-29T08:12:08.000Z | 2022-03-28T06:59:45.000Z | # coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from .utils import DataProcessor, EncodePattern, InputExample, InputFeatures
logger = logging.getLogger(__name__)
def glue_convert_examples_to_features(
    examples,
    tokenizer,
    max_length,
    task=None,
    pattern=EncodePattern.bert_pattern,
    label_list=None,
    output_mode=None,
):
    """Convert ``InputExample`` objects to ``InputFeatures``: tokenize,
    truncate, add the model's special tokens, pad to ``max_length`` and
    map labels according to the task's output mode."""
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info(f"Using label list {label_list} for task {task}")
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info(f"Using output mode {output_mode} for task {task}")
label_map = {label: i for i, label in enumerate(label_list)}
start_token = [] if tokenizer.start_token is None else [tokenizer.start_token]
end_token = [] if tokenizer.end_token is None else [tokenizer.end_token]
pad_id = tokenizer.pad_token_id
if pattern == EncodePattern.bert_pattern:
added_special_tokens = [2, 3]
elif pattern == EncodePattern.roberta_pattern:
added_special_tokens = [2, 4]
else:
raise KeyError("pattern is not a valid EncodePattern")
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_length - added_special_tokens[1])
else:
if len(tokens_a) > max_length - added_special_tokens[0]:
tokens_a = tokens_a[: (max_length - added_special_tokens[0])]
if pattern is EncodePattern.bert_pattern:
tokens = start_token + tokens_a + end_token
token_type_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + end_token
token_type_ids += [1] * (len(tokens) - len(token_type_ids))
elif pattern is EncodePattern.roberta_pattern:
tokens = start_token + tokens_a + end_token
token_type_ids = [0] * len(tokens)
if tokens_b:
tokens += end_token + tokens_b + end_token
token_type_ids += [1] * (len(tokens) - len(token_type_ids))
else:
raise KeyError("pattern is not a valid EncodePattern")
input_ids = tokenizer.convert_tokens_to_ids(tokens)
attention_mask = [1] * len(input_ids)
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_id] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
label = None
if example.label is not None:
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=label,
)
)
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    # Truncate a token-list pair in place to a combined maximum length by
    # repeatedly popping one token from whichever list is currently longer.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
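# Worked example (illustrative): with max_length=8, token lists of lengths
# 6 and 5 are trimmed to 4 and 4; one token is popped from whichever list
# is currently longer, so the pair ends up roughly balanced.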
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version).
Sentence pair classification task.
Determine whether the two sentences have the same meaning.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{i}"
text_a = line[3]
text_b = line[4]
label = None if set_type == "test" else line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version).
Sentence pair classification task.
Given a premise sentence and a hypothesis sentence,
the task is to predict whether the premise entails the hypothesis (entailment),
contradicts the hypothesis (contradiction), or neither (neutral).
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched"
)
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched"
)
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[8]
text_b = line[9]
label = None if set_type.startswith("test") else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_mismatched",
)
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")),
"test_mismatched",
)
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version).
Single sentence classification task.
Each example is a sequence of words annotated with whether it is a grammatical English sentence.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
test_mode = set_type == "test"
if test_mode:
lines = lines[1:]
text_index = 1 if test_mode else 3
examples = []
for (i, line) in enumerate(lines):
guid = f"{set_type}-{i}"
text_a = line[text_index]
label = None if test_mode else line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version).
Single sentence classification task.
The task is to predict the sentiment of a given sentence.
We use the two-way (positive/negative) class split, and use only sentence-level labels.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
text_index = 1 if set_type == "test" else 0
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{i}"
text_a = line[text_index]
label = None if set_type == "test" else line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version).
Sentence pair task but it is a regression task.
This task is to predict the similarity score of two sentences.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[7]
text_b = line[8]
label = None if set_type == "test" else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version).
Sentence pair classification task.
The task is to determine whether a pair of questions are semantically equivalent.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
test_mode = set_type == "test"
q1_index = 1 if test_mode else 3
q2_index = 2 if test_mode else 4
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
try:
text_a = line[q1_index]
text_b = line[q2_index]
label = None if test_mode else line[5]
except IndexError:
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version).
Sentence pair classification task.
The task is to determine whether the context sentence contains the answer to the question.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[1]
text_b = line[2]
label = None if set_type == "test" else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version).
Sentence pair classification task.
Recognizing Textual Entailment.
Predict whether the two sentences is entailment or not entailment (neutral and contradiction).
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[1]
text_b = line[2]
label = None if set_type == "test" else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version).
Sentence pair classification task.
The task is to predict if the sentence with the pronoun substituted is entailed
by the original sentence.
"""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training, dev and test sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = f"{set_type}-{line[0]}"
text_a = line[1]
text_b = line[2]
label = None if set_type == "test" else line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
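# Usage sketch (assumptions: the tokenizer object is project-specific and
# constructed elsewhere; "glue_data/MRPC" is a typical but assumed layout):
#
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_dev_examples("glue_data/MRPC")
#   features = glue_convert_examples_to_features(
#       examples, tokenizer, max_length=128, task="mrpc",
#       pattern=EncodePattern.bert_pattern,
#   )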
| 35.802281 | 100 | 0.616769 | 2,461 | 18,832 | 4.509549 | 0.106461 | 0.072445 | 0.041088 | 0.061633 | 0.640656 | 0.623356 | 0.612723 | 0.602451 | 0.561543 | 0.546044 | 0 | 0.007414 | 0.262266 | 18,832 | 525 | 101 | 35.870476 | 0.791406 | 0.18511 | 0 | 0.522659 | 0 | 0 | 0.08558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148036 | false | 0 | 0.009063 | 0 | 0.332326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb43fdde5b3581f834a97b398a6070ff285448c9 | 1,204 | py | Python | spiralOrder.py | xiaochuan-cd/leetcode | 8da7fb9c1a8344f5f258936c8a7d6cd25d3b3393 | [
"MIT"
] | null | null | null | spiralOrder.py | xiaochuan-cd/leetcode | 8da7fb9c1a8344f5f258936c8a7d6cd25d3b3393 | [
"MIT"
] | null | null | null | spiralOrder.py | xiaochuan-cd/leetcode | 8da7fb9c1a8344f5f258936c8a7d6cd25d3b3393 | [
"MIT"
] | null | null | null | class Solution:
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
if not matrix:
return []
        lvl, lenw, lenh, ret = 0, len(matrix[0]), len(matrix), []
        # Peel the matrix one ring ("level") at a time, from the outside in;
        # the length checks stop a sweep once every cell has been visited.
        while lvl <= int(min(lenw, lenh) / 2):
            if len(ret) != lenh * lenw:
                # Top edge, left to right.
                for i in range(lvl, lenw - lvl):
                    ret.append(matrix[lvl][i])
            if len(ret) != lenh * lenw:
                # Right edge, top to bottom.
                for i in range(lvl + 1, lenh - lvl):
                    ret.append(matrix[i][lenw - lvl - 1])
            if len(ret) != lenh * lenw:
                # Bottom edge, right to left.
                for i in range(lenw - lvl - 2, -1 + lvl, -1):
                    ret.append(matrix[lenh - lvl - 1][i])
            if len(ret) != lenh * lenw:
                # Left edge, bottom to top.
                for i in range(lenh - lvl - 2, lvl, -1):
                    ret.append(matrix[i][lvl])
            lvl += 1
        return ret
if __name__ == "__main__":
print(Solution().spiralOrder(
[
[1]
]
))
print(Solution().spiralOrder(
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20]
]
))
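# Expected output of the two calls above (for reference):
#   [1]
#   [1, 2, 3, 4, 8, 12, 16, 20, 19, 18, 17, 13, 9, 5, 6, 7, 11, 15, 14, 10]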
| 26.755556 | 65 | 0.41113 | 147 | 1,204 | 3.312925 | 0.333333 | 0.049281 | 0.065708 | 0.098563 | 0.316222 | 0.238193 | 0.238193 | 0.238193 | 0.238193 | 0.182752 | 0 | 0.064327 | 0.431894 | 1,204 | 44 | 66 | 27.363636 | 0.647661 | 0.039037 | 0 | 0.228571 | 0 | 0 | 0.007111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0 | 0 | 0.114286 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb464176e752765ae17771663cfb6e6d7f9714b9 | 1,177 | py | Python | integreat_cms/cms/templatetags/tree_filters.py | Integreat/cms-v2 | c79a54fd5abb792696420aa6427a5e5a356fa79c | [
"Apache-2.0"
] | 21 | 2018-10-26T20:10:45.000Z | 2020-10-22T09:41:46.000Z | integreat_cms/cms/templatetags/tree_filters.py | Integreat/cms-v2 | c79a54fd5abb792696420aa6427a5e5a356fa79c | [
"Apache-2.0"
] | 392 | 2018-10-25T08:34:07.000Z | 2020-11-19T08:20:30.000Z | integreat_cms/cms/templatetags/tree_filters.py | Integreat/cms-v2 | c79a54fd5abb792696420aa6427a5e5a356fa79c | [
"Apache-2.0"
] | 23 | 2019-03-06T17:11:35.000Z | 2020-10-16T04:36:41.000Z | """
This is a collection of tags and filters for models which inherit from the MPTT model
:class:`~integreat_cms.cms.models.abstract_tree_node.AbstractTreeNode`
(:class:`~integreat_cms.cms.models.pages.page.Page` and
:class:`~integreat_cms.cms.models.languages.language_tree_node.LanguageTreeNode`).
"""
from django import template
register = template.Library()
@register.filter
def get_descendant_ids(node):
"""
This filter returns the ids of all the node's descendants.
:param node: The requested node
:type node: ~integreat_cms.cms.models.abstract_tree_node.AbstractTreeNode
:return: The list of all the node's descendants' ids
:rtype: list [ int ]
"""
return [
descendant.id for descendant in node.get_cached_descendants(include_self=True)
]
@register.filter
def get_children_ids(node):
"""
This filter returns the ids of all the node's direct children.
:param node: The requested node
:type node: ~integreat_cms.cms.models.abstract_tree_node.AbstractTreeNode
:return: The list of all the node's children's ids
:rtype: list [ int ]
"""
return [child.id for child in node.cached_children]
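# Template usage sketch (assumption: a ``page`` tree node is in the template
# context):
#
#   {% load tree_filters %}
#   {% if other_page.id in page|get_descendant_ids %} ... {% endif %}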
| 29.425 | 86 | 0.731521 | 167 | 1,177 | 5.02994 | 0.347305 | 0.071429 | 0.089286 | 0.125 | 0.57619 | 0.458333 | 0.432143 | 0.432143 | 0.369048 | 0.369048 | 0 | 0 | 0.172472 | 1,177 | 39 | 87 | 30.179487 | 0.862423 | 0.661852 | 0 | 0.2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4652a753ab3b1a67b9185a0d10ad240ff3d243 | 5,220 | py | Python | Preprocess.py | Rawan19/HMNet | 2aa8de20558412c1adb15fac69f18f50d6f59227 | [
"MIT"
] | null | null | null | Preprocess.py | Rawan19/HMNet | 2aa8de20558412c1adb15fac69f18f50d6f59227 | [
"MIT"
] | null | null | null | Preprocess.py | Rawan19/HMNet | 2aa8de20558412c1adb15fac69f18f50d6f59227 | [
"MIT"
] | null | null | null | import json
import spacy
from nltk.tokenize import word_tokenize
import nltk
nltk.download('punkt')
# print("argss:")
# print(sys.argv[0])
# text = sys.argv[0]
print("Preprocessing the input file ...")
def preprocess_raw(raw_text: str):
json_dict_outer = {}
json_dict = {}
json_dict_outer['id']="1"
nlp = spacy.load('en', parser = False)
# nlp_spacy = spacy.load("en_core_web_sm")
POS = {w: i for i, w in enumerate([''] + list(nlp.tagger.labels))}
ENT = {w: i for i, w in enumerate([''] + nlp.entity.move_names)}
name_role_dict = {'A' : 'PM' , 'B' : 'ID' , 'C' :'UI' , 'D' :'ME'}
list_dicts = []
    # Use the function's own parameter rather than the (undefined) global `text`.
    turns = raw_text.split('\n')
for turn in turns :
json_dict = {}
if len(turn) < 1 : continue
name = turn.split(":", 1)[0]
role_name = name_role_dict[name]
json_dict['speaker'] = name
json_dict['role'] = role_name
word_text = turn.split(":", 1)[1]
tokenized_text = word_tokenize(word_text)
json_dict['utt'] = {}
output = {'word': [], 'pos_id': [],'ent_id': []}
output['word'] = tokenized_text
for token in nlp(word_text):
pos = token.tag_
output['pos_id'].append(POS[pos] if pos in POS else 0)
ent = 'O' if token.ent_iob_ == 'O' else (token.ent_iob_ + '-' + token.ent_type_)
output['ent_id'].append(ENT[ent] if ent in ENT else 0)
json_dict['utt'] = output
list_dicts.append(json_dict)
json_dict_outer['meeting'] = list_dicts
json_dict_outer['summary']=[""]
with open('test_raw2.jsonl', 'w') as outfile:
for entry in [json_dict_outer]:
# print(entry)
json.dump(entry, outfile)
outfile.write('\n')
import gzip
with open('test_raw2.jsonl', 'rb') as f_in, gzip.open('ExampleRawData/meeting_summarization/AMI_proprec/test/test_raw2.jsonl.gz', 'wb') as f_out:
f_out.writelines(f_in)
print("Prerpocessing is Complete! the new test file was created with the following path: ExampleRawData/meeting_summarization/AMI_proprec/test/test_raw2.jsonl.gz")
# text = """
# Ashwin Swarup:Alright, let's let's start then. So, welcome to the Daily call. So I wanted a basic update on where we are on ditto, so mitesh can even brought update on that.
# Mitesh Gupta: Oh yeah. So deter we have finally our position was in this Marketplace and we have got our latest app available on the marketplace with the updates, about the installation steps and new screenshots. We are waiting to put a new video so that will get a new release next week. Next early. Next week, as well as we're in talk with the Eric, the representative
# Mitesh Gupta:person of zendes where you can help us with the marketing of video. So we are all
# Ashwin Swarup:No. Yeah I also got a video the latest video from anitage and looks good. So I send it for approval to my. So let's see if If you approves it, I will set it up on the detail. but,
# Mitesh Gupta:Corporation, then we can work.
# Ashwin Swarup:But on the whole, it looks good.
# Mitesh Gupta:Yeah. And as well as what I have seen. I have also seen the Google analytics page for dito and I've seen around 8 to 15 requests are there. Like people have visited to our little page,
# Ashwin Swarup:That's good. So I mean, the initial days two days since deployment. Let's see how it goes. All right, what's the status on Chiron?
# Siddhant Bane:Yeah. So in terms of testing we have we are done with our unit tests and today we have pushed them I think with it can review them and basically approve our pull request and we will be done with that apart from that we are working on the chat server where like we discussed in yesterday's meeting, we are going to incorporate handoff feature and Also, the scalability issues that we were facing, we're going to address. From that Shashank can update.
# Shashank M:Oh yeah. So from my side, I had a few Test cases remaining. And as, after that, udit also told me to make a few more code changes to the API for the document passing an intense generation. So we made a, we made the remaining changes and I've pushed the code and with the HTTP action file on the HTTP download and upload code, which I had written. So I've put the changes for that as well. So I just have to sit with Throne to make the UI changes. So yeah so after that I'm done with the unit test case now I just have a few Service Test cases remaining. So once I'm done with that, I'll push it to get
# Ashwin Swarup:And the knowledge graph test cases are also done the part where we were trying to do question augmentation test cases now that that needs to still start.
# Shashank M:With the knowledge craft test cases. So it's divided into two and that one is the document passing. And then the Generation. The addition of the training.
# Ashwin Swarup: Yeah.
# Shashank M: So So the first one, the document passing test cases are all done so the other one which was to add the generated intense and responses for that. We were facing a few issues. So with its help you were able to just you know just get the edge cases out. So if it's failing we're giving the particular the necessary output messages. So yeah, that's all done.
# Ashwin Swarup: All right. Sounds good. Thanks a lot, guys.
# """
if __name__ == "__main__":
    # `text` is only defined in the commented-out draft above, so calling
    # preprocess_raw(text) directly would raise a NameError. Read the
    # transcript from the command line instead (sys.argv[0] is the script
    # name; argv[1] is the first real argument).
    import sys
    text = sys.argv[1]
    preprocess_raw(text)
| 61.411765 | 614 | 0.717625 | 921 | 5,220 | 3.997828 | 0.346363 | 0.026073 | 0.017653 | 0.008691 | 0.066812 | 0.043998 | 0.043998 | 0.034221 | 0.034221 | 0.034221 | 0 | 0.004042 | 0.194253 | 5,220 | 84 | 615 | 62.142857 | 0.871374 | 0.622605 | 0 | 0.043478 | 0 | 0.021739 | 0.198767 | 0.07396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.108696 | 0 | 0.130435 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb470997d608e5d11cdde33ab3f008db973fea29 | 780 | py | Python | problems/python/reverse_integer.py | kcc3/leetcode-solutions | 9a38f76c702a6df0f4bc798ac94b7137ae31ea76 | [
"MIT"
] | null | null | null | problems/python/reverse_integer.py | kcc3/leetcode-solutions | 9a38f76c702a6df0f4bc798ac94b7137ae31ea76 | [
"MIT"
] | null | null | null | problems/python/reverse_integer.py | kcc3/leetcode-solutions | 9a38f76c702a6df0f4bc798ac94b7137ae31ea76 | [
"MIT"
] | null | null | null | def reverse(x):
"""Given a 32-bit signed integer, reverse digits of an integer.
Solve:
Cast int to a string and use string slicing to easily do reversing
Args:
x (int): integer to reverse
Returns:
int: reversed integer or 0 if it exceeds the 32-bit signed integer range: [−2^31, 2^31 − 1]
"""
s = str(x)
negative = False
if s[0] == "-":
negative = True
s = s[1:]
s = s[::-1]
if negative:
s = "-" + s
return 0 if int(s) > 2 ** 31 - 1 or int(s) < -2 ** 31 else int(s)
if __name__ == "__main__":
assert reverse(123) == 321
assert reverse(-123) == -321
assert reverse(120) == 21
assert reverse(-120) == -21
assert reverse(2**32) == 0
assert reverse(-2**33) == 0
| 25.16129 | 100 | 0.546154 | 121 | 780 | 3.471074 | 0.429752 | 0.185714 | 0.052381 | 0.085714 | 0.207143 | 0.207143 | 0 | 0 | 0 | 0 | 0 | 0.099065 | 0.314103 | 780 | 30 | 101 | 26 | 0.682243 | 0.365385 | 0 | 0 | 0 | 0 | 0.021739 | 0 | 0 | 0 | 0 | 0 | 0.352941 | 1 | 0.058824 | false | 0 | 0 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb48963dd6288ee9be3650594810acbe8bced547 | 1,466 | py | Python | alembic/versions/3ea865991c72_create_user_notification_table.py | modist-io/modist-api | 827d4b1962caee9a2fde1470df30d8fd60f8f998 | [
"0BSD"
] | 1 | 2021-01-03T00:20:07.000Z | 2021-01-03T00:20:07.000Z | alembic/versions/3ea865991c72_create_user_notification_table.py | modist-io/modist-api | 827d4b1962caee9a2fde1470df30d8fd60f8f998 | [
"0BSD"
] | null | null | null | alembic/versions/3ea865991c72_create_user_notification_table.py | modist-io/modist-api | 827d4b1962caee9a2fde1470df30d8fd60f8f998 | [
"0BSD"
] | null | null | null | """Create user_notification table.
Revision ID: 3ea865991c72
Revises: 4e8c8d41f23f
Create Date: 2020-04-15 18:43:23.856094
"""
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
# revision identifiers, used by Alembic.
revision = "3ea865991c72"
down_revision = "4e8c8d41f23f"
branch_labels = None
depends_on = None
def upgrade():
"""Pushes changes into the database."""
op.create_table(
"user_notification",
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("user_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("notification_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.ForeignKeyConstraint(
["notification_id"], ["notification.id"], ondelete="cascade"
),
sa.ForeignKeyConstraint(["user_id"], ["user.id"], ondelete="cascade"),
sa.PrimaryKeyConstraint("user_id", "notification_id"),
)
op.create_refresh_updated_at_trigger("user_notification")
def downgrade():
"""Reverts changes performed by upgrade()."""
op.drop_refresh_updated_at_trigger("user_notification")
op.drop_table("user_notification")
| 27.660377 | 84 | 0.65075 | 161 | 1,466 | 5.73913 | 0.409938 | 0.08658 | 0.064935 | 0.068182 | 0.318182 | 0.318182 | 0.233766 | 0.233766 | 0.233766 | 0.145022 | 0 | 0.045574 | 0.221692 | 1,466 | 52 | 85 | 28.192308 | 0.764242 | 0.159618 | 0 | 0.323529 | 0 | 0 | 0.184514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.088235 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb48c15a637195ee193c14fecf22f1af1ff953ad | 813 | py | Python | migrations/versions/0974ecc7059c_protocols_table_add_col_is_cool.py | Spin14/wolf-backend | 21fc9d1a0df092eaa6a533149a165d2898f5fe40 | [
"MIT"
] | 2 | 2020-01-04T17:46:20.000Z | 2020-01-19T17:41:38.000Z | migrations/versions/0974ecc7059c_protocols_table_add_col_is_cool.py | Spin14/wolf-backend | 21fc9d1a0df092eaa6a533149a165d2898f5fe40 | [
"MIT"
] | 7 | 2019-05-06T01:42:12.000Z | 2019-05-14T23:22:54.000Z | migrations/versions/0974ecc7059c_protocols_table_add_col_is_cool.py | Spin14/wolf-backend | 21fc9d1a0df092eaa6a533149a165d2898f5fe40 | [
"MIT"
] | 1 | 2019-09-24T21:15:52.000Z | 2019-09-24T21:15:52.000Z | """protocols table: add col is cool
Revision ID: 0974ecc7059c
Revises: a5d44f24dd74
Create Date: 2019-05-12 01:17:23.068006
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0974ecc7059c'
down_revision = 'a5d44f24dd74'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('protocols', schema=None) as batch_op:
batch_op.add_column(sa.Column('is_cool', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('protocols', schema=None) as batch_op:
batch_op.drop_column('is_cool')
# ### end Alembic commands ###
| 24.636364 | 78 | 0.701107 | 108 | 813 | 5.138889 | 0.5 | 0.05045 | 0.075676 | 0.082883 | 0.353153 | 0.353153 | 0.353153 | 0.353153 | 0.353153 | 0.353153 | 0 | 0.074738 | 0.177122 | 813 | 32 | 79 | 25.40625 | 0.754858 | 0.386224 | 0 | 0.166667 | 0 | 0 | 0.121212 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4979a9585e712765aa4115058b93473d1d95ca | 1,225 | py | Python | avalon/__main__.py | noflame/core | 79ea85a26f0296d7c1f72670d377611d4502307b | [
"MIT"
] | 6 | 2018-09-13T04:44:28.000Z | 2020-02-16T22:27:50.000Z | avalon/__main__.py | noflame/core | 79ea85a26f0296d7c1f72670d377611d4502307b | [
"MIT"
] | 23 | 2017-11-14T15:38:32.000Z | 2019-07-25T11:18:42.000Z | avalon/__main__.py | noflame/core | 79ea85a26f0296d7c1f72670d377611d4502307b | [
"MIT"
] | 3 | 2018-02-02T22:11:56.000Z | 2018-09-19T07:40:00.000Z | import argparse
from . import pipeline
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--creator", action="store_true",
help="Launch Instance Creator in standalone mode")
parser.add_argument("--loader", action="store_true",
help="Launch Asset Loader in standalone mode")
parser.add_argument("--manager", action="store_true",
help="Launch Manager in standalone mode")
parser.add_argument("--projectmanager", action="store_true",
help="Launch Manager in standalone mode")
parser.add_argument("--root",
help="Absolute path to root directory of assets")
args, unknown = parser.parse_known_args()
host = pipeline.debug_host()
pipeline.register_host(host)
if args.creator:
from .tools import creator
creator.show(debug=True)
elif args.loader:
from .tools import loader
loader.show(debug=True)
elif args.manager:
from .tools import manager
manager.show(debug=True)
elif args.projectmanager:
from .tools import projectmanager
projectmanager.cli(unknown)
| 32.236842 | 74 | 0.631837 | 134 | 1,225 | 5.619403 | 0.320896 | 0.059761 | 0.112882 | 0.10093 | 0.410359 | 0.260292 | 0.172643 | 0.172643 | 0.172643 | 0.172643 | 0 | 0 | 0.270204 | 1,225 | 37 | 75 | 33.108108 | 0.842282 | 0 | 0 | 0.068966 | 0 | 0 | 0.23102 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.206897 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4a49b8b6d974716033873278cdc931b262790b | 1,607 | py | Python | Codementor.io/GarethDwyer/apps/clickbait/clickbait_classifier.py | nitin-cherian/Webapps | fbfbef6cb22fc742ee66460268afe6ff7834faa1 | [
"MIT"
] | 1 | 2017-11-22T08:56:06.000Z | 2017-11-22T08:56:06.000Z | Codementor.io/GarethDwyer/apps/clickbait/clickbait_classifier.py | nitin-cherian/Webapps | fbfbef6cb22fc742ee66460268afe6ff7834faa1 | [
"MIT"
] | null | null | null | Codementor.io/GarethDwyer/apps/clickbait/clickbait_classifier.py | nitin-cherian/Webapps | fbfbef6cb22fc742ee66460268afe6ff7834faa1 | [
"MIT"
] | null | null | null | # clickbait_classifier.py
# Import libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
# globals
DATA_SET_FILE = "clickbait.txt"
def is_clicbait(text):
svm, vectorizer = classifier()
# Vectorize the text. Input needs to be an iterable
vector = vectorizer.transform([text])
# Predict using the classifier
prediction = svm.predict(vector)
# if prediction is 1, return True else False
return True if prediction[0] == '1' else False
def load_data(file):
# Load data from dataset into Python lists
with open(file) as f:
lines = [line.strip().split("\t") for line in f]
headlines, labels = zip(*lines)
return headlines, labels
def classifier():
headlines, labels = load_data(DATA_SET_FILE)
# Break dataset into train and test sets
train_headlines = headlines[:8000]
test_headlines = headlines[8000:]
train_labels = labels[:8000]
test_labels = labels[8000:]
# Create a vectorizer and classifier
vectorizer = TfidfVectorizer()
svm = LinearSVC()
# Transform our text data into numerical vectors
train_vector = vectorizer.fit_transform(train_headlines)
# Train the classifier
svm.fit(train_vector, train_labels)
# Test accuracy of our classifier
test_vector = vectorizer.transform(test_headlines)
predictions = svm.predict(test_vector)
print(accuracy_score(predictions, test_labels))
return svm, vectorizer
def main():
classifier()
if __name__ == '__main__':
main()
| 23.289855 | 60 | 0.711886 | 201 | 1,607 | 5.532338 | 0.402985 | 0.029676 | 0.019784 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014879 | 0.205352 | 1,607 | 68 | 61 | 23.632353 | 0.855912 | 0.239577 | 0 | 0 | 0 | 0 | 0.019851 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.3125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4cb07300adc722e5ea30f3413a6aed3a6f72bd | 2,972 | py | Python | CHIMAclient/client_api.py | ANTLab-polimi/CHIMA | 368602b1555ac46a005b9111446c3a7e0952e72d | [
"Apache-2.0"
] | 1 | 2021-12-16T14:50:50.000Z | 2021-12-16T14:50:50.000Z | CHIMAclient/client_api.py | ANTLab-polimi/CHIMA | 368602b1555ac46a005b9111446c3a7e0952e72d | [
"Apache-2.0"
] | null | null | null | CHIMAclient/client_api.py | ANTLab-polimi/CHIMA | 368602b1555ac46a005b9111446c3a7e0952e72d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from flask import Flask
from flask import request
from flask import send_file
app = Flask(__name__)
#To suppress Flask's request logs on stdout
import logging
from config import *
from mpls import *
from utils import *
from encapsulator.userspace import *
# Configured through ClientAPI's constructor (the original comment referred
# to a "StubAPI", a name that does not appear in this module)
CLIENT_FLASK_HOST = ""
CLIENT_FLASK_PORT = ""
class ClientAPI:
    # Flask view functions can't be defined inside the class;
    # configuration parameters are set as global variables through it.
    def __init__(self, host, port):
        # `DEBUG` was declared global here but never assigned, so it has
        # been dropped from the statement.
        global CLIENT_FLASK_HOST, CLIENT_FLASK_PORT
CLIENT_FLASK_HOST = host
CLIENT_FLASK_PORT = port
#Runs the flask server in a thread
def run_client_server():
global app
if not conf.debug:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app.run(host=CLIENT_FLASK_HOST, port=CLIENT_FLASK_PORT)
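# Hypothetical startup sketch (the host/port values are placeholders; the
# thread wrapper is an assumption based on the comment above):
#
#   import threading
#   ClientAPI("0.0.0.0", 5000)
#   threading.Thread(target=run_client_server, daemon=True).start()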
# Get new destination / stack association
@app.route('/path', methods=['POST'])
def add_destination():
    data = request.json
    new = mpls_stack()
    for val in data["stack"]:
        new.add_label(val)
    print("New path %s -> %s, stack: %s, size: %d" % (data["src"], data["dst"], new, new.size))
    src = ip2int(data["src"])
    dst = ip2int(data["dst"])
    conf.destinations.append((src, dst))
    conf.encap.map_inject(src, dst, new)
    return "OK"
# Delete destination
@app.route('/path', methods=['DELETE'])
def del_destination():
    data = request.json
    print("Deleting path %s -> %s" % (data["src"], data["dst"]))
    src = ip2int(data["src"])
    dst = ip2int(data["dst"])
    if (src, dst) in conf.destinations:
        conf.destinations.remove((src, dst))
        conf.encap.map_remove(src, dst)
    return "OK"
# Get new ip route
@app.route('/route', methods=['POST'])
def new_route():
    data = request.json
    set_route(conf.interface, data["subnet"])
    conf.installed_subnets.append(data["subnet"])
    return "OK"

# Remove ip route
@app.route('/route', methods=['DELETE'])
def del_route():
    data = request.json
    remove_route(data["subnet"])
    if data["subnet"] in conf.installed_subnets:
        conf.installed_subnets.remove(data["subnet"])
    return "OK"
# Get new static arp
@app.route('/arp', methods=['POST'])
def new_arp():
    data = request.json
    set_arp(conf.interface, data["ip"], data["mac"])
    conf.installed_ips.append(data["ip"])
    return "OK"

# Remove static arp
@app.route('/arp', methods=['DELETE'])
def del_arp():
    data = request.json
    remove_arp(conf.interface, data["ip"])
    if data["ip"] in conf.installed_ips:
        conf.installed_ips.remove(data["ip"])
    return "OK"
# Get new ip
@app.route('/ip', methods=['POST'])
def new_ip():
    data = request.json
    set_ip(conf.interface, data["ip"])
    return "OK"

# Remove ip
@app.route('/ip', methods=['DELETE'])
def del_ip():
    data = request.json
    remove_ip(conf.interface, data["ip"])
    return "OK"
| 24.766667 | 96 | 0.659825 | 418 | 2,972 | 4.569378 | 0.253589 | 0.046073 | 0.062827 | 0.039791 | 0.231937 | 0.174346 | 0.095288 | 0.072251 | 0.038743 | 0 | 0 | 0.002082 | 0.19179 | 2,972 | 120 | 97 | 24.766667 | 0.793089 | 0.131898 | 0 | 0.25 | 0 | 0 | 0.091936 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1 | 0 | 0.3375 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4ee9418756d227fb920b5482b1a70d72f470a7 | 5,163 | py | Python | tests/test_cli.py | epandurski/swpt_creditors | 35b9c6fa8ec84fe26e203a2604aff9cd5280dc4c | [
"MIT"
] | null | null | null | tests/test_cli.py | epandurski/swpt_creditors | 35b9c6fa8ec84fe26e203a2604aff9cd5280dc4c | [
"MIT"
] | null | null | null | tests/test_cli.py | epandurski/swpt_creditors | 35b9c6fa8ec84fe26e203a2604aff9cd5280dc4c | [
"MIT"
] | 1 | 2020-01-16T13:24:31.000Z | 2020-01-16T13:24:31.000Z | import logging
from datetime import date, timedelta
from swpt_creditors import procedures as p
from swpt_creditors import models as m
D_ID = -1
C_ID = 4294967296
def _create_new_creditor(creditor_id: int, activate: bool = False):
    creditor = p.reserve_creditor(creditor_id)
    if activate:
        p.activate_creditor(creditor_id, creditor.reservation_id)
def test_process_ledger_entries(app, db_session, current_ts):
    _create_new_creditor(C_ID, activate=True)
    p.create_new_account(C_ID, D_ID)

    params = {
        'debtor_id': D_ID,
        'creditor_id': C_ID,
        'creation_date': date(2020, 1, 1),
        'last_change_ts': current_ts,
        'last_change_seqnum': 1,
        'principal': 1000,
        'interest': 0.0,
        'interest_rate': 5.0,
        'last_interest_rate_change_ts': current_ts,
        'transfer_note_max_bytes': 500,
        'last_config_ts': current_ts,
        'last_config_seqnum': 1,
        'negligible_amount': 0.0,
        'config_flags': 0,
        'config_data': '',
        'account_id': str(C_ID),
        'debtor_info_iri': 'http://example.com',
        'debtor_info_content_type': None,
        'debtor_info_sha256': None,
        'last_transfer_number': 0,
        'last_transfer_committed_at': current_ts,
        'ts': current_ts,
        'ttl': 100000,
    }
    p.process_account_update_signal(**params)

    params = {
        'debtor_id': D_ID,
        'creditor_id': C_ID,
        'creation_date': date(2020, 1, 1),
        'transfer_number': 1,
        'coordinator_type': 'direct',
        'sender': '666',
        'recipient': str(C_ID),
        'acquired_amount': 200,
        'transfer_note_format': 'json',
        'transfer_note': '{"message": "test"}',
        'committed_at': current_ts,
        'principal': 200,
        'ts': current_ts,
        'previous_transfer_number': 0,
        'retention_interval': timedelta(days=5),
    }
    p.process_account_transfer_signal(**params)
    params['transfer_number'] = 2
    params['principal'] = 400
    params['previous_transfer_number'] = 1
    p.process_account_transfer_signal(**params)

    assert len(p.get_account_ledger_entries(C_ID, D_ID, prev=10000, count=10000)) == 0
    runner = app.test_cli_runner()
    result = runner.invoke(args=['swpt_creditors', 'process_ledger_updates', '--burst=1', '--quit-early', '--wait=0'])
    assert not result.output
    assert len(p.get_account_ledger_entries(C_ID, D_ID, prev=10000, count=10000)) == 2
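# Hypothetical standalone invocation of the worker exercised above. The
# "flask" entry point is an assumption; the test only shows that the command
# group is registered on the app's CLI:
#
#   $ flask swpt_creditors process_ledger_updates --burst=1 --wait=0 --quit-early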
def test_process_log_additions(app, db_session, current_ts):
    _create_new_creditor(C_ID, activate=True)
    p.create_new_account(C_ID, D_ID)
    latest_update_id = p.get_account_config(C_ID, D_ID).config_latest_update_id
    p.update_account_config(
        creditor_id=C_ID,
        debtor_id=D_ID,
        is_scheduled_for_deletion=True,
        negligible_amount=1e30,
        allow_unsafe_deletion=False,
        latest_update_id=latest_update_id + 1,
    )
    entries1, _ = p.get_log_entries(C_ID, count=10000)

    runner = app.test_cli_runner()
    result = runner.invoke(args=['swpt_creditors', 'process_log_additions', '--wait=0', '--quit-early'])
    assert result.exit_code == 0
    assert not result.output
    entries2, _ = p.get_log_entries(C_ID, count=10000)
    assert len(entries2) > len(entries1)
def test_configure_interval(app, db_session, current_ts, caplog):
    # caplog.at_level() returns a context manager and is a no-op when called
    # as a bare statement, so set the capture level explicitly instead.
    caplog.set_level(logging.ERROR)
    ac = m.AgentConfig.query.one_or_none()
    if ac and ac.min_creditor_id == m.MIN_INT64:
        min_creditor_id = m.MIN_INT64 + 1
        max_creditor_id = m.MAX_INT64
    else:
        min_creditor_id = m.MIN_INT64
        max_creditor_id = m.MAX_INT64
    runner = app.test_cli_runner()

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', str(m.MIN_INT64 - 1), '-1'])
    assert result.exit_code != 0
    assert 'not a valid creditor ID' in caplog.text

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', '1', str(m.MAX_INT64 + 1)])
    assert result.exit_code != 0
    assert 'not a valid creditor ID' in caplog.text

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', '2', '1'])
    assert result.exit_code != 0
    assert 'invalid interval' in caplog.text

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', '-1', '1'])
    assert result.exit_code != 0
    assert 'contains 0' in caplog.text

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', '1', str(max_creditor_id)])
    assert result.exit_code == 0
    assert not result.output
    ac = m.AgentConfig.query.one()
    assert ac.min_creditor_id == 1
    assert ac.max_creditor_id == max_creditor_id

    caplog.clear()
    result = runner.invoke(args=[
        'swpt_creditors', 'configure_interval', '--', str(min_creditor_id), '-1'])
    assert result.exit_code == 0
    assert not result.output
    ac = m.AgentConfig.query.one()
    assert ac.min_creditor_id == min_creditor_id
    assert ac.max_creditor_id == -1
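# Hypothetical direct invocation of the command under test. The '--'
# separator (used throughout the test) lets negative creditor IDs pass as
# positional arguments rather than flags; the "flask" entry point is an
# assumption:
#
#   $ flask swpt_creditors configure_interval -- 1 4294967296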
| 34.192053 | 118 | 0.654852 | 694 | 5,163 | 4.54611 | 0.213256 | 0.066561 | 0.012678 | 0.055784 | 0.533122 | 0.487797 | 0.430745 | 0.42187 | 0.395246 | 0.381933 | 0 | 0.035608 | 0.216734 | 5,163 | 150 | 119 | 34.42 | 0.74456 | 0 | 0 | 0.366412 | 0 | 0 | 0.200077 | 0.037188 | 0 | 0 | 0 | 0 | 0.167939 | 1 | 0.030534 | false | 0 | 0.030534 | 0 | 0.061069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb4f9fcdd845899468417c334c142d81c1bbc6d8 | 888 | py | Python | pajbot/web/routes/base/playsounds.py | KasperHelsted/pajbot | c366dcfc5e6076f9adcfce24c7a666653068b031 | [
"MIT"
] | null | null | null | pajbot/web/routes/base/playsounds.py | KasperHelsted/pajbot | c366dcfc5e6076f9adcfce24c7a666653068b031 | [
"MIT"
] | null | null | null | pajbot/web/routes/base/playsounds.py | KasperHelsted/pajbot | c366dcfc5e6076f9adcfce24c7a666653068b031 | [
"MIT"
] | 1 | 2020-03-11T19:37:10.000Z | 2020-03-11T19:37:10.000Z | from flask import render_template
from pajbot.managers.db import DBManager
from pajbot.models.module import Module
from pajbot.models.playsound import Playsound
from pajbot.modules import PlaysoundModule
def init(app):
    @app.route("/playsounds/")
    def user_playsounds():
        with DBManager.create_session_scope() as session:
            playsounds = session.query(Playsound).filter(Playsound.enabled).all()
            playsound_module = session.query(Module).filter(Module.id == PlaysoundModule.ID).one_or_none()
            enabled = False
            if playsound_module is not None:
                enabled = playsound_module.enabled
            return render_template(
                "playsounds.html",
                playsounds=playsounds,
                module_settings=PlaysoundModule.module_settings(),
                playsounds_enabled=enabled,
)
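# Hypothetical wiring sketch (pajbot builds the Flask app elsewhere; the
# names below are placeholders):
#
#   from flask import Flask
#   app = Flask(__name__)
#   init(app)  # registers GET /playsounds/ on the app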
| 34.153846 | 106 | 0.665541 | 92 | 888 | 6.282609 | 0.434783 | 0.069204 | 0.055363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.256757 | 888 | 25 | 107 | 35.52 | 0.875758 | 0 | 0 | 0 | 0 | 0 | 0.030405 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.25 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
bb50ddc7c9fdf4690ca7e9bb60ec4a0df3dd0ba9 | 1,542 | py | Python | cla_backend/apps/timer/tests/test_managers.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 3 | 2019-10-02T15:31:03.000Z | 2022-01-13T10:15:53.000Z | cla_backend/apps/timer/tests/test_managers.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 206 | 2015-01-02T16:50:11.000Z | 2022-02-16T20:16:05.000Z | cla_backend/apps/timer/tests/test_managers.py | uk-gov-mirror/ministryofjustice.cla_backend | 4d524c10e7bd31f085d9c5f7bf6e08a6bb39c0a6 | [
"MIT"
] | 6 | 2015-03-23T23:08:42.000Z | 2022-02-15T17:04:44.000Z | from django.test import TestCase
from django.utils import timezone
from django.db import IntegrityError
from django.core.exceptions import MultipleObjectsReturned
from core.tests.mommy_utils import make_recipe, make_user
from timer.models import Timer
class RunningTimerManagerTestCase(TestCase):
def test_query_set(self):
timer1 = make_recipe("timer.Timer", stopped=None)
make_recipe("timer.Timer", stopped=timezone.now())
timer3 = make_recipe("timer.Timer", stopped=None)
make_recipe("timer.Timer", cancelled=True)
timers = Timer.running_objects.all()
self.assertItemsEqual(timers, [timer1, timer3])
def test_get_by_user_fails_with_multiple_timers(self):
try:
user = make_user()
make_recipe("timer.Timer", stopped=None, created_by=user, _quantity=2)
Timer.running_objects.get_by_user(user)
except (MultipleObjectsReturned, IntegrityError):
pass
else:
self.assertTrue(False, "It should raise MultipleObjectsReturned or IntegrityError")
def test_get_by_user_fails_when_no_timer(self):
user = make_user()
make_recipe("timer.Timer", stopped=timezone.now(), created_by=user)
self.assertRaises(IndexError, Timer.running_objects.get_by_user, user)
def test_get_by_user_returns_timer(self):
user = make_user()
timer = make_recipe("timer.Timer", stopped=None, created_by=user)
self.assertEqual(Timer.running_objects.get_by_user(user), timer)
| 35.860465 | 95 | 0.713359 | 191 | 1,542 | 5.507853 | 0.319372 | 0.051331 | 0.09981 | 0.13308 | 0.431559 | 0.387833 | 0.347909 | 0.229087 | 0.180608 | 0.096958 | 0 | 0.004026 | 0.194553 | 1,542 | 42 | 96 | 36.714286 | 0.842995 | 0 | 0 | 0.096774 | 0 | 0 | 0.0869 | 0.014916 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.129032 | false | 0.032258 | 0.193548 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |