index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,900 | 5ac27ada56a365bb33cc0776218c291a414cc2de | import argparse
import datetime
from collections import defaultdict
import fire
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import logger
from torch.nn.parallel import DistributedDataParallel as DDP
import math
import pprint
from torch.utils.tensorboard import SummaryWriter
import kitti_dataset
import sfmnet
from pair_frames_dataset import PairConsecutiveFramesDataset
log = logger.noop
def get_rank():
    """Rank of this process in the distributed group, or 0 outside DDP."""
    if dist.is_initialized():
        return dist.get_rank()
    return 0
def load(checkpoint_file, model, optimizer):
    """Restore model and optimizer state from a checkpoint file.

    Tries to interpret the checkpoint as an SfMNet3D first, then as an
    SfMNet2D. Also loads the optimizer state.

    Returns:
        The epoch number to resume at; 0 when there is no checkpoint to
        load (no file configured, or the file does not exist yet).
    """
    if checkpoint_file is None:
        # BUG FIX: previously returned None here; callers use the result
        # as an epoch number, so return 0 like the other no-checkpoint path.
        return 0
    try:
        # Remap tensors saved from cuda:0 onto this rank's device (or CPU).
        map_location = {
            'cuda:0': f'cuda:{get_rank()}' if torch.cuda.is_available() else 'cpu'}
        checkpoint = torch.load(checkpoint_file, map_location)
        # NOTE(review): rebinding the local `model` does not propagate the
        # loaded module back to the caller — confirm this is intended (the
        # optimizer state is what actually reaches the caller).
        model = sfmnet.SfMNet3D.load_from_params(
            checkpoint['model_hyperparams'], checkpoint['model_state_dict'])
        if model is None:  # `is None` instead of `== None`
            model = sfmnet.SfMNet2D.load_from_params(
                checkpoint['model_hyperparams'], checkpoint['model_state_dict'])
        if model is None:
            raise Exception('Cannot load checkpoint', checkpoint_file)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        log.INFO(
            f'RANK {get_rank()}: Loaded from checkpoint at {checkpoint_file}')
        # Older checkpoints may predate the 'epoch' key.
        return checkpoint.get('epoch', 0)
    except FileNotFoundError:
        return 0
def save(checkpoint_file, model, optimizer, epoch):
    """Atomically write a training checkpoint (rank 0 only)."""
    if get_rank() != 0 or checkpoint_file is None:
        return
    state = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'model_hyperparams': model.get_params(),
        'epoch': epoch,
    }
    # Write to a temp file first so an interrupted save never corrupts the
    # existing checkpoint, then atomically swap it into place.
    tmp_file = checkpoint_file + '.tmp'
    torch.save(state, tmp_file)
    os.replace(tmp_file, checkpoint_file)
    log.INFO(f'Checkpoint saved at {checkpoint_file}')
def memory_summary(device):
    # Human-readable CUDA memory report for `device`; placeholder string on
    # CPU-only hosts so DEBUG logging works everywhere.
    return torch.cuda.memory_summary(device) if torch.cuda.is_available() else 'NO CUDA DEVICE'
def noop_callback(*a, **k):
    """Default callback: accept any arguments and do nothing."""
    return None
def train_loop(*,
               device,
               dl_train,
               dl_validation=None,
               vis_point=None,
               train_model,
               validation_model,
               optimizer,
               mask_logit_noise_curriculum=None,
               num_epochs=1,
               start_at_epoch=0,
               log_metrics=noop_callback,
               using_ddp=False,
               checkpoint_file=None,
               checkpoint_freq=None,
               ):
    """Run the train/validation loop from `start_at_epoch` to `num_epochs`.

    Per epoch: (optionally) evaluate on `dl_validation`, then optimize over
    `dl_train`, report both metric sets through `log_metrics`, and write a
    checkpoint every `checkpoint_freq` epochs. Note that validation runs
    *before* the epoch's training pass, so the reported validation metrics
    reflect the weights produced by the previous epoch.
    """
    def get_mask_logit_noise(epoch):
        # Linear curriculum: ramp the mask-logit noise variance from 0 to 1
        # over the first `mask_logit_noise_curriculum` epochs (0 if disabled).
        if mask_logit_noise_curriculum is None:
            return 0.
        return min(1., epoch/mask_logit_noise_curriculum)

    def finalize_metrics(metrics):
        # Sum metric accumulators across ranks (DDP), then convert the sums
        # to per-sample averages on rank 0. Non-zero ranks keep raw values.
        if using_ddp:
            metrics = reduce_metrics(metrics)
        if get_rank() == 0:
            metrics = normalize_metrics(metrics)
        return metrics

    def reduce_metrics(metrics):
        # Pack all metric values into one tensor and sum-reduce onto rank 0
        # in a single collective call.
        nonlocal device
        t = torch.empty(len(metrics.keys()), device=device)
        for i, v in enumerate(metrics.values()):
            t[i] = v
        dist.reduce(t, dst=0)
        reduced = {}
        for i, k in enumerate(metrics.keys()):
            reduced[k] = t[i]
        return reduced

    def normalize_metrics(m):
        # Divide every accumulated metric by the sample count, then drop
        # the bookkeeping key.
        for k, v in m.items():
            if k == 'num_samples':
                continue
            m[k] = v / m['num_samples']
        del m['num_samples']
        return m

    def update_metrics(metrics, *, labels, im_estimate, out, loss=None, recon_loss):
        # Accumulate per-batch *sums* into `metrics`; they are normalized by
        # finalize_metrics. `loss` is omitted during validation.
        mask = out['mask']
        N, K, H, W = mask.shape
        metrics['num_samples'] += N
        if loss is not None:
            metrics['Loss/Total'] += loss
        metrics['Loss/Recon'] += recon_loss
        # When the dataset labels camera translation and the model emits
        # displacements, track the absolute error of the predicted camera
        # displacement, rescaled from normalized units to pixels.
        # NOTE(review): prefer `is not None` over `!= None` here.
        if 'camera_translation' in labels and out.get('displacement') != None:
            displacement = out.get('displacement')
            ct = labels['camera_translation'].to(device)
            H, W = tuple(mask.shape[2:4])
            # Need this in case using forwbackw and so batch size of displacement is 2*M = N
            M = ct.shape[0]
            ae = torch.sum(torch.abs(
                displacement[0:M, 0] * torch.tensor([W/2, H/2], device=device) - ct)) * N / M
            metrics['Label/CameraDisplAE'] += ae

    def run_step(im1, im2, labels, metrics):
        # One optimization step over a single training batch.
        optimizer.zero_grad()
        im1, im2 = im1.to(device), im2.to(device)
        log.DEBUG(f'Start of train batch {step}:', memory_summary(device))
        batch_size, C, H, W = im1.shape
        total_loss, recon_loss, im2_estimate, out = train_model(
            im1, im2, mask_logit_noise_var=get_mask_logit_noise(e))
        log.DEBUG(f'After forward {step}:', memory_summary(device))
        total_loss.backward()
        log.DEBUG(f'After backward {step}:', memory_summary(device))
        optimizer.step()
        update_metrics(
            metrics,
            loss=total_loss.item() * batch_size,
            recon_loss=recon_loss.item() * batch_size,
            im_estimate=im2_estimate,
            labels=labels,
            out=out
        )

    def run_validation(model, dl):
        # Evaluate over the whole validation loader with gradients disabled;
        # returns the un-normalized metric sums.
        model.eval()
        with torch.no_grad():
            log.DEBUG('Start of validation', memory_summary(device))
            assert(len(dl.dataset) > 0)
            m = defaultdict(int)
            for im1, im2, labels in dl:
                N, C, H, W = im1.shape
                im1, im2 = im1.to(device), im2.to(device)
                total_loss, recon_loss, im2_estimate, out = validation_model(
                    im1, im2, reduction=torch.sum)
                update_metrics(
                    metrics=m,
                    labels=labels,
                    recon_loss=recon_loss,
                    out=out,
                    im_estimate=im2_estimate
                )
        model.train()
        return m

    step = 0
    for e in range(start_at_epoch, num_epochs):
        # Re-seed the distributed sampler each epoch so ranks reshuffle.
        if isinstance(dl_train.sampler, torch.utils.data.DistributedSampler):
            dl_train.sampler.set_epoch(e)
        if dl_validation is not None:
            validation_metrics = run_validation(
                validation_model, dl_validation)
            validation_metrics = finalize_metrics(validation_metrics)
        train_metrics = defaultdict(int)
        for im1, im2, labels in dl_train:
            run_step(im1, im2, labels, train_metrics)
            step += 1
        train_metrics = finalize_metrics(train_metrics)
        if dl_validation is not None:
            log_metrics(epoch=e, step=step,
                        metric=validation_metrics, prefix='Validation/')
        log_metrics(epoch=e, step=step, metric=train_metrics, prefix='Train/')
        if checkpoint_file is not None and e % checkpoint_freq == 0:
            save(checkpoint_file, train_model, optimizer, e)
        if using_ddp:
            # Keep ranks in lockstep so none races into the next epoch
            # while rank 0 is writing the checkpoint.
            dist.barrier()
def train(*,
          data_dir,
          dataset_type,
          rgb=True,  # Used for kitti dataset
          tensorboard_dir=None,
          checkpoint_file=None,
          checkpoint_freq=10,
          dl_num_workers=6,
          validation_split=0.1,
          seed=42,
          K=1,
          camera_translation=False,
          C=16,
          fc_layer_width=128,
          num_hidden_layers=1,
          conv_depth=2,
          depth_smooth_reg=0.,
          flow_smooth_reg=0.,
          mask_smooth_reg=0.,
          flowreg_coeff=0.,
          forwbackw_reg_coeff=0.,
          dimension=3,  # Either 2dsfm or 3dsfm
          lr=0.001,
          mask_logit_noise_curriculum=None,
          batch_size=16,
          num_epochs=1,
          n_vis_point=None,
          vis_freq=50,
          using_ddp=False,
          debug=False,
          returning=False,
          ):
    """Top-level training entry point (driven by fire.Fire from the CLI).

    Builds the dataset, model, optimizer and data loaders, then delegates
    to train_loop. Returns the trained SfMNet when `returning` is True,
    otherwise None.
    """
    args = locals()  # must run first so only the CLI arguments are captured
    # BUG FIX: these were two independent `if` statements, so
    # dataset_type='consecutive' built its dataset and then still fell into
    # the else branch and raised.
    if dataset_type == 'consecutive':
        ds = PairConsecutiveFramesDataset(data_dir)
    elif dataset_type == 'kitti_stereo':
        ds = kitti_dataset.CollectionKittiRawStereoDataset(data_dir, rgb)
    else:
        # BUG FIX: `raise '<string>'` is a TypeError in Python 3.
        raise ValueError(f'dataset_type {dataset_type} not supported')
    im_channels, H, W = ds[0][0].shape
    # sfm is the only model with parameters. The validation_model and
    # train_model return the self-supervised loss for training purposes.
    if dimension == 2:
        sfm = sfmnet.SfMNet2D(H=H, W=W, im_channels=im_channels,
                              C=C, K=K, camera_translation=camera_translation,
                              conv_depth=conv_depth,
                              hidden_layer_widths=[fc_layer_width]*num_hidden_layers
                              )
    elif dimension == 3:
        sfm = sfmnet.SfMNet3D(H=H, W=W, im_channels=im_channels,
                              C=C, K=K, conv_depth=conv_depth,
                              hidden_layer_widths=[fc_layer_width]*num_hidden_layers
                              )
    else:
        # BUG FIX: an unsupported `dimension` previously left `sfm` unbound
        # and crashed later with a confusing NameError.
        raise ValueError(f'dimension {dimension} not supported (use 2 or 3)')
    validation_model = sfmnet.LossModule(sfm_model=sfm,
                                         l1_flow_reg_coeff=flowreg_coeff,
                                         depth_smooth_reg=depth_smooth_reg,
                                         flow_smooth_reg=flow_smooth_reg,
                                         mask_smooth_reg=mask_smooth_reg,
                                         )
    # Wrap with the forward-backward consistency loss only when enabled.
    if forwbackw_reg_coeff != 0.:
        train_model = sfmnet.ForwBackwLoss(
            validation_model, forwbackw_reg_coeff)
    else:
        train_model = validation_model
    n_params = sfm.total_params()
    if using_ddp:
        setup_dist()
        device = torch.device('cuda', 0)
        model = DDP(train_model.to(device), device_ids=[device])
    elif torch.cuda.is_available():
        device = torch.device('cuda', 0)
        model = train_model.to(device)
    else:
        device = torch.device('cpu')
        # BUG FIX: `model` was left unbound on the CPU-only path, crashing
        # later in load()/save().
        model = train_model.to(device)
    global log
    rank = get_rank()
    if rank == 0:  # BUG FIX: was `rank is 0` (identity comparison on ints)
        pprint.PrettyPrinter(indent=4).pprint(args)
    log = logger.logger(logger.LEVEL_INFO, rank)
    log.INFO('Initialized the model which has', n_params, 'parameters')
    log.INFO('Dataset has size', len(ds))
    log.INFO('Training on', device)
    log.DEBUG(f'Inputs has size ({im_channels},{H},{W})')
    optimizer = torch.optim.Adam(sfm.parameters(), lr=lr)
    start_at_epoch = 0
    if checkpoint_file is not None:
        start_at_epoch = load(checkpoint_file, model, optimizer)
    # Deterministic train/validation split so resumed runs keep the same
    # partition.
    n_validation = int(len(ds) * validation_split)
    n_train = len(ds) - n_validation
    log.DEBUG(f'Validation size {n_validation}, train size {n_train}')
    ds_train, ds_validation = torch.utils.data.random_split(
        ds, [n_train, n_validation], generator=torch.Generator().manual_seed(seed))
    sampler_train = torch.utils.data.DistributedSampler(
        ds_train) if using_ddp else None
    sampler_validation = torch.utils.data.DistributedSampler(
        ds_validation, shuffle=False) if using_ddp else None
    dl_train = torch.utils.data.DataLoader(
        ds_train, batch_size=batch_size,
        shuffle=(sampler_train is None), sampler=sampler_train,
        num_workers=dl_num_workers, pin_memory=True)
    dl_validation = torch.utils.data.DataLoader(
        ds_validation, sampler=sampler_validation,
        batch_size=batch_size, shuffle=False,
        num_workers=dl_num_workers, pin_memory=True)
    # Only rank 0 writes tensorboard summaries.
    if tensorboard_dir is not None and rank == 0:
        writer = SummaryWriter(log_dir=tensorboard_dir)
        writer.add_text('model_summary', str(sfm))
    else:
        writer = None
    # Fixed batch of validation samples used for periodic visualization.
    if n_vis_point is not None:
        vis_dl = torch.utils.data.DataLoader(
            ds_validation, batch_size=n_vis_point, shuffle=False)
        vis_point = next(iter(vis_dl))
    else:
        vis_point = None
    best_validation = math.inf
    start_time = time.monotonic()

    def log_metrics(*, step, epoch, metric, prefix=''):
        # Rank-0-only callback: prints metrics, mirrors them to tensorboard,
        # and periodically renders a visualization figure.
        nonlocal best_validation
        nonlocal start_time
        nonlocal rank
        if rank != 0:
            return
        best_validation = min(best_validation, metric.get(
            'Loss/Validation/Recon', math.inf))
        s = f'epoch: {epoch} step: {step} time_elapsed: {time.monotonic() - start_time:.2f}s '
        for k, v in metric.items():
            s += f'{prefix}{k}: {v:7f} '
        log.INFO(s)
        if writer is not None:
            for k, v in metric.items():
                writer.add_scalar(prefix+k, v, step)
        if vis_point is not None and epoch % vis_freq == 0:
            validation_model.eval()
            vp = (vis_point[0].to(device), vis_point[1].to(device))
            fig = sfmnet.visualize(validation_model, *vp)
            validation_model.train()
            if writer is not None:
                writer.add_figure(f'Visualization', fig, step)

    train_loop(
        device=device,
        validation_model=validation_model,
        train_model=train_model,
        dl_train=dl_train,
        dl_validation=dl_validation,
        optimizer=optimizer,
        mask_logit_noise_curriculum=mask_logit_noise_curriculum,
        num_epochs=num_epochs,
        start_at_epoch=start_at_epoch,
        log_metrics=log_metrics,
        using_ddp=using_ddp,
        checkpoint_file=checkpoint_file,
        checkpoint_freq=checkpoint_freq,
    )
    if writer is not None:
        writer.add_hparams({
            'lr': lr,
            'flowreg': flowreg_coeff,
        }, {
            'Validation/Recon': best_validation
        })
    if checkpoint_file is not None:
        save(checkpoint_file, model, optimizer, num_epochs)
    if using_ddp:
        cleanup_dist()
    if returning:
        return sfm
def setup_dist():
    """Initialize the default process group from launcher env vars.

    Expects MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE to be set (e.g. by
    torchrun). Each DDP process is expected to own exactly one visible GPU.
    """
    # BUG FIX: `torch.cuda.is_available` was missing its call parentheses,
    # so the expression tested the truthiness of the *function object* and
    # the assert never actually guarded the device count.
    assert not torch.cuda.is_available() or torch.cuda.device_count() == 1
    env_dict = {
        key: os.environ[key]
        for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE")
    }
    print(f"[{os.getpid()}] Initializing process group with: {env_dict}")
    # NCCL for GPU training, gloo as the CPU fallback.
    dist.init_process_group(
        backend="nccl" if torch.cuda.is_available() else 'gloo', init_method='env://')
    print(
        f"[{os.getpid()}] world_size = {dist.get_world_size()}, "
        + f"rank = {get_rank()}, backend={dist.get_backend()}"
    )
def cleanup_dist():
    # Tear down the default process group created by setup_dist().
    dist.destroy_process_group()
if __name__ == '__main__':
fire.Fire(train)
|
988,901 | ac0d317884bedab6a5b055b7139da844b5d09885 | #!/usr/bin/python
from collections import OrderedDict
def find_rank_index(scores, alice_score, current_rank_index=0):
    """Scan `scores` backwards from `current_rank_index` down to index 0.

    Returns (score, index) of the first score strictly greater than
    `alice_score`, or (None, None) when no scanned score beats it.
    """
    index = current_rank_index
    while index >= 0:
        if scores[index] > alice_score:
            return scores[index], index
        index -= 1
    return None, None
def climbingLeaderboard(scores, alice):
    """Print Alice's dense rank on the leaderboard after each of her games.

    `scores` is the existing leaderboard in descending order (equal scores
    share a rank); `alice` lists her scores in chronological, non-decreasing
    order, so each search can resume where the previous one stopped.
    """
    # Dense ranking: identical scores get one rank.
    # BUG FIX: dict.has_key() is Python-2-only; `in` works on both 2 and 3,
    # as do the single-argument parenthesized prints below.
    ranks = OrderedDict()
    rank = 1
    for score in scores:
        if score not in ranks:
            ranks[score] = rank
            rank += 1
    search_from_index = len(scores) - 1
    highest_rank_achieved = False
    for alice_score in alice:
        if highest_rank_achieved:
            # Once Alice tops the board she stays at rank 1.
            print(1)
            continue
        if alice_score in ranks:
            # Exact tie with an existing score shares its rank.
            print(ranks[alice_score])
            continue
        # Scan backwards for the closest score strictly above Alice's
        # (inlined from the old find_rank_index helper so this function is
        # self-contained).
        next_largest_score = None
        for i in range(search_from_index, -1, -1):
            if scores[i] > alice_score:
                next_largest_score, search_from_index = scores[i], i
                break
        if next_largest_score is None:
            # Nothing on the board beats this score: rank 1 from now on.
            highest_rank_achieved = True
            print(1)
        else:
            print(ranks[next_largest_score] + 1)
def main():
    # Python 2 driver: reads the leaderboard size n, the descending scores,
    # Alice's game count m, and her scores; climbingLeaderboard prints her
    # rank after each game.
    #scores = [100, 100, 50, 40, 40, 20, 10]
    #alice = [5, 25, 50, 120]
    n = int(raw_input().strip())
    scores = map(int, raw_input().strip().split(' '))
    m = int(raw_input().strip())
    alice = map(int, raw_input().strip().split(' '))
    # NOTE(review): this prints the size of Alice's list, which is not part
    # of the expected output — looks like leftover debugging.
    print len(alice)
    #result = climbingLeaderboard(scores, alice)
    climbingLeaderboard(scores, alice)
988,902 | 282e651640d53c034c7cc342dccb4b61b5f37d66 | from __future__ import annotations
import logging
import math
from typing import Dict, List, TYPE_CHECKING, Type
from dcs.mapping import Point
from dcs.task import Task
from dcs.unittype import UnitType
from game import persistency
from game.debriefing import AirLosses, Debriefing
from game.infos.information import Information
from game.operation.operation import Operation
from game.theater import ControlPoint
from gen import AirTaskingOrder
from gen.ground_forces.combat_stance import CombatStance
from ..unitmap import UnitMap
if TYPE_CHECKING:
from ..game import Game
DIFFICULTY_LOG_BASE = 1.1
EVENT_DEPARTURE_MAX_DISTANCE = 340000
MINOR_DEFEAT_INFLUENCE = 0.1
DEFEAT_INFLUENCE = 0.3
STRONG_DEFEAT_INFLUENCE = 0.5
class Event:
    """Base class for a campaign-turn event.

    Owns mission generation (`generate`) and the post-mission bookkeeping
    (`commit`): unit losses, base captures, aircraft transfers and
    front-line movement. Subclasses override `skip` for unflown events.
    """
    silent = False
    informational = False
    game = None  # type: Game
    location = None  # type: Point
    from_cp = None  # type: ControlPoint
    to_cp = None  # type: ControlPoint
    difficulty = 1  # type: int
    BONUS_BASE = 5

    def __init__(self, game, from_cp: ControlPoint, target_cp: ControlPoint, location: Point, attacker_name: str, defender_name: str):
        self.game = game
        self.from_cp = from_cp
        self.to_cp = target_cp
        self.location = location
        self.attacker_name = attacker_name
        self.defender_name = defender_name

    @property
    def is_player_attacking(self) -> bool:
        return self.attacker_name == self.game.player_name

    @property
    def tasks(self) -> List[Type[Task]]:
        # Subclasses list the DCS tasks relevant to this event type.
        return []

    def bonus(self) -> int:
        # Reward grows logarithmically with the target's importance.
        return int(math.log(self.to_cp.importance + 1, DIFFICULTY_LOG_BASE) * self.BONUS_BASE)

    def generate(self) -> UnitMap:
        """Generate the mission for this event, save the .miz, and return
        the unit map used to match DCS units back to game objects."""
        Operation.prepare(self.game)
        unit_map = Operation.generate()
        Operation.current_mission.save(
            persistency.mission_path_for("liberation_nextturn.miz"))
        return unit_map

    @staticmethod
    def _transfer_aircraft(ato: AirTaskingOrder, losses: AirLosses,
                           for_player: bool) -> None:
        # Move each flight's surviving members from its departure airfield
        # to its arrival airfield, skipping impossible or inconsistent cases.
        for package in ato.packages:
            for flight in package.flights:
                # No need to transfer to the same location.
                if flight.departure == flight.arrival:
                    continue
                # Don't transfer to bases that were captured. Note that if the
                # airfield was back-filling transfers it may overflow. We could
                # attempt to be smarter in the future by performing transfers in
                # order up a graph to prevent transfers to full airports and
                # send overflow off-map, but overflow is fine for now.
                if flight.arrival.captured != for_player:
                    logging.info(
                        f"Not transferring {flight} because {flight.arrival} "
                        "was captured")
                    continue
                transfer_count = losses.surviving_flight_members(flight)
                if transfer_count < 0:
                    logging.error(f"{flight} had {flight.count} aircraft but "
                                  f"{transfer_count} losses were recorded.")
                    continue
                aircraft = flight.unit_type
                available = flight.departure.base.total_units_of_type(aircraft)
                if available < transfer_count:
                    logging.error(
                        f"Found killed {aircraft} from {flight.departure} but "
                        f"that airbase has only {available} available.")
                    continue
                flight.departure.base.aircraft[aircraft] -= transfer_count
                if aircraft not in flight.arrival.base.aircraft:
                    # TODO: Should use defaultdict.
                    flight.arrival.base.aircraft[aircraft] = 0
                flight.arrival.base.aircraft[aircraft] += transfer_count

    def complete_aircraft_transfers(self, debriefing: Debriefing) -> None:
        # Apply transfers for both coalitions' air tasking orders.
        self._transfer_aircraft(self.game.blue_ato, debriefing.air_losses,
                                for_player=True)
        self._transfer_aircraft(self.game.red_ato, debriefing.air_losses,
                                for_player=False)

    @staticmethod
    def commit_air_losses(debriefing: Debriefing) -> None:
        # Decrement destroyed aircraft from their departure bases.
        for loss in debriefing.air_losses.losses:
            aircraft = loss.unit_type
            cp = loss.departure
            available = cp.base.total_units_of_type(aircraft)
            if available <= 0:
                logging.error(
                    f"Found killed {aircraft} from {cp} but that airbase has "
                    "none available.")
                continue
            logging.info(f"{aircraft} destroyed from {cp}")
            cp.base.aircraft[aircraft] -= 1

    @staticmethod
    def commit_front_line_losses(debriefing: Debriefing) -> None:
        # Decrement destroyed front-line (armor) units from their origins.
        for loss in debriefing.front_line_losses:
            unit_type = loss.unit_type
            control_point = loss.origin
            available = control_point.base.total_units_of_type(unit_type)
            if available <= 0:
                logging.error(
                    f"Found killed {unit_type} from {control_point} but that "
                    "airbase has none available.")
                continue
            logging.info(f"{unit_type} destroyed from {control_point}")
            control_point.base.armor[unit_type] -= 1

    @staticmethod
    def commit_ground_object_losses(debriefing: Debriefing) -> None:
        # Move killed units from each group's live list to its loss list.
        for loss in debriefing.ground_object_losses:
            # TODO: This should be stored in the TGO, not in the pydcs Group.
            if not hasattr(loss.group, "units_losts"):
                loss.group.units_losts = []
            loss.group.units.remove(loss.unit)
            loss.group.units_losts.append(loss.unit)

    def commit_building_losses(self, debriefing: Debriefing) -> None:
        # Kill destroyed buildings and surface the event to the player.
        for loss in debriefing.building_losses:
            loss.ground_object.kill()
            self.game.informations.append(Information(
                "Building destroyed",
                f"{loss.ground_object.dcs_identifier} has been destroyed at "
                f"location {loss.ground_object.obj_name}", self.game.turn
            ))

    @staticmethod
    def commit_damaged_runways(debriefing: Debriefing) -> None:
        for damaged_runway in debriefing.damaged_runways:
            damaged_runway.damage_runway()

    def commit(self, debriefing: Debriefing):
        """Apply all mission results to the persistent game state: losses,
        base captures, aircraft transfers, carcasses and front-line
        strength changes."""
        logging.info("Committing mission results")
        self.commit_air_losses(debriefing)
        self.commit_front_line_losses(debriefing)
        self.commit_ground_object_losses(debriefing)
        self.commit_building_losses(debriefing)
        self.commit_damaged_runways(debriefing)
        # ------------------------------
        # Captured bases
        # NOTE(review): the player coalition is hard-coded to BLUE here; the
        # commented-out branch suggests RED support was planned but dropped.
        #if self.game.player_country in db.BLUEFOR_FACTIONS:
        coalition = 2  # Value in DCS mission event for BLUE
        #else:
        #    coalition = 1  # Value in DCS mission event for RED
        for captured in debriefing.base_capture_events:
            try:
                # Capture events arrive encoded as "<cp_id>||<coalition>||...".
                # NOTE(review): `id` shadows the builtin of the same name.
                id = int(captured.split("||")[0])
                new_owner_coalition = int(captured.split("||")[1])
                captured_cps = []
                for cp in self.game.theater.controlpoints:
                    if cp.id == id:
                        if cp.captured and new_owner_coalition != coalition:
                            # Player-held base taken by the enemy.
                            for_player = False
                            info = Information(cp.name + " lost !", "The ennemy took control of " + cp.name + "\nShame on us !", self.game.turn)
                            self.game.informations.append(info)
                            captured_cps.append(cp)
                        elif not(cp.captured) and new_owner_coalition == coalition:
                            # Enemy base taken by the player.
                            for_player = True
                            info = Information(cp.name + " captured !", "We took control of " + cp.name + "! Great job !", self.game.turn)
                            self.game.informations.append(info)
                            captured_cps.append(cp)
                        else:
                            continue
                        cp.capture(self.game, for_player)
                for cp in captured_cps:
                    logging.info("Will run redeploy for " + cp.name)
                    self.redeploy_units(cp)
            except Exception:
                logging.exception(f"Could not process base capture {captured}")
        self.complete_aircraft_transfers(debriefing)
        # Destroyed units carcass
        # -------------------------
        for destroyed_unit in debriefing.state_data.destroyed_statics:
            self.game.add_destroyed_units(destroyed_unit)
        # -----------------------------------
        # Compute damage to bases: for every player base facing an enemy
        # base, decide who won the front-line exchange and shift strength.
        for cp in self.game.theater.player_points():
            enemy_cps = [e for e in cp.connected_points if not e.captured]
            for enemy_cp in enemy_cps:
                print("Compute frontline progression for : " + cp.name + " to " + enemy_cp.name)
                delta = 0.0
                player_won = True
                ally_casualties = debriefing.casualty_count(cp)
                enemy_casualties = debriefing.casualty_count(enemy_cp)
                ally_units_alive = cp.base.total_armor
                enemy_units_alive = enemy_cp.base.total_armor
                print(ally_units_alive)
                print(enemy_units_alive)
                print(ally_casualties)
                print(enemy_casualties)
                # Casualty ratio, smoothed so zero casualties don't divide by 0.
                ratio = (1.0 + enemy_casualties) / (1.0 + ally_casualties)
                player_aggresive = cp.stances[enemy_cp.id] in [CombatStance.AGGRESSIVE, CombatStance.ELIMINATION, CombatStance.BREAKTHROUGH]
                if ally_units_alive == 0:
                    player_won = False
                    delta = STRONG_DEFEAT_INFLUENCE
                elif enemy_units_alive == 0:
                    player_won = True
                    delta = STRONG_DEFEAT_INFLUENCE
                elif cp.stances[enemy_cp.id] == CombatStance.RETREAT:
                    # Retreating always concedes ground.
                    player_won = False
                    delta = STRONG_DEFEAT_INFLUENCE
                else:
                    if enemy_casualties > ally_casualties:
                        player_won = True
                        if cp.stances[enemy_cp.id] == CombatStance.BREAKTHROUGH:
                            delta = STRONG_DEFEAT_INFLUENCE
                        else:
                            if ratio > 3:
                                delta = STRONG_DEFEAT_INFLUENCE
                            elif ratio < 1.5:
                                delta = MINOR_DEFEAT_INFLUENCE
                            else:
                                delta = DEFEAT_INFLUENCE
                    elif ally_casualties > enemy_casualties:
                        if ally_units_alive > 2*enemy_units_alive and player_aggresive:
                            # Even with casualties if the enemy is overwhelmed, they are going to lose ground
                            player_won = True
                            delta = MINOR_DEFEAT_INFLUENCE
                        elif ally_units_alive > 3*enemy_units_alive and player_aggresive:
                            # NOTE(review): unreachable — any count satisfying
                            # > 3*enemy also satisfies the > 2*enemy branch
                            # above, so STRONG influence is never applied here.
                            player_won = True
                            delta = STRONG_DEFEAT_INFLUENCE
                        else:
                            # But is the enemy is not outnumbered, we lose
                            player_won = False
                            if cp.stances[enemy_cp.id] == CombatStance.BREAKTHROUGH:
                                delta = STRONG_DEFEAT_INFLUENCE
                            else:
                                delta = STRONG_DEFEAT_INFLUENCE
                # No progress with defensive strategies
                if player_won and cp.stances[enemy_cp.id] in [CombatStance.DEFENSIVE, CombatStance.AMBUSH]:
                    print("Defensive stance, progress is limited")
                    delta = MINOR_DEFEAT_INFLUENCE
                if player_won:
                    print(cp.name + " won ! factor > " + str(delta))
                    cp.base.affect_strength(delta)
                    enemy_cp.base.affect_strength(-delta)
                    info = Information("Frontline Report",
                                       "Our ground forces from " + cp.name + " are making progress toward " + enemy_cp.name,
                                       self.game.turn)
                    self.game.informations.append(info)
                else:
                    print(cp.name + " lost ! factor > " + str(delta))
                    enemy_cp.base.affect_strength(delta)
                    cp.base.affect_strength(-delta)
                    info = Information("Frontline Report",
                                       "Our ground forces from " + cp.name + " are losing ground against the enemy forces from " + enemy_cp.name,
                                       self.game.turn)
                    self.game.informations.append(info)

    def skip(self):
        # Subclasses implement behavior for a skipped (unflown) event.
        pass

    def redeploy_units(self, cp):
        """Auto redeploy units to the newly captured base `cp` from its
        allied neighbors."""
        ally_connected_cps = [ocp for ocp in cp.connected_points if cp.captured == ocp.captured]
        enemy_connected_cps = [ocp for ocp in cp.connected_points if cp.captured != ocp.captured]
        # If the newly captured cp does not have enemy connected cp,
        # then it is not necessary to redeploy frontline units there.
        if len(enemy_connected_cps) == 0:
            return
        else:
            # From each ally cp, send reinforcements
            for ally_cp in ally_connected_cps:
                total_units_redeployed = 0
                own_enemy_cp = [ocp for ocp in ally_cp.connected_points if ally_cp.captured != ocp.captured]
                moved_units = {}
                # If the connected base, does not have any more enemy cp connected.
                # Or if it is not the opponent redeploying forces there (enemy AI will never redeploy all their forces at once)
                if len(own_enemy_cp) > 0 or not cp.captured:
                    # Still-contested neighbor: send only half its armor.
                    for frontline_unit, count in ally_cp.base.armor.items():
                        moved_units[frontline_unit] = int(count/2)
                        total_units_redeployed = total_units_redeployed + int(count/2)
                else:  # So if the old base, does not have any more enemy cp connected, or if it is an enemy base
                    for frontline_unit, count in ally_cp.base.armor.items():
                        moved_units[frontline_unit] = count
                        total_units_redeployed = total_units_redeployed + count
                cp.base.commision_units(moved_units)
                ally_cp.base.commit_losses(moved_units)
                if total_units_redeployed > 0:
                    info = Information("Units redeployed", "", self.game.turn)
                    info.text = str(total_units_redeployed) + " units have been redeployed from " + ally_cp.name + " to " + cp.name
                    self.game.informations.append(info)
                    logging.info(info.text)
class UnitsDeliveryEvent(Event):
    """Informational event representing a pending unit delivery to a
    control point; the accumulated units are committed when the event is
    skipped at turn end."""
    informational = True

    def __init__(self, attacker_name: str, defender_name: str,
                 from_cp: ControlPoint, to_cp: ControlPoint,
                 game: Game) -> None:
        super(UnitsDeliveryEvent, self).__init__(game=game,
                                                 location=to_cp.position,
                                                 from_cp=from_cp,
                                                 target_cp=to_cp,
                                                 attacker_name=attacker_name,
                                                 defender_name=defender_name)
        # Pending deliveries: unit type -> count.
        self.units: Dict[Type[UnitType], int] = {}

    def __str__(self) -> str:
        return "Pending delivery to {}".format(self.to_cp)

    def deliver(self, units: Dict[Type[UnitType], int]) -> None:
        # Merge a new delivery order into the pending totals.
        for k, v in units.items():
            self.units[k] = self.units.get(k, 0) + v

    def skip(self) -> None:
        # Announce each pending delivery, then commit all units to the
        # destination base.
        for k, v in self.units.items():
            if self.to_cp.captured:
                name = "Ally "
            else:
                name = "Enemy "
            self.game.message(
                f"{name} reinforcements: {k.id} x {v} at {self.to_cp.name}")
        self.to_cp.base.commision_units(self.units)
|
988,903 | abb78de6f14cd69a1ae132b750e437ac6342078b |
from prediction.randomforest_hist_prediction import RandomForestPrediction
from preprocessing.dataset import DatapointKey as DK
import os
def run_experiment_grid_prediction():
    """Grid-search evaluation of RandomForestPrediction over every
    combination of definition, cutoff point, feature interval, prediction
    interval and prediction start day (simulated data only)."""
    n_bins = 20
    n_estimators = 1000
    # Labeled simulated dataset path; assumes the env var is set — a missing
    # variable yields None and fails later inside the model.
    input_path_sim = os.getenv("DSLAB_CLIMATE_LABELED_DATA")
    definitions = [DK.CP07, DK.UT, DK.U65]
    cutoff_points = [90, 120]
    feature_intervals = [7, 14, 21, 28]
    prediction_intervals = [7, 14, 21, 28]
    prediction_start_days = [0, 7, 14, 21]
    # Evaluate for simulated data only
    for definition in definitions:
        for cutoff in cutoff_points:
            for feature in feature_intervals:
                for prediction in prediction_intervals:
                    for start_day in prediction_start_days:
                        print("Evaluating for "
                              "definition {}, "
                              "cutoff {}, "
                              "feature {}, "
                              "prediction {},"
                              "start day {}".format(
                                  definition,
                                  cutoff,
                                  feature,
                                  prediction,
                                  start_day))
                        model = RandomForestPrediction(
                            definition=definition,
                            path=input_path_sim,
                            n_bins=n_bins,
                            n_estimators=n_estimators,
                            cutoff_point=cutoff,
                            features_interval=feature,
                            prediction_start_day=start_day,
                            prediction_interval=prediction
                        )
                        model.evaluate(plot=False)
if __name__ == '__main__':
run_experiment_grid_prediction()
|
988,904 | d1cd13fe7d5fd257acf9e4ef57c5ed5193da7ebf | import sys
import math
def solve(ss):
    """Find two distinct subsets of `ss` with the same sum.

    Enumerates all 2**len(ss) subset sums, sorts them, and returns the
    element lists of the first adjacent pair sharing a sum, or None when
    every subset sum is unique.
    """
    subset_sums = [(0, [])]
    for value in ss:
        extended = [(total + value, members + [value])
                    for total, members in subset_sums]
        subset_sums += extended
    subset_sums.sort()
    for prev, cur in zip(subset_sums, subset_sums[1:]):
        if prev[0] == cur[0]:
            return (prev[1], cur[1])
    return None
def readline():
    # Read one line from the module-level `input` stream (bound to
    # sys.stdin below) and strip surrounding whitespace/newlines.
    # NOTE(review): the module-level `input` shadows the builtin.
    return input.readline().strip(' \r\n\t')
def do_test(input):
    # Parse one test case: first token is the element count (unused), the
    # rest are the values; returns solve()'s pair of equal-sum subsets or None.
    # NOTE(review): the `input` parameter is never used — readline() reads
    # from the module-level stream.
    line = readline().split()
    ss = [int(x) for x in line[1:]]
    res = solve(ss)
    return res
# Driver (Python 2): read the number of cases from stdin, then print each
# case's pair of equal-sum subsets, or "Impossible".
input = sys.stdin
N = int(readline())
for test in range(N):
    ans = do_test(input)
    print 'Case #%d:' % (test+1,)
    if ans is None:
        print 'Impossible'
    else:
        print ' '.join(str(x) for x in ans[0])
        print ' '.join(str(x) for x in ans[1])
    sys.stdout.flush()
|
988,905 | 493c9a6a491db44e6ef7036e32cf25c607a95629 | import gevent
from gevent.monkey import patch_socket; patch_socket()
from gevent import spawn
from irc import Dispatcher, IRCBot
host = 'irc.freenode.net'
port = 6667
nick = 'spawnbot'
rooms = ['#lawrence-botwars']
MAX_BOTS = 11
BOTS = []
class SpawningDispatcher(Dispatcher):
    """IRC command dispatcher that spawns/removes clone bots on demand."""
    def spawn(self, sender, message, channel, is_ping, reply):
        # Handle "spawn <n>": spawn n bots, or remove |n| bots when n < 0.
        # Only honored when the bot was pinged inside a channel.
        if not is_ping or not channel:
            return
        try:
            n = int(message.split()[-1])
        except ValueError:
            return "%s doesn't look like a number" % message.split()[-1]
        if n < 0:
            # NOTE(review): this echoes the negative value ("removing -2 bots").
            reply('removing %s bots' % n)
            for x in range(abs(n)):
                if len(BOTS) == 1:
                    # Never remove the last remaining (master) bot.
                    continue
                b = BOTS.pop()
                b.conn.disconnect()
                del(b)
            return
        if len(BOTS) + n > MAX_BOTS:
            return 'sorry, would exceed maximum of %s bots' % MAX_BOTS
        reply('spawning %s bots' % n)
        for x in range(n):
            if len(BOTS) >= MAX_BOTS:
                return 'reached max'
            add_bot('%s%s' % (nick, len(BOTS)))
    def sleep(self, sender, message, channel, is_ping, reply):
        # Handle "sleep <seconds>": cooperative gevent sleep (does not block
        # the other greenlets), then report how long we slept.
        if not channel:
            return
        n = float(message.split()[-1])
        gevent.sleep(n)
        return 'slept %ss' % n
    def get_patterns(self):
        # Regex -> handler table consumed by the irc Dispatcher base class.
        return (
            ('^spawn', self.spawn),
            ('^sleep', self.sleep),
        )
# start telnet backdoor on port 2000 — a localhost-only REPL with access to
# this module's namespace, for live debugging of the running bots
from gevent.backdoor import BackdoorServer
server = BackdoorServer(('127.0.0.1', 2000), locals=locals())
server.start()
def add_bot(nick):
    """Create an IRCBot, register it in BOTS, and start it in a greenlet.

    Returns (bot, greenlet).
    """
    # NOTE(review): this parameter shadows the module-level `nick`.
    bot = IRCBot(host, port, nick, rooms, [SpawningDispatcher])
    BOTS.append(bot)
    g = spawn(bot.run_forever)
    return bot, g
# Bootstrap the master bot and block until its greenlet exits.
master, g = add_bot(nick)
g.join()  # run until the master bot exits
|
988,906 | a4b8b77cd5b2186e0ee471c5850087eddd5a14c4 | import cv2
import numpy as np
# Load the book scan as a single-channel image (flag 0 = IMREAD_GRAYSCALE).
image = cv2.imread('./img/Origin_of_Species.jpg', 0)
cv2.imshow('Original', image)
# Binary threshold: pixels above 127 become 255, everything else 0.
# `ret` is the threshold value actually used (127 here).
ret, tresh = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
cv2.imshow('Limiarizacao Binaria', tresh)
# Block until any key is pressed, then tear down the display windows.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
988,907 | 17ee566154ad0d3f9d93b20d05605f67d7bfc167 | from sofi.ui import Strikethrough
def test_basic():
    # An empty Strikethrough renders as a bare <s> element.
    assert(str(Strikethrough()) == "<s></s>")
def test_text():
    # Text content is wrapped verbatim inside the <s> tags.
    assert(str(Strikethrough("text")) == "<s>text</s>")
def test_custom_class_ident_style_and_attrs():
    # Every keyword option surfaces as the corresponding HTML attribute,
    # in the fixed order: id, class, style, then extra attrs.
    assert(str(Strikethrough("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
           == "<s id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</s>")
|
988,908 | b0da1a80bd6cbcb822f404fe55a1069bff2f0421 | import os
import requests
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from skimage import data, io
from skimage.transform import rescale, resize
from skimage import img_as_ubyte
def get_big_page_id(Title):
    """Return the revision id of the largest (by recorded byte size)
    revision of a Wikipedia article.

    Walks the full revision history via the MediaWiki API in batches of
    500 (the API maximum for unprivileged clients), continuing backwards
    with ``rvstartid`` until a short batch signals the end.

    Args:
        Title: article title, e.g. "Origin of Species".

    Returns:
        The revision id (int) whose recorded size is the largest.
    """
    BASE_URL = "http://en.wikipedia.org/w/api.php"
    parameters = {'action': 'query',
                  'format': 'json',
                  'titles': Title,
                  'prop': 'revisions',
                  'rvprop': 'ids|size',
                  'rvlimit': '500'}
    rev_lengths = []  # (revid, size) tuples across all batches
    while True:
        wp_call = requests.get(BASE_URL, params=parameters)
        response = wp_call.json()
        pages = response['query']['pages']
        page_id = list(pages.keys())[0]
        pages_revisions = pages[page_id]['revisions']
        # A continuation batch repeats the rvstartid revision as its first
        # entry, so skip it on every batch after the first.
        # BUG FIX: the original also re-appended the final short batch a
        # second time after the pagination loop, duplicating entries.
        start = 1 if 'rvstartid' in parameters else 0
        for d in pages_revisions[start:]:
            rev_lengths.append((d['revid'], d['size']))
        if len(pages_revisions) < int(parameters['rvlimit']):
            break
        # Resume the next batch from the last revision we saw.
        parameters['rvstartid'] = rev_lengths[-1][0]
    # BUG FIX: the original took max() over (revid, size) tuples, which
    # selects the *newest* revision (largest revid), not the largest one,
    # and then matched `biggest in tup`, which could also hit a revid.
    # Select explicitly by the size field.
    longestpage, _ = max(rev_lengths, key=lambda t: t[1])
    return longestpage
def get_big_page_size(Title):
    """Pick a revision of `Title` from its full revision history (see NOTE),
    render it headlessly and return (scrollWidth, scrollHeight) in pixels.

    NOTE(review): `max(rev_lengths)` sorts (revid, size) tuples by revid, so
    the *newest* revision is chosen, not the largest — confirm intent.
    """
    TITLE = Title
    BASE_URL = "http://en.wikipedia.org/w/api.php"
    # (revid, size) tuples for every revision; filled below.
    rev_lengths = []
    # Runs once; the while/else `else` clause then pages through the rest.
    while not rev_lengths:
        # print('in while loop')
        parameters = {'action': 'query',
                      'format': 'json',
                      'titles': TITLE,
                      'prop': 'revisions',
                      'rvprop': 'ids|size',
                      'rvlimit': '500'}
        wp_call = requests.get(BASE_URL, params=parameters)
        response = wp_call.json()
        query = response['query']
        pages = query['pages']
        page_id = list(pages.keys())[0]
        pages_info = pages[page_id]
        pages_revisions = pages_info['revisions']
        for d in pages_revisions:
            tup = (d['revid'], d['size'])
            rev_lengths.append(tup)
    else:
        # Continue paging while full 500-row batches keep arriving.
        while str(len(pages_revisions)) == parameters['rvlimit']:
            # print('tuple list size: ' + str(len(rev_lengths)))
            start_id = (rev_lengths[-1])[0]
            parameters = {'action': 'query',
                          'format': 'json',
                          'titles': TITLE,
                          'prop': 'revisions',
                          'rvprop': 'ids|size',
                          'rvlimit': '500',
                          'rvstartid': start_id}
            wp_call = requests.get(BASE_URL, params=parameters)
            response = wp_call.json()
            query = response['query']
            pages = query['pages']
            page_id = list(pages.keys())[0]
            pages_info = pages[page_id]
            pages_revisions = pages_info['revisions']
            # Index 0 repeats the rvstartid revision; skip it.
            for d in pages_revisions[1:]:
                tup = (d['revid'], d['size'])
                rev_lengths.append(tup)
            # NOTE(review): re-appends the final partial batch (duplicates).
            if len(pages_revisions) > 0 and len(pages_revisions) < int(parameters['rvlimit']):
                # print('tuple list size: ' + str(len(rev_lengths)))
                for d in pages_revisions[1:]:
                    tup = (d['revid'], d['size'])
                    rev_lengths.append(tup)
    biggest = max(rev_lengths)[1]
    for tup in rev_lengths:
        if biggest in tup:
            bigtup = tup
    longestpage = bigtup[0]
    # start selenium section: render the chosen revision headless and
    # measure the document's full size.
    snapshot_url = f'https://en.wikipedia.org/w/index.php?title={TITLE}'
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--start-maximized')
    page_id_param = f'&oldid={longestpage}'
    full_url = snapshot_url + page_id_param
    driver = webdriver.Chrome(options=chrome_options)
    driver.get(full_url)
    #time.sleep(2)
    #driver.implicitly_wait(1)
    width = driver.execute_script("return document.body.scrollWidth")
    height = driver.execute_script("return document.body.scrollHeight")
    # print(f'page {longestpage} height: {height}')
    driver.quit()
    return width, height
def get_big_page_url(Title):
    """Pick a revision of `Title` (see NOTE) and return the permalink URL
    `index.php?title=...&oldid=...` for it; also loads it once in a headless
    browser as a side effect.

    NOTE(review): `max(rev_lengths)` picks the highest revid (newest
    revision), not the largest size; the measured `height` below is unused.
    """
    TITLE = Title
    BASE_URL = "http://en.wikipedia.org/w/api.php"
    rev_lengths = []
    # Runs once to fetch the first batch; the while/else `else` pages on.
    while not rev_lengths:
        # print('in while loop')
        parameters = {'action': 'query',
                      'format': 'json',
                      'titles': TITLE,
                      'prop': 'revisions',
                      'rvprop': 'ids|size',
                      'rvlimit': '500'}
        wp_call = requests.get(BASE_URL, params=parameters)
        response = wp_call.json()
        query = response['query']
        pages = query['pages']
        page_id = list(pages.keys())[0]
        pages_info = pages[page_id]
        pages_revisions = pages_info['revisions']
        for d in pages_revisions:
            tup = (d['revid'], d['size'])
            rev_lengths.append(tup)
    else:
        # Keep paging while full 500-row batches arrive.
        while str(len(pages_revisions)) == parameters['rvlimit']:
            # print('tuple list size: ' + str(len(rev_lengths)))
            start_id = (rev_lengths[-1])[0]
            parameters = {'action': 'query',
                          'format': 'json',
                          'titles': TITLE,
                          'prop': 'revisions',
                          'rvprop': 'ids|size',
                          'rvlimit': '500',
                          'rvstartid': start_id}
            wp_call = requests.get(BASE_URL, params=parameters)
            response = wp_call.json()
            query = response['query']
            pages = query['pages']
            page_id = list(pages.keys())[0]
            pages_info = pages[page_id]
            pages_revisions = pages_info['revisions']
            # Index 0 repeats the rvstartid revision; skip it.
            for d in pages_revisions[1:]:
                tup = (d['revid'], d['size'])
                rev_lengths.append(tup)
            # NOTE(review): re-appends the final partial batch (duplicates).
            if len(pages_revisions) > 0 and len(pages_revisions) < int(parameters['rvlimit']):
                # print('tuple list size: ' + str(len(rev_lengths)))
                for d in pages_revisions[1:]:
                    tup = (d['revid'], d['size'])
                    rev_lengths.append(tup)
    biggest = max(rev_lengths)[1]
    for tup in rev_lengths:
        if biggest in tup:
            bigtup = tup
    longestpage = bigtup[0]
    print(type(longestpage), longestpage)
    # start selenium section
    snapshot_url = f'https://en.wikipedia.org/w/index.php?title={TITLE}'
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--start-maximized')
    page_id_param = f'&oldid={longestpage}'
    full_url = snapshot_url + page_id_param
    driver = webdriver.Chrome(options=chrome_options)
    driver.get(full_url)
    time.sleep(2)
    # NOTE(review): `height` is computed but never used by this function.
    height = driver.execute_script("return document.body.scrollHeight")
    # print(f'page {longestpage} height: {height}')
    driver.quit()
    return full_url
def get_big_page_pic(Title):
    """Pick a revision of `Title` (see NOTE), screenshot its full-height
    rendering in headless Chrome, and return the absolute path of the PNG.

    NOTE(review): `max(rev_lengths)` picks the highest revid (newest
    revision), not the largest size — confirm intent.
    """
    TITLE = Title
    BASE_URL = "http://en.wikipedia.org/w/api.php"
    rev_lengths = []
    # Runs once to fetch the first batch; the while/else `else` pages on.
    while not rev_lengths:
        print('Initializing Browser')
        parameters = {'action': 'query',
                      'format': 'json',
                      'titles': TITLE,
                      'prop': 'revisions',
                      'rvprop': 'ids|size',
                      'rvlimit': '500'}
        wp_call = requests.get(BASE_URL, params=parameters)
        response = wp_call.json()
        query = response['query']
        pages = query['pages']
        page_id = list(pages.keys())[0]
        pages_info = pages[page_id]
        print('Getting a picture of the wrong page height...')
        pages_revisions = pages_info['revisions']
        for d in pages_revisions:
            tup = (d['revid'], d['size'])
            rev_lengths.append(tup)
    else:
        # Keep paging while full 500-row batches arrive.
        while str(len(pages_revisions)) == parameters['rvlimit']:
            # print('tuple list size: ' + str(len(rev_lengths)))
            start_id = (rev_lengths[-1])[0]
            parameters = {'action': 'query',
                          'format': 'json',
                          'titles': TITLE,
                          'prop': 'revisions',
                          'rvprop': 'ids|size',
                          'rvlimit': '500',
                          'rvstartid': start_id}
            wp_call = requests.get(BASE_URL, params=parameters)
            response = wp_call.json()
            query = response['query']
            pages = query['pages']
            page_id = list(pages.keys())[0]
            pages_info = pages[page_id]
            pages_revisions = pages_info['revisions']
            # Index 0 repeats the rvstartid revision; skip it.
            for d in pages_revisions[1:]:
                tup = (d['revid'], d['size'])
                rev_lengths.append(tup)
            # NOTE(review): re-appends the final partial batch (duplicates).
            if len(pages_revisions) > 0 and len(pages_revisions) < int(parameters['rvlimit']):
                # print('tuple list size: ' + str(len(rev_lengths)))
                for d in pages_revisions[1:]:
                    tup = (d['revid'], d['size'])
                    rev_lengths.append(tup)
    biggest = max(rev_lengths)[1]
    for tup in rev_lengths:
        if biggest in tup:
            bigtup = tup
    longestpage = bigtup[0]
    # start selenium section: size the window to the whole document so one
    # screenshot captures the full page.
    snapshot_url = f'https://en.wikipedia.org/w/index.php?title={TITLE}'
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--start-maximized')
    page_id_param = f'&oldid={longestpage}'
    full_url = snapshot_url + page_id_param
    driver = webdriver.Chrome(options=chrome_options)
    driver.get(full_url)
    driver.implicitly_wait(2)
    height = driver.execute_script("return document.body.scrollHeight")
    width = driver.execute_script("return document.body.scrollWidth")
    imgpath = os.path.join(os.path.curdir, f'{TITLE}LongShot.png')
    driver.set_window_size(width, height) # the trick
    time.sleep(2)
    driver.save_screenshot(imgpath)
    driver.quit()
    photo = io.imread(imgpath)
    # NOTE(review): resizing to the image's own (height, width) is a no-op
    # apart from the float round-trip — was a downscale intended here?
    photo_resized = resize(photo, (photo.shape[0], photo.shape[1]))
    #smallerphoto = photo[::2, ::2]
    io.imsave(imgpath, img_as_ubyte(photo_resized))
    # print(f'page {longestpage} height: {height}')
    # NOTE(review): second quit on an already-quit driver — redundant.
    driver.quit()
    return os.path.abspath(imgpath)
|
988,909 | 1754d1c82180ffece03b14603c971f7b983ce99b | import numpy as np
import operator
import matplotlib.pyplot as plt
from Construction import constructMatrix
from Construction import popSearchQueue
from Construction import constructGoalMatrix
# Define the node class
class Node():
    """Search-tree node: a board state plus a link to its parent state."""

    def __init__(self, parent=None, state=None):
        self.state = state
        self.parent = parent
# Check for goal state
def checkIfGoalState(x, goalState):
    """Return True iff x carries 'A', 'B' and 'C' at the same positions they
    occupy in goalState (raises ValueError if goalState lacks any of them,
    matching the original behaviour of list.index)."""
    positions = {label: goalState.index(label) for label in ('A', 'B', 'C')}
    return all(x[pos] == label for label, pos in positions.items())
# Find a target element in a list
def find(target, list):
for i in range(len(list)):
base = list[i]
if operator.eq(target, base):
return False
return True
# Find a node in a list
def findNode(state, list):
for node in list:
if node.state == state:
return node
print("The node is not found!")
# Methods for moving agent to its neighbor grid
def moveAgent(State, size, Goal, searchedNode, Start, parentOfStartState):
endloop = False
step = 0
position = State.index('M')
x = int(position / size)
y = position - x * size
queue = popSearchQueue(x, y, size)
queue = queue.astype(np.int64)
parentState = []
initialState = State.copy()
initialNode = Node(parentOfStartState, initialState)
while queue.shape[0] != 0:
newState = initialState.copy()
grid = queue[0]
grid = grid.astype(np.int64) # pop out a grid
queue = np.delete(queue, 0, 0)
column = grid[0]
row = grid[1]
temp = newState[column * size + row]
newState[column * size + row] = newState[x * size + y]
newState[x * size + y] = temp
new_node = Node(initialState, newState)
# print(newState)
if checkIfGoalState(new_node.state, Goal):
searchedNode.append(initialNode)
route = [new_node.state]
end = True
current = new_node
while end:
parent = current.parent
route = route + [parent]
current = findNode(parent, searchedNode)
if parent == Start:
end = False
route.reverse()
print("The shortest path is: ", route)
print("Solution is found!")
endloop = True
break
else:
parentState.append(new_node)
step = step + 1
return parentState, initialState, step, endloop
# BFS tree search
def BFSTreesearch(Start, size, goal):
start_node = Node(None, Start)
searchedNodes = []
searchedNodes.append(start_node)
parentState, searchedInitialState, totalstep, end = moveAgent(Start, size, goal, searchedNodes, Start, None)
print("Number of nodes expanded(enable duplicate): ", totalstep)
while len(parentState) != 0:
length = len(parentState)
lengthlist = []
lengthlist.append(length)
lengthlist.sort()
State = parentState.pop(0)
statelist1, b, step, end = moveAgent(State.state, size, goal, searchedNodes, Start, State.parent)
totalstep = totalstep + step
print("Number of nodes expanded(enable duplicate): ", totalstep)
if end:
break
for i in range(len(statelist1)):
parentState.append(statelist1[i])
searchedNodes.append(State)
# b_copy = b.copy()
return totalstep
print("Space comlexity is : ", lengthlist[-1])
# A = constructMatrix(4, 4, 4, 1, 4, 2, 4, 3, 4, 4)
# Goal = constructGoalMatrix(4, 4, 2, 2, 3, 2, 4, 2)
# Demo run: build a 3x3 board and its goal layout (encoding defined by the
# Construction module) and time the BFS tree search on it.
A2 = constructMatrix(3, 3, 1, 1, 2, 1, 3, 1, 3, 3)
Goal2 = constructGoalMatrix(3, 3, 3, 3, 2, 3, 1, 3)
import time
start_time = time.time()
BFSTreesearch(A2, 3, Goal2)
print("--- %s seconds ---" % (time.time() - start_time))
|
988,910 | 5f606482c2ed8218581294dd89828230a1c73c95 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
# this python script is to calculate electronic coupling between two specific frontier orbital using block diagonalization method.
# steps
# 1) build whole matrix from the raw lower triangle matrix
# 2) block-diagonalize the matrix
# 3) get the wanted orbital and the related site energy and coupling
# syntax: ./calHda_BD.py -f fockmatrix -s overlapmatrix -no number_of_orbital -ne number_of_electron -o output
from sys import argv,exit
import numpy as np
import numpy.linalg as la
from scipy.linalg import block_diag,sqrtm
from math import sqrt
import argparse
#script name
script = argv[0]
#parser setup
#for help information, use ./calHda_BD.py -h [--help]
parser = argparse.ArgumentParser()
parser.add_argument('-f', dest = 'fock', help = 'Fock matrix, lower triangle form', type = str)
parser.add_argument('-s', dest = 'overlap', help = 'Overlap matrix, lower triangle form', type = str)
parser.add_argument('-nb', dest = 'numofbasis', nargs = '+', help = 'Number of basis in each block', type = int)
parser.add_argument('-ne', dest = 'numofelectron', nargs = '+', help = 'Number of electron in each block', type = int)
parser.add_argument('-o', dest = 'output', help = 'Output file prefix, block_diag as the default', type = str)
#parse the arguments and store them in the defined directory
options = vars(parser.parse_args())
#test
#print options
#input validation
if options['output'] == None:
out_prefix = 'block_diag'
else:
out_prefix = options['output']
basislist = options['numofbasis']
electronlist = options['numofelectron']
#########################
# 1) build whole matrix
#########################
#######################################
def build_matrix(filename):
    """Read a whitespace-separated lower-triangle matrix file and return
    (full symmetric matrix, dimension).

    The file stores the lower triangle row by row (diagonal included); the
    upper triangle is filled in by conjugate symmetry.
    """
    # `with` closes the file (the original left the handle open).
    with open(filename, 'r') as f:
        lines = f.readlines()
    # Flatten every whitespace-separated token into one list of floats.
    numlist = []
    for line in lines:
        linelist = line.split()
        for num in linelist:
            numlist.append(float(num))
    # Recover the dimension n from the triangle length n*(n+1)/2.
    tria_num = len(numlist)
    for i in range(int(sqrt(tria_num*2)),1,-1):
        if i*(i+1)/2.0 == tria_num:
            dim = i
            break
    else:
        # for/else: no dimension matched -> element count is not triangular.
        # (print statements converted to calls: the original Python-2-only
        # `print "..."` is a syntax error on Python 3; this form runs on both.)
        print("The input parsed is not a lower triangle matrix, please look into it and try again")
        exit()
    # Build the full matrix, lower triangle first.
    full_matrix = np.matrix(np.zeros((dim,dim)))
    # set the original point (0,0) as the first element
    full_matrix[0,0] = numlist[0]
    idx = 1
    # Fill the strict lower triangle plus diagonal, row by row.
    for i in range(1,dim):
        for j in range(0,i+1):
            full_matrix[i,j] = numlist[idx]
            idx = idx + 1
    # Mirror into the upper triangle (Hermitian symmetry; real data in practice).
    for i in range(dim):
        for j in range(i+1,dim):
            full_matrix[i,j] = np.conjugate(full_matrix[j,i])
    return full_matrix, dim
#######################################
#get the full matrix and examine the dimension
fockfile = options['fock']
overfile = options['overlap']
fock_full, fock_dim = build_matrix(fockfile)
over_full, over_dim = build_matrix(overfile)
if fock_dim != over_dim:
print "The dimension of both files don't match. Please look into it and try again."
exit()
#########################
# 2) block-diagonlize
#########################
# need to know
# basis set of this calculation is required for calculating the electronic coupling correctly
#############################################
def build_BD(fock,over,basis):
    """Block-diagonalize the Fock matrix in the symmetrically-orthogonalized
    (Löwdin) basis.

    fock, over : square Fock and overlap matrices of the same dimension.
    basis      : number of basis functions in each fragment block; must sum
                 to the matrix dimension.
    Returns (block-diagonalized Fock matrix, sorted full-MO eigenvalues).
    """
    #get the dimension of the matrix
    dim = np.shape(fock)[0]
    #test the basis input
    if sum(basis) != dim:
        # (Converted from the Python-2-only `print "..."` statement so the
        # function parses on Python 3 as well; single-arg form runs on both.)
        print("Input in -nb section is wrong! Please look into it and try again.")
        exit()
    #get np.matrix format
    fock = np.matrix(fock)
    over = np.matrix(over)
    # Löwdin orthogonalization: S^{-1/2} built from the eigendecomposition
    # of the overlap matrix, then F' = T^T F T.
    U_eval, U_evec = la.eig(over)
    over_prime = np.matrix(np.diag(U_eval))
    over_T = U_evec * la.inv(sqrtm(over_prime)) * U_evec.transpose()
    fock_O = over_T.transpose() * fock * over_T
    # Slice the orthogonalized Fock matrix into per-fragment diagonal blocks.
    submatrix = []
    num_basis = 0
    for i in range(len(basis)):
        submatrix.append(fock_O[num_basis:num_basis+basis[i],num_basis:num_basis+basis[i]])
        num_basis = num_basis + basis[i]
    # Diagonalize each block, sorting eigenpairs by ascending eigenvalue.
    eval_sub = []
    evec_sub = []
    for matrix in submatrix:
        eval_tmp, evec_tmp = la.eig(matrix)
        index = eval_tmp.argsort()
        eval_sub.append(eval_tmp[index])
        evec_sub.append(evec_tmp[:,index])
    # Assemble the block-diagonal transformation and rotate the Fock matrix.
    U = block_diag(*evec_sub)
    F = U.transpose() * fock_O * U
    # For reference: eigenvalues of the full (unblocked) MO problem.
    MO_val, MO_vec = la.eig(fock_O)
    MO_index = MO_val.argsort()
    MO_val = MO_val[MO_index]
    return F, MO_val
#############################################
#########################
# 3) get reduced fock
#########################
#############################################
def get_BD(fock,basis,electron):
    """Extract the frontier-orbital (HOMO) sub-block of a block-diagonalized
    Fock matrix.

    Closed-shell assumption: two electrons per orbital, so each fragment's
    HOMO index inside its block is (n_electron + 1)//2 - 1. Returns the
    len(electron) x len(electron) matrix of Fock elements between those
    orbitals (site energies on the diagonal, couplings off-diagonal).
    """
    dim = len(electron)
    orb_index = []
    num_basis = 0
    for i in range(len(electron)):
        # Integer division (`//`): identical to the original `/` under
        # Python 2, but under Python 3 `/` produced a float and broke the
        # matrix indexing below.
        orb = (electron[i]+1)//2
        orb_index.append(num_basis+orb-1)
        num_basis = num_basis + basis[i]
    # Gather the HOMO-HOMO Fock elements in row-major order.
    reduced_F = []
    for i in orb_index:
        for j in orb_index:
            reduced_F.append(fock[i,j])
    F = np.array(reduced_F).reshape([dim,dim])
    return F
#############################################
dim = fock_dim
fock_BD, MO_val = build_BD(fock_full,over_full,basislist)
reduced_F = get_BD(fock_BD,basislist,electronlist)
#################################################
############### ###############
############### OUTPUT STREAMING ###############
############### ###############
#################################################
#block diagnolized fock matrix output
fock_BD_out = open(out_prefix+'_BDmatrix.dat', 'w')
for i in range(dim):
for j in range(dim):
fock_BD_out.write('%e '%fock_BD[i,j].real)
fock_BD_out.write('\n')
fock_BD_out.close()
#reduced-fock matrix output
fock_reduce_out = open(out_prefix+'_redF.dat', 'w')
unit = len(basislist)
for i in range(unit):
for j in range(unit):
fock_reduce_out.write('%e '%reduced_F[i,j].real)
fock_reduce_out.write('\n')
fock_reduce_out.close()
#full MO energy output
MO_out = open(out_prefix+'_MO.dat', 'w')
for i in range(len(MO_val)):
MO_out.write('%e'%MO_val[i].real)
MO_out.write('\n')
MO_out.close()
|
988,911 | 6357d4f459fe7f62aaa39a14e44681c8561b6794 |
from celery_config import app_task
from importlib import import_module
import pkgutil
import os
import sys
import tasks
from tasks.computation.sum import Sum
if __name__ == '__main__':
    # Enqueue a sample Sum task on the Celery broker (fire-and-forget).
    task_b = Sum()
    task_b.delay(1, 201.0)
|
988,912 | 2c3ceb288fd67340bfa77201fbe2a4946d86b50a | import numpy as np
def dfs(t,e,n):
    """Depth-first visit from vertex t over adjacency matrix e (1 = edge).

    Relies on module globals set up by bianli_dfs: `book` (visited flags),
    `stack` (visit order) and `top` (index of the last recorded vertex).
    Stops early once all n vertices have been recorded (top == n-1).
    """
    global top
    global stack
    global book
    # All vertices already recorded — nothing left to explore.
    if top==n-1:
        return
    for i in range(n):
        # Unvisited neighbour (e value 1 marks an edge; 9999 marks no edge).
        if book[i]==0 and e[t][i]==1:
            book[i]=1
            top+=1
            stack[top]=i
            dfs(i,e,n)
    return
def bianli_dfs(e,start):
    """Run a depth-first traversal of adjacency matrix e from `start` and
    print the visit order. Initializes the module globals used by dfs().
    """
    global stack
    global top
    global book
    n=len(e)
    # book: visited flags; stack: visit order; top: last filled stack index.
    book = np.zeros((n,),dtype=int)
    stack = np.zeros((n,),dtype=int)
    top=0
    stack[top]=start
    book[start]=1
    dfs(start,e,n)
    # Header reads "DFS traversal result".
    print("深度优先遍历结果")
    for i in range(0,top+1):
        print("%d " %stack[i], "")
def bianli_bfs(e, start):
    """Breadth-first traversal of adjacency matrix e (value 1 = edge) from
    `start`; prints the visit order and returns None."""
    n = len(e)
    visited = [False] * n
    order = []        # visit order (the original kept this in `stack`)
    pending = [start] # FIFO queue; `cursor` plays the role of `head`
    visited[start] = True
    order.append(start)
    cursor = 0
    done_early = False
    while cursor < len(pending):
        current = pending[cursor]
        for nb in range(n):
            # Enqueue each unvisited neighbour in index order.
            if e[current][nb] == 1 and not visited[nb]:
                pending.append(nb)
                visited[nb] = True
                order.append(nb)
                # Stop as soon as every vertex has been recorded.
                if len(order) >= n:
                    done_early = True
                    break
        if done_early:
            break
        cursor += 1
    # Header reads "BFS traversal result".
    print("广度优先遍历结果")
    for idx in range(len(order)):
        print("%d " % order[idx], "")
if __name__=="__main__":
    # Demo adjacency matrix: 1 marks an edge, 9999 marks "no edge"
    # (weights are unused by the traversals; only == 1 is tested).
    e = [
        [0,1,1,9999,1],
        [1,0,9999,1,9999],
        [1,9999,0,9999,1],
        [9999,1,9999,0,9999],
        [1,9999,1,9999,0]
    ]
    bianli_bfs(e,0)
    bianli_dfs(e,0)
|
988,913 | f8485d30fce6ff9d2f7fc16d8e0ee9a669e2c20e | # -*- coding: utf-8 -*-
# Filename: __main__
# Author: brayton
# Datetime: 2019-Oct-14 2:59 PM
# Smoke-test output confirming the package entry point runs.
print('>>>>>>>>>... hello')
|
988,914 | f6ad22212408baac5a95e73976c0c9a136713a22 | import RPi.GPIO as GPIO
import time
import DHT_read as DHT
dhtPin =11
def loop():
    """Poll the DHT11 sensor on dhtPin every 2 seconds forever, printing the
    read status and the humidity/temperature values."""
    dht = DHT.DHT(dhtPin)
    sumCnt = 0
    while (True):
        sumCnt += 1
        chk = dht.readDHT11()
        print("The sumcnt is : %d \t chk: %d" %(sumCnt, chk))
        # Compare status codes with `==` instead of `is`: identity checks on
        # integers only worked by accident of CPython's small-int caching.
        if (chk == dht.DHTLIB_OK):
            print("DHT11 OK!!")
        elif (chk == dht.DHTLIB_ERROR_CHKSUM):
            print("DHTLIB_ERROR_CHKSUM")
        elif (chk == dht.DHTLIB_ERROR_TIMEOUT):
            # NOTE(review): this timeout branch prints "Check sum Error",
            # which looks swapped with the checksum branch above — confirm.
            print("Check sum Error")
        else:
            print("Other error")
        print("Humidity: %.2f, \t Temprature: %.2f \n" %(dht.humidity, dht.temp))
        time.sleep(2)
if __name__ =="__main__":
    try:
        loop()
    # Ctrl-C: release the GPIO pins before exiting.
    except KeyboardInterrupt:
        GPIO.cleanup()
        exit()
988,915 | 2bef4385451b7c0af819217141c201a4955c5378 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 20:54:37 2017
@author: Саша
"""
import numpy as np
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
def load_data():
    """Load CIFAR-10, scale pixel values to [0, 1], and one-hot encode the
    10 class labels. Returns (x_train, y_train, x_test, y_test)."""
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Scale 0-255 pixel values into [0, 1] as float32.
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # One-hot encode the labels.
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)
    return (x_train, y_train, x_test, y_test)
def create_model():
    """Build and compile a small CIFAR-10 CNN: two conv blocks (32 then 64
    filters) with max-pooling and dropout, followed by a 512-unit dense
    layer and a 10-way softmax; SGD + categorical cross-entropy."""
    model = Sequential()
    # Block 1: two 3x3 conv layers (32 filters) + 2x2 max-pool.
    model.add(Conv2D(32, (3, 3), padding = 'same',
                     input_shape = (32, 32, 3),
                     activation = 'relu'))
    model.add(Conv2D(32, (3, 3), activation = 'relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    # Regulization layer
    model.add(Dropout(0.25))
    # Block 2: two 3x3 conv layers (64 filters) + 2x2 max-pool.
    model.add(Conv2D(64, (3, 3), padding = 'same',
                     activation = 'relu'))
    model.add(Conv2D(64, (3, 3), activation = 'relu'))
    model.add(MaxPooling2D(pool_size = (2, 2)))
    model.add(Dropout(0.25))
    # Convert from matrix to array
    model.add(Flatten())
    model.add(Dense(512, activation = 'relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation = 'softmax'))
    model.compile(loss = 'categorical_crossentropy', optimizer = 'SGD',
                  metrics = ['accuracy'])
    return model
def dump_data(model):
    """Serialize the model: architecture to model.json, weights to
    mnist_model.h5."""
    with open("model.json", "w") as arch_file:
        arch_file.write(model.to_json())
    model.save_weights("mnist_model.h5")
    return
def load_neuralNetwork(model):
    """Rebuild the network from model.json + mnist_model.h5 and recompile it.

    The incoming `model` argument is immediately replaced; it is kept only
    for interface compatibility with existing callers.
    """
    # Local import: the original referenced model_from_json without ever
    # importing it, so calling this function raised NameError.
    from keras.models import model_from_json
    with open("model.json") as file:
        model = model_from_json(file.read())
    model.load_weights("mnist_model.h5")
    model.compile(loss = "categorical_crossentropy",
                  optimizer = "SGD", metrics = ["accuracy"])
    return model
def main():
    """Train the CIFAR-10 CNN for 30 epochs, checkpointing after each one,
    then report test accuracy."""
    np.random.seed(42) # For eqvivalent results
    x_train, y_train, x_test, y_test = load_data()
    model = create_model()
    # One epoch per fit() call so the model can be dumped after every epoch.
    for i in range(30):
        model.fit(x_train, y_train, batch_size = 32, epochs = 1,
                  verbose = 1, validation_split = 0.1, shuffle = True)
        dump_data(model)
    scores = model.evaluate(x_test, y_test, verbose = 0)
    print("Accuracy: %.2f%%" % (scores[1] * 100))
    return
main()
|
988,916 | fe46ec4aabb910743e36293742ecc202447c9b1b | #CSDojo
a_string = 'ABCDD'
def Longest(a_string):
    """Record character-run lengths into a dict and return the item(s) with
    the maximum recorded length.

    NOTE(review): as captured here the function has two `else:` clauses at
    the same indentation for one `if` — the original indentation appears to
    have been mangled in extraction; the true pairing of the second `else`
    must be confirmed against the original file before relying on this.
    """
    dic = dict()
    stack = ''
    i = 0
    for i in range(len(a_string)):
        if a_string[i] in stack:
            stack += a_string[i]
            if i == len(a_string) - 1:
                dic[a_string[i]] = len(stack)
        else:
            # Run broken: record the finished run, start a new one.
            if stack != '':
                dic[stack[0]] = len(stack)
            stack = a_string[i]
            if i == len(a_string) - 1:
                dic[a_string[i]] = len(stack)
        else:
            stack += a_string[i]
            if i == len(a_string) - 1:
                dic[a_string[i]] = len(stack)
    print(dic)
    return [item for item in dic.items() if item[1] == max(dic.values())]
print(Longest(a_string))
|
988,917 | 9267528cc6ad48858f66786a7ae8c746b2d3bdd2 | # Generated by Django 3.0.5 on 2021-04-03 20:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the Cita, Paciente and
    Doctor tables, links Doctor to Cita (many-to-many) and Cita to
    Paciente via a cascading foreign key. Generated code — do not edit
    field definitions by hand."""

    dependencies = [
        ('appMediSystem', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Cita',
            fields=[
                ('idCita', models.AutoField(primary_key=True, serialize=False)),
                ('fechaCita', models.DateField()),
                ('motivoCita', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Paciente',
            fields=[
                ('idPaciente', models.AutoField(primary_key=True, serialize=False)),
                ('pesoPaciente', models.CharField(max_length=100)),
                ('generoPaciente', models.CharField(max_length=100)),
                ('nombrePaciente', models.CharField(max_length=100)),
                ('tipoDeSangrePaciente', models.CharField(max_length=100)),
                ('cedulaPaciente', models.CharField(max_length=100)),
                ('edadPaciente', models.IntegerField()),
                ('alturaPaciente', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Doctor',
            fields=[
                ('idDoctor', models.AutoField(primary_key=True, serialize=False)),
                ('especialidadDoctor', models.CharField(max_length=200)),
                ('nombreDoctor', models.CharField(max_length=100)),
                ('citasPorDoctor', models.ManyToManyField(to='appMediSystem.Cita')),
            ],
        ),
        migrations.AddField(
            model_name='cita',
            name='pacienteFK',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='appMediSystem.Paciente'),
        ),
    ]
|
988,918 | 8516a3f0abe80f8122a967023fa736a98cbb376d | import boto3
import json
import time
import random
import pandas
import datetime
def isValidGame(game_modes, human_players, duration):
    """Return True for a full ten-human game in an accepted mode lasting
    longer than 780 seconds; each argument is a one-element list as produced
    by DataFrame.to_dict(orient="list")."""
    accepted_modes = (1, 2, 3, 5, 16, 22)
    mode_ok = game_modes[0] in accepted_modes
    lobby_full = human_players[0] == 10
    long_enough = duration[0] > 780
    if mode_ok and lobby_full and long_enough:
        return True
    return False
def generate_kinesis_record(item):
    """Serialize one match record to a JSON string for the Kinesis
    put_record payload.

    Bug fix: the original ignored its `item` argument and serialized the
    module-level `info` variable instead, silently coupling this helper to
    the caller's loop state.
    """
    return json.dumps(item)
# NOTE(review): AWS_ACCESS_KEY / AWS_SECRET_ACCESS_KEY are not defined
# anywhere in this module — as written this raises NameError; confirm where
# the credentials are meant to come from.
client = boto3.client(
    's3',
    aws_access_key_id=AWS_ACCESS_KEY,
    aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
kinesis_client = boto3.client('kinesis', region_name='us-east-1')
# NOTE(review): this rebinding discards the credentialed S3 client above.
client = boto3.client('s3')
kinesis_stream_name = "Consumer_Test"
#Put records in one at a time into the Kinesis Stream. One thread provides ~100 games per second
# chunksize=1 yields one-row DataFrames, so each to_dict() gives one match
# with single-element column lists.
for line in pandas.read_csv("s3://dotadatastorage/matches", chunksize = 1):
    info = line.to_dict(orient = "list")
    info["Kinesis_Stream_Timestamp"] = datetime.datetime.utcnow().isoformat()
    game_validity = isValidGame(info['game_mode'],info['human_players'],info['duration'])
    if game_validity:
        #We use a partition key which results in uniform partitioning into Kinesis
        put_response = kinesis_client.put_record(StreamName = kinesis_stream_name, PartitionKey = str(random.randrange(0, 5000000)), Data = generate_kinesis_record(info))
988,919 | 645aff6c36e8a1525f26a6692d15c416dd95b420 | from ConfigParser import ConfigParser, NoOptionError
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import sessionmaker
import acserver
from core.events import eventHandler
from core.consts import *
from Authentication import database as db
engine = None
AuthenticatedClients = {} #Key: CN, Value: User Class
module_permissions = [
('listUsers',"Allows the user to view all other users."),
('addUser',"Allows the user create new users"),
('grantPermission',"Allows the user grant permissions (caution, this is practically root access!)"),
('serverOp',"Allows the user to claim op and control the server")
]
def main(plugin):
    """Plugin entry point: connect the user database from plugin config,
    seed the root user on first run, and register this module's permissions.
    """
    global engine
    conf = plugin.getConf({'db_url':'users.db','db_user':'','db_pwd':'','db_type':'sqlite3','db_database':''})
    DBURL = conf.get('Settings', 'db_url')
    DBType = conf.get('Settings', 'db_type')
    DBUser = conf.get('Settings', 'db_user')
    DBPWD = conf.get('Settings', 'db_pwd')
    DBDataBase = conf.get('Settings', 'db_database')
    engine = db.setup(DBType,DBURL,DBUser,DBPWD,DBDataBase)
    session = _getSession()
    # First run: an empty user table means a fresh database — create root.
    if session.query(db.User).count() == 0:
        acserver.log("Authentication: No users exist, initalizing database.")
        acserver.log("Authentication: Creating root user.")
        session.add(db.makeUser("root","pyacserver",""))
    # Make sure every permission this module defines exists in the DB.
    for perm in module_permissions:
        addPermissionIfMissing(*perm)
    session.commit()
    session.close()
def _getSession():
    """
    Build and return a new SQLAlchemy session bound to the module engine.
    """
    Session = sessionmaker(bind=engine)
    return Session()
def getSession(f):
    """
    Decorator: passes a fresh session as the first argument to f and closes
    it automatically afterwards.
    """
    def wrapper(*args,**kwargs):
        s = _getSession()
        # try/finally so the session really is closed: the original placed
        # s.close() AFTER the return, so it never executed and every call
        # leaked a session.
        try:
            return f(*[s]+list(args),**kwargs)
        finally:
            s.close()
    return wrapper
@getSession
def addPermissionIfMissing(session,perm,desc):
    """
    Adds a permission if it is nonexistant.
    Returns True if it got added, False if it didn't.
    """
    # EAFP: looking the permission up raises NoResultFound when absent.
    try:
        db.getPerm(session,perm)
        return False
    except NoResultFound:
        session.add(db.makePermission(perm,desc))
        acserver.log("Authentication: Adding permission %s"%perm)
        session.commit()
        return True
def hasPermission(cn,perm):
    """
    Checks cn to see if they have the specified permission.
    Returns True if they do, or if the user is root (id 1).
    Returns False if they don't, or if the cn isn't authenticated.
    """
    user = AuthenticatedClients.get(cn)
    if user is None:
        return False
    # User id 1 is root and implicitly holds every permission.
    if user.id == 1:
        return True
    return perm in (p.name for p in user.permissions)
@eventHandler('serverExtension')
@getSession
def serverext(session,cn,ext,ext_text):
    """Dispatch the in-game extension commands: auth, adduser, claimadmin,
    listusers and grantperm. `session` is injected by the @getSession
    decorator; cn is the client number, ext_text the raw argument string.
    """
    # /auth <name> <password>: authenticate this client.
    if ext == "auth":
        args = ext_text.split()
        if len(args) != 2:
            acserver.msg("\f9Invalid arguments to auth/", cn)
            return
        name, pwd = args
        try:
            usr = session.query(db.User).filter(db.User.name==name).one()
        except NoResultFound:
            acserver.msg("\f9Invalid login!",cn)
            return
        if usr.checkPassword(pwd):
            # Cache the user object so later commands can check permissions.
            AuthenticatedClients[cn] = usr
            acserver.msg("\fJLogin Succeeded!",cn)
            acserver.log("Authenticated client (%d) %s as %s"%(cn,acserver.getClient(cn)['name'],name))
        else:
            acserver.msg("\f9Invalid login!",cn)
    # /adduser <name> <email> <password>: create an account (needs addUser).
    if ext == "adduser":
        if hasPermission(cn,'addUser'):
            args = ext_text.split()
            if len(args) != 3:
                acserver.msg("\f9Invalid arguments to register", cn)
                return
            name, email, pwd = args
            usrcount = session.query(db.User).filter(db.User.name==name).count()
            if usrcount:
                acserver.msg("\f9User already exists!",cn)
                session.close()
                return
            session.add(db.makeUser(name,pwd,email))
            session.commit()
            acserver.msg("\fJCreated user! Please login now with the credentials you provided.",cn)
        else:
            acserver.msg("\f3You don't have access to that command!",cn)
    # /claimadmin: grant server op status (needs serverOp).
    if ext == "claimadmin":
        if hasPermission(cn,'serverOp'):
            acserver.setAdmin(cn,1)
        else:
            acserver.msg("\f3You don't have access to that command!",cn)
    # /listusers: show all accounts with their permissions (needs listUsers);
    # the requesting user's own row is highlighted with a different color code.
    if ext == "listusers":
        if hasPermission(cn,'listUsers'):
            acserver.msg("\fHUser List:",cn)
            for usr in session.query(db.User).all():
                if usr.id == AuthenticatedClients[cn].id:
                    acserver.msg("%d) \fQ%s \f5- \fI%s \f5: {\fN%s\f5}"%(usr.id, usr.name,usr.email,"\f5, \fN".join(map(lambda p: p.name, usr.permissions))),cn)
                else:
                    acserver.msg("%d) \fR%s \f5- \fI%s \f5: {\fN%s\f5}"%(usr.id, usr.name,usr.email,"\f5, \fN".join(map(lambda p: p.name, usr.permissions))),cn)
            acserver.msg("\fHEnd User List.",cn)
        else:
            acserver.msg("\f3You don't have access to that command!",cn)
    # /grantperm <user> <permission>: attach a permission to a user
    # (needs grantPermission — effectively root-equivalent).
    if ext == "grantperm":
        if hasPermission(cn,'grantPermission'):
            args = ext_text.split()
            if len(args) != 2:
                acserver.msg("\f9Invalid arguments to grantperm", cn)
                return
            username,permname = args
            try:
                user = db.getUser(session,username)
            except NoResultFound:
                acserver.msg("\f3User not found!",cn)
                return
            try:
                perm = db.getPerm(session,permname)
            except NoResultFound:
                acserver.msg("\f3Permission does not exist!",cn)
                return
            if perm in user.permissions:
                acserver.msg("\f3User already has that permission!",cn)
                return
            else:
                user.permissions.append(perm)
                session.commit()
                acserver.msg("\fJPermission granted successfully!",cn)
        else:
            acserver.msg("\f3You don't have access to that command!",cn)
@eventHandler('clientDisconnect')
def clientdisconect(cn,reason):
    """Drop the disconnecting client's cached authentication, if any."""
    AuthenticatedClients.pop(cn, None)
988,920 | e90768eb32344e17a011baac349c9084e0f9e2b7 | n=int(input())
# For each of the n test cases, count trailing zeros of m! via Legendre's
# formula: the number of factors of 5 in 1..m, i.e. sum of m // 5^k.
for i in range(n):
    m=int(input())
    sr=5
    zeros=0
    # Each power of five contributes floor(m / 5^k) factors of 5.
    while sr<=m:
        zeros=zeros+int(m//sr)
        sr=sr*5
    print(zeros)
988,921 | 51dad57a70ba01736e15d5f94c132f426854172c | import time
def randomized(x, y):
    """Return a uniformly random integer in the closed interval [x, y]."""
    from random import randint
    value = randint(x, y)
    return value
def cracker_per_digit(x):
    """Crack the numeric password string x one digit at a time with random
    guesses.

    Returns (elapsed_seconds, cycles); `cycles` counts guesses but excludes
    the final successful one (matching the original accounting).
    """
    start = time.time()
    targets = list(x)
    found = []
    cycle = 1
    print("Cracking password per digit")
    while True:
        guess = str(randomized(0, 9))
        print("Number found: ", guess)
        print("Cycle: ", cycle)
        # len(found) is the index of the digit currently being attacked.
        if targets[len(found)] == guess:
            found.append(guess)
            print("password cracked: ", found)
            if len(found) == len(targets):
                break
        cycle += 1
    return (time.time() - start, cycle)
def cracker_complete_with_dict(x):
    """ method to crack a password generating and checking random numbers and
    storing the generated numbers in a list"""
    # `dictionary` remembers every full-length attempt already reported, so
    # repeated random sequences are discarded without re-counting them.
    dictionary = []
    start = time.time()
    lista = list(x)
    cracked = []
    cycle = 1
    print("Cracking password with a dictionary")
    while True:
        # Extend the current attempt by one random digit.
        number = str(randomized(0, 9))
        cracked.append(number)
        if cracked == lista:
            print("Cycle: ", cycle)
            print(cracked)
            print("length dictionary: ", len(dictionary))
            break
        # Full-length mismatch: either a known duplicate (silently reset) or
        # a new failed attempt (report, remember, reset).
        if len(cracked) == len(lista):
            if cracked in dictionary:
                cracked = []
            else:
                print("Cycle = ", cycle)
                print(cracked)
                dictionary.append(cracked)
                cracked = []
        cycle += 1
    end = time.time()
    # NOTE(review): `cracked in dictionary` is a linear scan per attempt, so
    # this degrades quickly for longer passwords — intended as a demo.
    return (end - start, cycle, len(dictionary))
def cracker_complete_no_dict(x):
    """Guess the whole numeric password x by generating random digit
    sequences, with no memory of past attempts.

    Returns (elapsed_seconds, cycles).
    """
    start = time.time()
    target = list(x)
    attempt = []
    cycle = 1
    print("Cracking password without a dictionary")
    while True:
        attempt.append(str(randomized(0, 9)))
        if attempt == target:
            print("Cycle: ", cycle)
            print(attempt)
            break
        # Full-length mismatch: report the failed attempt and start over.
        if len(attempt) == len(target):
            print("Cycle =", cycle)
            print(attempt)
            attempt = []
        cycle += 1
    return (time.time() - start, cycle)
def cracker_incrementing(x):
    """Crack the numeric password string x by counting up from 1 and
    comparing the decimal representation each step.

    Returns (elapsed_seconds, cycles).
    """
    start = time.time()
    candidate = 1
    cycle = 1
    print("Cracking password incrementing digits")
    while True:
        text = str(candidate)
        if text == x:
            print("Cycle = ", cycle)
            print(text)
            break
        print("Cycle =", cycle)
        print(text)
        candidate += 1
        cycle += 1
    return (time.time() - start, cycle)
# Interactive driver: benchmark every cracking strategy on a user-supplied
# numeric password, then report timings and attempt counts. Loops forever.
while True:
    password = str(input("Type a password made of numbers: "))
    (elapsedTimeNoDict, cyclesNoDict) = cracker_complete_no_dict(password)
    (elapsedTimeWithDict, cyclesWithDict, DictSize) = cracker_complete_with_dict(password)
    (elapsedTimeIncrementing, cyclesincrementing) = cracker_incrementing(password)
    (elapsedTimePerDigit, cyclesPerDigit) = cracker_per_digit(password)
    print(f"Password cracked without dictionary in {elapsedTimeNoDict} seconds in {cyclesNoDict} tries")
    print(f"Password cracked with dictionary in {elapsedTimeWithDict} seconds in {cyclesWithDict} tries and with dictionary with {DictSize} elements")
    print(f"Password cracked incremeting in {elapsedTimeIncrementing} seconds in {cyclesincrementing} tries")
    # Fixed: the per-digit summary line was printed twice (copy-paste duplicate).
    print(f"Password cracked per digit in {elapsedTimePerDigit} seconds in {cyclesPerDigit} tries")
    print("\n")
988,922 | 6ab3fd7aa95331452a36cd690a50f4ed46369145 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 22:30:05 2021
@author: Aaron
"""
import pandas as pd
import datetime
import matplotlib.pyplot as plt
def convert_to_right_format(x):
    """Parse a 'YYYY_MM_DD_hHH' timestamp string into a datetime."""
    parts = x.split("_")
    return datetime.datetime(
        year=int(parts[0]),
        month=int(parts[1]),
        day=int(parts[2]),
        hour=int(parts[3][1:]),  # drop the leading hour marker character
    )
# Load the hourly DALMP dataset and index it by parsed timestamps.
data=pd.read_csv("DATA_SET_1.csv")#Q1.1
data.loc[:,"Date/Time"]=data.loc[:,"Date/Time"].apply(lambda x:convert_to_right_format(x))#Q1.2
data=data.set_index("Date/Time")#Q1.2
# Rows that contain at least one missing value.
mask_missing=data.isnull().sum(axis=1)>0#Q1.3
missing_index=data.loc[mask_missing,:].index.to_list()#Q1.3
print("Missing Index:",missing_index)#1.3
# Fill gaps by time-weighted interpolation.
data=data.interpolate(method="time")#Q1.4
# NOTE(review): the frame is transposed here, so all later accesses go
# through data.T to recover the time-indexed orientation.
data=data.T#Q1.5
# Plot the two DALMP series for October 2019.
mask_year=data.T.index.year==2019#Q1.6
mask_month=data.T.index.month==10
safe=data.T.loc[mask_year & mask_month,"SAFEHARB 13 KV UNIT1 (DALMP) Average"]
face=data.T.loc[mask_year & mask_month,"FACEROCK 13 KV HOLT11 (DALMP) Average"]
fig, ax = plt.subplots(figsize=(12, 12))
ax.plot(safe,label="SAFEHARB 13 KV UNIT1 (DALMP) Average")
ax.plot(face,label="FACEROCK 13 KV HOLT11 (DALMP) Average")
ax.set(title="Oct 2019 DALMP",
       xlabel="Time",
       ylabel="DALMP")
plt.legend(loc="upper left")
plt.show()
# Q1.7: monthly mean of the 07:00-23:00 hours for both series.
mask_q7=(data.T.index.hour >=7)&(data.T.index.hour <=23)#Q1.7
data_q7=data.T.loc[mask_q7,:]
data_q7=data_q7.groupby(data_q7.index.month)[["SAFEHARB 13 KV UNIT1 (DALMP) Average","FACEROCK 13 KV HOLT11 (DALMP) Average"]].mean()
|
988,923 | 530cce61380ef4e2175292cba6782da3cb3ce1a3 | stopWords = open('english.stop', 'r').read().split()
# Write the word list out as an importable Python module.
# Fix: the output file was never closed; a context manager closes it, and
# str-formatting replaces the Python-2-only ``print >> f`` statement so the
# line runs on both Python 2 and 3 (output bytes are identical).
with open('stopwords.py', 'w') as f:
    f.write("stopWords =  %s\n" % (stopWords,))
|
988,924 | fea767b8aa2f148d618001313aa47bbedb34a24a | #
# See the documentation for more details on how this works
#
# The idea here is you provide a simulation object that overrides specific
# pieces of WPILib, and modifies motors/sensors accordingly depending on the
# state of the simulation. An example of this would be measuring a motor
# moving for a set period of time, and then changing a limit switch to turn
# on after that period of time. This can help you do more complex simulations
# of your robot code without too much extra effort.
#
# NOTE: THIS API IS ALPHA AND WILL MOST LIKELY CHANGE!
# ... if you have better ideas on how to implement, submit a patch!
#
from pyfrc import wpilib
from pyfrc.physics import drivetrains
class PhysicsEngine(object):
    '''
    Simulates a motor moving something that strikes two limit switches,
    one on each end of the track. Obviously, this is not particularly
    realistic, but it's good enough to illustrate the point

    TODO: a better way to implement this is have something track all of
    the input values, and have that in a data structure, while also
    providing the override capability.
    '''

    #: Width of robot, specified in feet
    ROBOT_WIDTH = 2
    #: Height of robot, specified in feet
    ROBOT_HEIGHT = 3
    #: Starting field position, in feet
    ROBOT_STARTING_X = 18.5
    ROBOT_STARTING_Y = 12
    # In degrees, 0 is east, 90 is south
    STARTING_ANGLE = 180

    def __init__(self, physics_controller):
        '''
        :param physics_controller: `pyfrc.physics.core.Physics` object
                                   to communicate simulation effects to
        '''
        self.physics_controller = physics_controller
        self.jag_value = None  # last value written to the watched Jaguar
        self.position = 0      # simulated mechanism position along the track
        self.last_tm = None

    def update_sim(self, now, tm_diff):
        '''
        Called when the simulation parameters for the program need to be
        updated. This is mostly when wpilib.Wait is called.

        :param now: The current time as a float
        :param tm_diff: The amount of time that has passed since the last
                        time that this function was called
        '''
        # Simulate the drivetrain from the two PWM outputs.
        l_motor = wpilib.DigitalModule._pwm[0].Get()
        r_motor = wpilib.DigitalModule._pwm[1].Get()

        speed, rotation = drivetrains.two_motor_drivetrain(l_motor, r_motor)
        self.physics_controller.drive(speed, rotation, tm_diff)

        # Nothing was written to the Jaguar since the last update.
        if self.jag_value is None:
            return

        # update position (use tm_diff so the rate is constant)
        self.position += self.jag_value * tm_diff * 3

        # update limit switches based on position
        if self.position <= 0:
            switch1 = True
            switch2 = False
        elif self.position > 10:
            switch1 = False
            switch2 = True
        else:
            switch1 = False
            switch2 = False

        # Push values into the simulated hardware. The channels may not be
        # allocated by the robot code, so failures stay best-effort — but
        # narrowed from a bare ``except:`` to ``except Exception`` so
        # KeyboardInterrupt/SystemExit still propagate.
        try:
            wpilib.DigitalModule._io[0].value = switch1
        except Exception:
            pass

        try:
            wpilib.DigitalModule._io[1].value = switch2
        except Exception:
            pass

        try:
            wpilib.AnalogModule._channels[1].voltage = self.position
        except Exception:
            pass

        # always reset variables in case the input values aren't updated
        # by the robot
        self.jag_value = None

    def sim_Jaguar_Set(self, obj, fn, value):
        '''
        Called when Jaguar.Set() is called. This function should
        call fn() with the passed in value.

        :param obj: Jaguar object
        :param fn: Wrapped Jaguar.Set function
        :param value: Value passed to Jaguar.Set
        '''
        # Only channel 4 drives the simulated mechanism.
        if obj.channel == 4:
            self.jag_value = value

        fn(value)
|
988,925 | 459f0241e20c89f56ae5817937d4f1fec891a8ab | ### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
import os
import numpy as np
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
# Evaluation-mode options: single-threaded, batch size 1, deterministic order.
opt = TestOptions().parse(save=False)
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
if opt.dataset_mode == 'temporal':
    opt.dataset_mode = 'test'
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
# Label maps are single-channel; otherwise use the configured input depth.
input_nc = 1 if opt.label_nc != 0 else opt.input_nc
save_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
print('save_dir:', save_dir)
print('Doing %d frames' % len(dataset))
for i, data in enumerate(dataset):
    # if os.path.exists(save_dir):
    #     continue
    if i >= opt.how_many:
        break
    # Reset the temporal state when a new video sequence begins.
    if data['change_seq']:
        model.fake_B_prev = None

    _, _, height, width = data['A'].size()
    A = Variable(data['A']).view(1, -1, input_nc, height, width)
    # B / inst are optional; the rank check detects placeholder tensors.
    B = Variable(data['B']).view(1, -1, opt.output_nc, height, width) if len(data['B'].size()) > 2 else None
    inst = Variable(data['inst']).view(1, -1, 1, height, width) if len(data['inst'].size()) > 2 else None
    generated = model.inference(A, B, inst)

    if opt.label_nc != 0:
        real_A = util.tensor2label(generated[1], opt.label_nc)
    else:
        c = 3 if opt.input_nc == 3 else 1
        real_A = util.tensor2im(generated[1][:c], normalize=False)

    visual_list = [('real_A', real_A),
                   ('fake_B', util.tensor2im(generated[0].data[0]))]
    visuals = OrderedDict(visual_list)
    img_path = data['A_path']
    print('process image... %s' % img_path)
    visualizer.save_images(save_dir, visuals, img_path)
988,926 | f2ffdcd359ceb44b1955014aa1f7d01d3c2e3194 | import numpy as np
import SimpleITK as stk
import os
from PIL import Image
import random
from random import shuffle
import tensorflow as tf
#classification index value
nothing = 0
bone = 1
# NOTE(review): the L/R suffixes presumably distinguish left/right bone
# segmentation ids — confirm against the segmentation label map.
bonesLId = 8
bonesRId = 9
def createPathIfNotExists(path):
    """Create ``path`` (including parent directories) if it does not exist.

    Fix: uses EAFP instead of the original exists()+makedirs() sequence,
    which had a race window between the check and the creation.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (possibly created concurrently) — fine.
        if not os.path.isdir(path):
            raise
def getSegFileName(path, file_name, index):
    """Find a segmentation file matching ``file_name`` in ``path``.

    Builds a prefix from ``file_name`` with its last 7 characters removed
    plus ``"_<index>"``, and returns the first directory entry starting
    with that prefix, or the string "Fail" when none matches.
    """
    prefix = file_name[0:len(file_name) - 7] + "_" + str(index)
    for entry in os.listdir(path):
        if entry.startswith(prefix):
            return entry
    return "Fail"
|
988,927 | 080c5e9d389b3d065b72897377e2fa55098f1d88 | from setuptools import setup
# Package version; also used to build the GitHub tarball download URL below.
VERSION = "0.1.8"

setup(
    name='Xlsxcursor',
    description="Xlsxcursor for xlsxwriter.",
    version=VERSION,
    url='https://github.com/KokocGroup/xslxcursor',
    download_url='https://github.com/KokocGroup/xslxcursor/tarball/v{}'.format(VERSION),
    packages=['xlsxcursor'],
    install_requires=[
        'xlsxwriter',
    ],
)
|
988,928 | e5a341db2cd6fddcbd30dcce9c95f4f5f261e39c | def fact_sum(num):
sum1=0
for i in range(1,(num//2)+1):
if (num%i==0):
sum1=sum1+i
return sum1
count=0
fact_dict = dict()
for x in range (1,100000000):
if(count==10):
break
fact_sum_x = fact_sum(x)
if fact_sum_x==x:
continue
if x==fact_sum(fact_sum_x):
if x in fact_dict.keys():
continue
fact_dict[x]=True
fact_dict[fact_sum_x] = True
count+=1
print(x, fact_sum_x)
|
988,929 | 4f429d30b103bebafce7c5e2cffd09c29822c629 | # Generated by Django 3.2.3 on 2021-06-08 19:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: orders Message rows by date, adds the
    # boolean ``alert`` flag (default True), and relabels/re-sizes the
    # ``message`` text field.

    dependencies = [
        ('Messages', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='message',
            options={'ordering': ('date',)},
        ),
        migrations.AddField(
            model_name='message',
            name='alert',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='message',
            name='message',
            field=models.CharField(max_length=2000, verbose_name='Type your message..'),
        ),
    ]
|
988,930 | ffdb0ac3bc3de5df8a2a0e54b0089b9da82690ce | import base64
import json
from datetime import datetime
from urllib.parse import urljoin
import parsel
import requests
import app.database
import app.queue
import worker
from model import File, Result, Site
from model.configuration import get_config
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) '\
'Gecko/20100101 Firefox/40.1'
class ScrapeException(Exception):
    ''' Represents a user-facing exception. '''
    def __init__(self, message):
        # Fix: forward to Exception so str(e) and e.args carry the message.
        # The original skipped this, so callers doing str(e) got ''.
        super().__init__(message)
        self.message = message
def test_site(site_id, tracker_id, request_timeout=10):
    """
    Perform postive and negative test of site.

    Postive test: check_username() return True for existing username.
    Negative test: check_username() returns False for non-existent username.

    Site is valid if:
        positive result = 'f' (found)
        negative result = 'n' (not found)
    """
    # NOTE(review): worker.start_job() has no matching finish_job() in this
    # function — confirm whether check_username(test=True) is expected to
    # finish the job.
    worker.start_job()
    redis = worker.get_redis()
    db_session = worker.get_session()
    site = db_session.query(Site).get(site_id)

    # Do positive test.
    result_pos_id = check_username(username=site.test_username_pos,
                                   site_id=site_id,
                                   category_id=None,
                                   total=2,
                                   tracker_id=tracker_id + '-1',
                                   test=True)
    result_pos = db_session.query(Result).get(result_pos_id)

    # Do negative test.
    result_neg_id = check_username(username=site.test_username_neg,
                                   site_id=site_id,
                                   category_id=None,
                                   total=2,
                                   tracker_id=tracker_id + '-2',
                                   test=True)
    result_neg = db_session.query(Result).get(result_neg_id)

    # Update site with test results
    site.test_result_pos = result_pos
    site.test_result_neg = result_neg

    # Set site validity based on results
    # of both tests.
    if result_pos.status == 'f' and \
       result_neg.status == 'n':
        site.valid = True
    else:
        site.valid = False

    site.tested_at = datetime.utcnow()
    db_session.commit()

    # Send redis notification
    msg = {
        'tracker_id': tracker_id,
        'status': 'tested',
        'site': site.as_dict(),
        'resource': None,
    }
    redis.publish('site', json.dumps(msg))
def check_username(username, site_id, category_id, total,
                   tracker_id, request_timeout=10, test=False):
    """
    Check if `username` exists on the specified site.

    Renders the site's profile URL through splash, stores a Result row
    (status 'f'/'n'/'e') with a screenshot, and returns the Result id.
    When ``test`` is True (site validation), client notifications and
    archive scheduling are skipped.
    """
    worker.start_job()
    redis = worker.get_redis()
    db_session = worker.get_session()

    # Make a splash request.
    site = db_session.query(Site).get(site_id)

    # Check site.
    splash_result = _splash_username_request(username,
                                             site,
                                             request_timeout)
    image_file = _save_image(db_session, splash_result)

    # Save result to DB.
    result = Result(
        tracker_id=tracker_id,
        site_name=splash_result['site']['name'],
        site_url=splash_result['url'],
        status=splash_result['status'],
        image_file_id=image_file.id,
        error=splash_result['error']
    )
    db_session.add(result)
    db_session.commit()

    if not test:
        # Notify clients of the result.
        current = redis.incr(tracker_id)
        result_dict = result.as_dict()
        result_dict['current'] = current
        # result_dict['image_file_url'] = image_file.url()
        # result_dict['image_name'] = image_file.name
        result_dict['total'] = total
        redis.publish('result', json.dumps(result_dict))

        # If this username search is complete, then queue an archive job.
        if current == total:
            app.queue.schedule_archive(username, category_id, tracker_id)

    # NOTE(review): finish_job() is assumed to run for test runs as well,
    # mirroring start_job() above — confirm the original indentation.
    worker.finish_job()

    return result.id
def splash_request(target_url, headers=None, request_timeout=10):
    ''' Ask splash to render a page.

    :param target_url: URL for splash to fetch and render.
    :param headers: optional dict of extra request headers; a default
        User-Agent is added when none is supplied. (``None`` default fixes
        the original mutable-default ``{}``, which was shared across calls
        and mutated in place below.)
    :param request_timeout: overall splash render timeout in seconds.
    :returns: the ``requests`` response from splash's render.json endpoint.
    '''
    # Copy so the caller's dict is never mutated.
    headers = dict(headers) if headers else {}

    db_session = worker.get_session()
    splash_url = get_config(db_session, 'splash_url', required=True).value
    splash_user = get_config(db_session, 'splash_user',
                             required=True).value
    splash_pass = get_config(db_session, 'splash_password',
                             required=True).value
    auth = (splash_user, splash_pass)
    splash_headers = {'content-type': 'application/json'}

    # Only add the default User-Agent if the caller didn't set one.
    if 'user-agent' not in [header.lower() for
                            header in headers.keys()]:
        headers['user-agent'] = USER_AGENT

    payload = {
        'url': target_url,
        'html': 1,
        'jpeg': 1,
        'har': 1,
        'history': 1,
        'timeout': request_timeout,
        'resource_timeout': 5,
        'headers': headers
    }

    splash_response = requests.post(
        urljoin(splash_url, 'render.json'),
        headers=splash_headers,
        json=payload,
        auth=auth
    )

    return splash_response
def _splash_username_request(username, site, request_timeout):
    """
    Ask splash to render a `username` search
    result for `site`.

    Returns a dict with keys code, error, image, site, url and status,
    where status is 'f' (found), 'n' (not found) or 'e' (error).
    """
    target_url = site.get_url(username)

    if site.headers is None:
        site.headers = {}

    splash_response = splash_request(target_url,
                                     site.headers,
                                     request_timeout)

    result = {
        'code': splash_response.status_code,
        'error': None,
        'image': None,
        'site': site.as_dict(),
        'url': target_url,
    }
    splash_data = splash_response.json()

    try:
        splash_response.raise_for_status()
        if _check_splash_response(site, splash_response, splash_data):
            result['status'] = 'f'
        else:
            result['status'] = 'n'
        result['image'] = splash_data['jpeg']
    except Exception as e:
        # Any failure (HTTP error, missing keys, match error) marks the
        # result as an error instead of propagating.
        result['status'] = 'e'
        result['error'] = str(e)

    return result
def _check_splash_response(site, splash_response, splash_data):
    """
    Parse response and test against site criteria to determine
    whether username exists. Used with requests response object.
    """
    selector = parsel.Selector(text=splash_data['html'])

    # Optional HTTP status criterion, checked against the first upstream hop.
    status_ok = True
    if site.status_code is not None:
        upstream_status = splash_data['history'][0]['response']['status']
        status_ok = site.status_code == upstream_status

    # Optional content criterion: CSS selector, plain-text search, or XPath.
    match_ok = True
    if site.match_expr is not None:
        if site.match_type == 'css':
            match_ok = bool(selector.css(site.match_expr))
        elif site.match_type == 'text':
            fragments = [node.strip()
                         for node in selector.css(':not(script):not(style)::text').extract()
                         if node.strip() != '']
            text = ''.join(fragment + ' ' for fragment in fragments)
            match_ok = site.match_expr in text
        elif site.match_type == 'xpath':
            match_ok = bool(selector.xpath(site.match_expr))
        else:
            raise ValueError('Unknown match_type: {}'.format(site.match_type))

    return status_ok and match_ok
def _save_image(db_session, scrape_result):
    """ Save the image returned by Splash to a local file.

    Decodes the base64 screenshot into a File row and commits it; on a
    scrape error, returns the stored generic error image instead.
    """
    if scrape_result['error'] is None:
        image_name = '{}.jpg'.format(scrape_result['site']['name'])
        # Fix: base64.decodestring was deprecated and removed in Python 3.9;
        # decodebytes is the supported equivalent.
        content = base64.decodebytes(scrape_result['image'].encode('utf8'))
        image_file = File(name=image_name,
                          mime='image/jpeg',
                          content=content)
        db_session.add(image_file)

        try:
            db_session.commit()
        except Exception:
            # Narrowed from a bare ``except:`` so interrupts still propagate.
            db_session.rollback()
            raise ScrapeException('Could not save image')
    else:
        # Get the generic error image.
        image_file = (
            db_session
            .query(File)
            .filter(File.name == 'hgprofiler_error.png')
            .one()
        )

    return image_file
|
988,931 | 0f4bf478920604b4f994fdba7a77b643181f435d |
from openpyxl import Workbook
from openpyxl import Workbook
from openpyxl.compat import range
from openpyxl.cell import get_column_letter
from openpyxl import load_workbook
import time
import math
from openpyxl.styles import colors
from openpyxl.styles import Font, Color
from openpyxl.styles import colors
from constant import compoundNameTitle, libraryScoreTitle, rtMeasuredTitle,\
NAValue, measuredAreaTitle, ipTitle, lsTitle, mzDeltaTitle
def loadData(fileName, logger):
    """Load every worksheet of an .xlsx file into nested dicts.

    Returns {sheet_name: {column_title: [cell values...]}} where the first
    row of each sheet supplies the column titles.
    """
    #print "start load data %s" % time.clock()
    logger.info("Start loading data: " + fileName)
    wholeWorkBook = {}
    inputDataDict = load_workbook(fileName)
    #print "end load data %s" % time.clock()
    logger.info("Finish loading data: " + fileName)
    sheetNames = inputDataDict.get_sheet_names()
    #print sheetNames
    sheetNames.sort()
    for currentSheetName in sheetNames:
        #print "load sheet Name %s: %s" % (currentSheetName, time.clock())
        logger.info("Start to load sheet data: " + currentSheetName)
        sheetData = inputDataDict.get_sheet_by_name(currentSheetName)
        rows = sheetData.rows;
        columns = sheetData.columns;
        rowsNum = len(rows)
        columsNum = len(columns)
        cnt = 0
        # First row becomes the title list; remaining cells go into a flat
        # row-major value list.
        sheetRowTitleValue = []
        sheetColValue = []
        #print "Data in ", currentSheetName, ":\n"
        for row in rows:
            for colValue in row:
                if(cnt < columsNum):
                    sheetRowTitleValue.append(colValue.value)
                else:
                    sheetColValue.append(colValue.value)
                cnt = cnt + 1
        # Re-shape the flat value list into one list per column.
        wholeColumData = []
        for i in range(columsNum):
            columnData = []
            for j in range(rowsNum - 1):
                index = j * columsNum + i;
                #print index
                columnData.append(sheetColValue[index])
            wholeColumData.append(columnData)
        #print wholeColumData
        sheetDataFinal = {}
        for i in range(columsNum):
            sheetDataFinal[sheetRowTitleValue[i]] = wholeColumData[i]
        #print sheetDataFinal
        wholeWorkBook[currentSheetName] = sheetDataFinal
    return wholeWorkBook
def extractPartData(wordBook, rows = 6, output="partial.xlsx"):
    """Write the first ``rows`` data rows of every sheet to ``output``.

    ``wordBook`` is the nested dict structure produced by loadData().
    """
    wb = Workbook()
    #remove default worksheet
    wb.remove_sheet(wb.get_sheet_by_name("Sheet"))
    #wb.remove_sheet("Sheet")
    sheetNames = list(wordBook)
    sheetNames.sort()
    for currentSheetName in sheetNames:
        ws = wb.create_sheet(title=currentSheetName)
        sheetData = wordBook[currentSheetName]
        sheetTitle = list(sheetData)
        print "sheet title = ", sheetTitle
        ws.append(sheetTitle)
        print "Data in sheet: ", currentSheetName, ":\n"
        allColumnData = []
        for currentTitle in sheetTitle:
            columnData = sheetData[currentTitle]
            allColumnData.append(columnData)
        rowsNum = len(allColumnData[0])
        columnsNum = len(sheetTitle)
        print rowsNum, columnsNum
        #for debug file quan.xlsx
        # Truncate output to the requested number of rows.
        rowsNum = rows
        for i in range(columnsNum):
            for j in range(rowsNum):
                ws.cell(row = j + 2, column = i + 1, value=allColumnData[i][j])
                #print columnData
    wb.save(filename=output)
def createOutputWordBook():
    """Return a fresh Workbook with the auto-created default sheet removed."""
    book = Workbook()
    # Drop the default "Sheet" so only explicitly created sheets remain.
    book.remove_sheet(book.get_sheet_by_name("Sheet"))
    return book
def writeWordBook(wordbook, output="newbook.xlsx"):
    """Persist an openpyxl workbook to ``output``."""
    wordbook.save(filename = output)
def writeDataToColumn(wordBook, sheetName, data, columnIndex, rowStartIndex):
    """Write ``data`` values down a single column of the named sheet."""
    sheet = wordBook.get_sheet_by_name(sheetName)
    for offset, value in enumerate(data):
        sheet.cell(row=rowStartIndex + offset, column=columnIndex, value=value)
def writeDataToRow(wordBook, sheetName, data, rowIndex, columnStartIndex):
    """Write ``data`` values along a single row of the named sheet."""
    sheet = wordBook.get_sheet_by_name(sheetName)
    for offset, value in enumerate(data):
        sheet.cell(row=rowIndex, column=columnStartIndex + offset, value=value)
def highLightCell(wordBook, sheetName, rtValue, data, rowIndex, columnStartIndex):
    """Color red every cell whose value differs from ``rtValue`` by >= 0.2.

    Empty-string entries in ``data`` are skipped. Cells are addressed along
    row ``rowIndex`` starting at ``columnStartIndex``.
    """
    ws = wordBook.get_sheet_by_name(sheetName)
    ft = Font(color=colors.RED)
    # if(diff >= 0.2):
    #     ws2.cell(row = i + 2, column = columnIndex).font = ft
    for i in range(len(data)):
        if(data[i] == ""):
            continue
        diff = math.fabs(data[i] - rtValue)
        if(diff >= 0.2):
            ws.cell(row = rowIndex, column = columnStartIndex + i).font = ft
def extractMZExpectData(screenDataBook):
    """Map every compound name to its expected m/z value across all sheets.

    Later sheets overwrite earlier entries for duplicate compound names.
    """
    name_key = "Compound Name"
    mz_key = "m/z (Expected)"
    mzExpectInfo = {}
    for sheet_name in screenDataBook:
        sheet = screenDataBook[sheet_name]
        for i, compound in enumerate(sheet[name_key]):
            mzExpectInfo[compound] = sheet[mz_key][i]
    return mzExpectInfo
def extractFormulaData(screenDataBook):
    """Map every compound name to its chemical formula across all sheets."""
    name_key = "Compound Name"
    formula_key = "Formula"
    formulaInfo = {}
    for sheet_name in screenDataBook:
        sheet = screenDataBook[sheet_name]
        for i, compound in enumerate(sheet[name_key]):
            formulaInfo[compound] = sheet[formula_key][i]
    return formulaInfo
def extractLibMatchNameData(screenDataBook):
    """Map every compound name to its library match name across all sheets."""
    name_key = "Compound Name"
    match_key = "Lib Match Name"
    libMatchNameInfo = {}
    for sheet_name in screenDataBook:
        sheet = screenDataBook[sheet_name]
        for i, compound in enumerate(sheet[name_key]):
            libMatchNameInfo[compound] = sheet[match_key][i]
    return libMatchNameInfo
def extractLSMARTData(screenDataBook):
    """Collect per-sheet [sheet, library score, measured area, RT measured]
    entries for every compound; "N/A" scores become the NAValue sentinel."""
    rtLibraryScoreInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: create an empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            rtLibraryScoreInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's values for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        libraryScoreData = sheetData.get(libraryScoreTitle)
        rtMeasuredData = sheetData[rtMeasuredTitle]
        measuredAreaData = sheetData[measuredAreaTitle]
        for i in range(len(compoundNameData)):
            mzDeltaValue = libraryScoreData[i]
            if(mzDeltaValue == "N/A"):
                mzDeltaValue = NAValue
            rtLibraryScoreInfo[compoundNameData[i]].append([currSheetName, mzDeltaValue, measuredAreaData[i],rtMeasuredData[i]])
    return rtLibraryScoreInfo
def isAllNA(data):
    """Return True when every entry of ``data`` equals the NAValue sentinel."""
    return all(value == NAValue for value in data)
def extractCPLSData(screenDataBook):
    """Collect [sheet, library score] pairs per compound; "N/A" scores are
    replaced by the NAValue sentinel."""
    cplsInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            cplsInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's score for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        libraryScoreData = sheetData.get(libraryScoreTitle)
        for i in range(len(compoundNameData)):
            mzDeltaValue = libraryScoreData[i]
            if(mzDeltaValue == "N/A"):
                mzDeltaValue = NAValue
            cplsInfo[compoundNameData[i]].append([currSheetName, mzDeltaValue])
    return cplsInfo
def extractIPData(screenDataBook):
    """Collect [sheet, IP value] pairs per compound across all sheets."""
    ipInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            ipInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's IP value for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        ipData = sheetData.get(ipTitle)
        for i in range(len(compoundNameData)):
            ipValue = ipData[i]
            ipInfo[compoundNameData[i]].append([currSheetName, ipValue])
    return ipInfo
def isContainPass(data):
    """Return True if any entry of ``data`` equals the string "Pass"."""
    for entry in data:
        if entry == "Pass":
            return True
    return False
def extractLSData(screenDataBook):
    """Collect [sheet, LS value] pairs per compound across all sheets."""
    lsInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            lsInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's LS value for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        lsData = sheetData.get(lsTitle)
        for i in range(len(compoundNameData)):
            lsValue = lsData[i]
            lsInfo[compoundNameData[i]].append([currSheetName, lsValue])
    return lsInfo
def extractRTMeasuredData(screenDataBook):
    """Collect [sheet, RT measured] pairs per compound across all sheets."""
    rtMeasuredInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            rtMeasuredInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's measured RT for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        rtMeasuredData = sheetData.get(rtMeasuredTitle)
        for i in range(len(compoundNameData)):
            mzMeasuredValue = rtMeasuredData[i]
            rtMeasuredInfo[compoundNameData[i]].append([currSheetName, mzMeasuredValue])
    return rtMeasuredInfo
def extractMZDeltaData(screenDataBook):
    """Collect [sheet, m/z delta] pairs per compound across all sheets."""
    mzDeltaInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            mzDeltaInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's m/z delta for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        mzDeltaData = sheetData.get(mzDeltaTitle)
        for i in range(len(compoundNameData)):
            mzDeltaValue = mzDeltaData[i]
            mzDeltaInfo[compoundNameData[i]].append([currSheetName, mzDeltaValue])
    return mzDeltaInfo
def isSheetInList(inputSheetName, data):
    """Return True if any [sheetName, value] pair in ``data`` carries
    ``inputSheetName`` as its sheet name."""
    return any(sheetName == inputSheetName for sheetName, _ in data)
def extractMeasuredAreaData(screenDataBook):
    """Collect [sheet, measured area] pairs per compound, then pad missing
    sheet entries with "" so every compound has one pair per sheet."""
    measuredAreaInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            measuredAreaInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's measured area for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        measuredAreaData = sheetData.get(measuredAreaTitle)
        for i in range(len(compoundNameData)):
            measuredAreaValue = measuredAreaData[i]
            measuredAreaInfo[compoundNameData[i]].append([currSheetName, measuredAreaValue])
            #measuredAreaInfo[compoundNameData[i]].append({currSheetName:measuredAreaValue})
    #padding compound name value that not in that sheet
    cpNames = list(measuredAreaInfo)
    numOfSheetNames = len(sheetNames)
    for currCPName in cpNames:
        for currSheetName in sheetNames:
            snmaList = measuredAreaInfo[currCPName]#.append({currSheetName:""})
            if(len(snmaList) == numOfSheetNames):
                continue
            flag = isSheetInList(currSheetName, snmaList)
            if(flag == False):
                measuredAreaInfo[currCPName].append([currSheetName, ""])
    return measuredAreaInfo
def extractALLRTMeasuredData(screenDataBook):
    """Collect [sheet, RT measured] pairs per compound, then pad missing
    sheet entries with "" so every compound has one pair per sheet."""
    rtmInfo = {}
    sheetNames = list(screenDataBook)
    # Pass 1: empty entry for every compound seen in any sheet.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        for i in range(len(compoundNameData)):
            rtmInfo[compoundNameData[i]] = []
    # Pass 2: append this sheet's measured RT for each compound.
    for currSheetName in sheetNames:
        sheetData = screenDataBook[currSheetName]
        compoundNameData = sheetData[compoundNameTitle]
        rtmData = sheetData.get(rtMeasuredTitle)
        for i in range(len(compoundNameData)):
            measuredAreaValue = rtmData[i]
            rtmInfo[compoundNameData[i]].append([currSheetName, measuredAreaValue])
            #rtmInfo[compoundNameData[i]].append({currSheetName:measuredAreaValue})
    #padding compound name value that not in that sheet
    cpNames = list(rtmInfo)
    numOfSheetNames = len(sheetNames)
    for currCPName in cpNames:
        for currSheetName in sheetNames:
            snrtmList = rtmInfo[currCPName]#.append({currSheetName:""})
            if(len(snrtmList) == numOfSheetNames):
                continue
            flag = isSheetInList(currSheetName, snrtmList)
            if(flag == False):
                rtmInfo[currCPName].append([currSheetName, ""])
    return rtmInfo
def printDict(data):
    """Debug helper: print each key and its items in sorted key order."""
    keys = list(data)
    keys.sort()
    for key in keys:
        print key, ":"
        value = data[key]
        for item in value:
            print "====>", item
# wordDataBook = loadData("screen.xlsx")
# extractPartData(wordDataBook, 10, "small-screen2.xlsx")
def checkFileValid(dataBook, rowTitleList):
    """Verify every sheet of ``dataBook`` contains all required column titles.

    Returns [missNum, missingRowTitle] where missNum is the total count of
    missing titles and missingRowTitle maps each sheet name to the list of
    titles it lacks.
    """
    sheetNames = list(dataBook)
    missingRowTitle = {}
    missNum = 0
    for sheetName in sheetNames:
        missingRowTitle[sheetName] = []
        sheetData = dataBook[sheetName]
        for title in rowTitleList:
            # Fix: ``in`` works on both Python 2 and 3; dict.has_key() was
            # removed in Python 3.
            if title not in sheetData:
                missNum = missNum + 1
                missingRowTitle[sheetName].append(title)
    return [missNum, missingRowTitle]
def getCurrTime():
    """Return the current local time as a 'YYYY-MM-DD-HH:MM:SS' string."""
    # strftime produces the identical zero-padded format that the original
    # assembled field-by-field with %02d formatting.
    return time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
|
988,932 | 1a8bddfee49e44e78ef81547b00e41cf8aeed1c2 | # coding: utf-8
from Text_CNN import TextCNN
import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score, roc_auc_score
import os
def predict(filter_sizes, num_filters, num_classes, learning_rate, batch_size, decay_steps, decay_rate,
            sequence_length, vocab_size, embed_size, X_test, y_test, train_epochs,
            initializer=tf.random_normal_initializer(stddev=0.1), multi_label_flag=False,
            clip_gradients=5.0, decay_rate_big=0.50, dropout=1.0, char_embed_matrix=None):
    """Restore the latest TextCNN checkpoint and report test accuracy / F1."""
    with tf.Session() as sess:
        # Instantiate model
        text_CNN = TextCNN(filter_sizes, num_filters, num_classes, learning_rate, batch_size, decay_steps, decay_rate,
                           sequence_length, vocab_size, embed_size,
                           initializer=tf.random_normal_initializer(stddev=0.1),
                           multi_label_flag=False, clip_gradients=5.0, decay_rate_big=0.50)
        # NOTE(review): '\c' stays a literal backslash only because it is not
        # a recognized escape sequence; prefer os.path.join for portability.
        saver_path = os.getcwd() + '\checkpoint'
        saver = tf.train.Saver(max_to_keep=5)
        model_file = tf.train.latest_checkpoint(saver_path)
        saver.restore(sess, model_file)
        print('Start Testing...')
        feed_dict = {text_CNN.input_x: X_test, text_CNN.input_y: y_test, text_CNN.dropout_keep_prob: dropout,
                     text_CNN.char_embed_matrix: char_embed_matrix, text_CNN.train_iteration: train_epochs,
                     text_CNN.is_train: None}
        acc_test, logits_test = sess.run([text_CNN.accuracy, text_CNN.logits], feed_dict=feed_dict)
        y_pred = np.argmax(logits_test, 1)
        # Weighted F1 restricted to the labels actually present in y_test.
        f1_test = f1_score(y_test, y_pred, average='weighted', labels=np.unique(y_test))
        # auc_test = roc_auc_score(y_true, y_pred, average='weighted')
        print('The test accuracy / f1 : {0[0]:.2%} / {0[1]:.4f}'.format((acc_test, f1_test)))
if __name__ == '__main__':
    fold_path = os.getcwd() + '\\related_data'
    # Load test data
    # NOTE(review): '\X' / '\y' survive as literal backslashes only because
    # they are not recognized escapes; prefer raw strings or os.path.join.
    lst = ['\X_test.npy', '\y_test.npy']
    X_test, y_test = (np.load(fold_path + name) for name in lst)
    print(len(set(y_test)))
    # Load pre-trained word_embedding
    char_embed_path = fold_path + '\char_embed_matrix.npy'
    if os.path.exists(char_embed_path):
        char_embed_matrix = np.load(char_embed_path)
    else:
        wv_path = fold_path + '\wiki_100_utf8.txt'
        # NOTE(review): `utils` is never imported in this file, so this
        # fallback branch raises NameError — confirm the intended module.
        vocab, embed = utils.load_pretrained_wordvector(wv_path)
        char_embed_matrix = np.asarray(embed, dtype='float32')
        np.save(char_embed_path, char_embed_matrix)
    predict(filter_sizes=[3, 4, 5], num_filters=[200, 200, 200], num_classes=78, learning_rate=0.001, batch_size=64,
            decay_steps=0, decay_rate=0, sequence_length=120, vocab_size=16116, embed_size=100, X_test=X_test,
            y_test=y_test, train_epochs=1, initializer=tf.random_normal_initializer(stddev=0.1), multi_label_flag=False,
            clip_gradients=5.0, decay_rate_big=0.50, dropout=1.0, char_embed_matrix=char_embed_matrix)
988,933 | e0fce33aee150656b6dd5ff681ba064a7a4c94a4 | import json
import sys, argparse
import os
import webbrowser
from plotly.offline import plot
import plotly.graph_objs as go
import plotly.figure_factory as ff
import HelperMethods
import Graphs
# OutputData Settings
# Color scale anchors for percent-identity heatmaps (red -> yellow -> green).
Heatmap_Color = [[0.0, 'rgb(255,23,68)'], [0.5, 'rgb(255,234,0)'], [1.0, 'rgb(0,230,118)']]
Heatmap_MinVal = 80  # lower bound of the heatmap color scale
Recombination_Colors = ['rgb(0,176,255)', 'rgb(255,23,68)']
Tree_Image_Size = 720  # phylogenetic tree PNG size, in pixels
# Intialize variables
DataFolder = ""
OutputFolder = ""
# Read command line args
parser=argparse.ArgumentParser()
parser.add_argument('--i')
parser.add_argument('--out')
args=parser.parse_args()
DataFolder = args.i
OutputFolder = args.out
#DataFolder = r"C:\Users\Rylan\source\repos\SequenceAnalysis\PRRSAnalysis\bin\Debug\_TempData" # temp
#OutputFolder = r"C:\Users\Rylan\Documents\SequenceAnalysisProgram\Output\Test" # temp
# Get data variables
Sequences = HelperMethods.readJson(DataFolder + "/Sequences.json")
PercentIdentityData = HelperMethods.readJson(DataFolder + "/PercentIdentities.json")
RecombinationData = HelperMethods.readJson(DataFolder + "/Recombination.json")
AnalysisNames = HelperMethods.readJson(DataFolder + "/AnalysisNames.json")
Trees = HelperMethods.readJson(DataFolder + "/Trees.json")
# Create output folders
HelperMethods.createDir(OutputFolder)
HelperMethods.createDir(OutputFolder + "/PercentIdentity_Heatmaps/")
HelperMethods.createDir(OutputFolder + "/PhyloGeneticTrees/")
HelperMethods.createDir(OutputFolder + "/ReportParts/")
## Non Report Items ##
# Heatmaps
# Heatmap edge length scales with sequence count, clamped to [600, 800] px.
size = len(Sequences)*50
if size > 800:
    size = 800
elif size < 600:
    size = 600
for analysisName, data in PercentIdentityData.items():
    #s = HelperMethods.removeVaccines(data["Sequences"])
    p = ff.create_annotated_heatmap(z=data["Data"], y=data["Sequences"], x=data["Sequences"], colorscale=Heatmap_Color, zmin=Heatmap_MinVal,
                                    zmax=100, hoverinfo = "none")
    p.layout.update(autosize=True, width=size, height=size, margin=go.Margin(l=250,r=100,b=200,t=50,pad=4))
    plot(p, filename=OutputFolder + "/PercentIdentity_Heatmaps/" + analysisName + ".html", auto_open=False, config={'showLink': False, 'displayModeBar': False })
# Phylogetetic Trees
for name, value in Trees.items():
    Graphs.CreatePhyloGeneticTree(value["NewickFile"], OutputFolder + "/PhyloGeneticTrees/" + name + "_tree.png", Tree_Image_Size)
## Report Items ##
# Orf Bar Plots
''' orfData = []
layout = None
#orfAnnotations = {}
rangeD = {}
prevousRange = 0
seqs = []
vis = True
for sequence in Sequences.keys():
if not Sequences[sequence]["Vaccine"]:
data, layout, annotations = Graphs.StackedSequenceGraph(PercentIdentityData, sequence, AnalysisNames, Sequences,
title="Amino Acid Percent Identity Comparisons", visible=vis)
if vis:
vis = False
orfData += data
orfAnnotations[sequence] = annotations
rangeD[sequence] = range(prevousRange, prevousRange + len(data))
prevousRange += len(data)
seqs.append(sequence)
dropdown = Graphs.CreateDropDown(seqs, len(orfData), rangeD, orfAnnotations)
layout['updatemenus'] = dropdown
fig_orfDropdown = go.Figure(data=orfData, layout=layout)
# Vaccine Orf Bar Plots
fig_vaccines = []
for sequence in Sequences.keys():
if Sequences[sequence]["Vaccine"]:
data, layout, annotations = Graphs.StackedSequenceGraph(PercentIdentityData, sequence, AnalysisNames, Sequences,
title=(sequence + " Comparison"))
layout['annotations'] = annotations
fig_vaccines.append(go.Figure(data=data, layout=layout))'''
# Orf Vaccine Graphs
# Build one nucleotide (_n) and one amino-acid (_a) ORF figure per vaccine
# sequence; vac_height keeps the height of the last-built figure.
fig_vaccines_n = []
fig_vaccines_a = []
vac_height = 0
for sequence in Sequences.keys():
    if Sequences[sequence]["Vaccine"]:
        fig_a, fig_n, vac_height = Graphs.CreateOrfPlot(PercentIdentityData, sequence, Sequences, Heatmap_Color, Heatmap_MinVal, AnalysisNames,
                                                        title = sequence)
        fig_vaccines_a.append(fig_a)
        fig_vaccines_n.append(fig_n)
# Orf Plot
# Non-vaccine sequences share a single figure (per _n/_a flavour) with a
# dropdown to switch between sequences.
# NOTE(review): rangeD_n, prevousRange and vis look like leftovers from the
# commented-out implementation above -- they are never used below.
orfData_n = []
orfData_a = []
layouts_n = []
layouts_a = []
layout_n = None
layout_a = None
rangeD_n = {}
prevousRange = 0
seqs = []
vis = True
for sequence in Sequences.keys():
    if not Sequences[sequence]["Vaccine"]:
        fig_a, fig_n, orf_height = Graphs.CreateOrfPlot(PercentIdentityData, sequence, Sequences, Heatmap_Color, Heatmap_MinVal,
                                                        AnalysisNames, title = "")
        orfData_n += (fig_n['data'])
        orfData_a += (fig_a['data'])
        layouts_n.append(fig_n['layout'])
        layouts_a.append(fig_a['layout'])
        seqs.append(sequence)
        # The layout of the last processed sequence becomes the base layout.
        layout_n = fig_n['layout']
        layout_a = fig_a['layout']
# Figure height grows with the number of sequences, floored at 350 px.
orf_height = 75*len(seqs) + 75
if orf_height < 350:
    orf_height = 350
dropdown_n = Graphs.CreateNewDropDown(seqs, layouts_n)
layout_n['updatemenus'] = dropdown_n
layout_n['height'] = orf_height
layout_n['title'] = "Nucleotide Comparison"
layout_n['margin']['t'] = 200
fig_orfDropdown_n = go.Figure(data=orfData_n, layout=layout_n)
dropdown_a = Graphs.CreateNewDropDown(seqs, layouts_a)
layout_a['updatemenus'] = dropdown_a
layout_a['height'] = orf_height
layout_a['title'] = "Amino Acid Comparison"
layout_a['margin']['t'] = 200
fig_orfDropdown_a = go.Figure(data=orfData_a, layout=layout_a)
# Recombination Graph
# Only build the recombination figure when there is data for it.
fig_recombination = None
if(len(RecombinationData) > 0):
    fig_recombination = Graphs.CreateRecombinationGraph(RecombinationData, Recombination_Colors, Sequences, title="Recombination Sites")
# Heatmaps
# The report heatmaps exclude vaccine sequences.
s = HelperMethods.removeVaccines(PercentIdentityData["Wholegenome"]["Sequences"])
fig_Heatmap_Wholegenome = ff.create_annotated_heatmap(z=PercentIdentityData["Wholegenome"]["Data"], y=s,
                                                      x=s, colorscale=Heatmap_Color, zmin=Heatmap_MinVal, zmax=100, hoverinfo = "none")
fig_Heatmap_Wholegenome.layout.update(title="Whole Genome Nucleotide Heatmap", width=size, height=size,
                                      margin=go.Margin(l=200,r=100,b=200,t=50,pad=4), xaxis=dict(side='bottom'))
try:
    s = HelperMethods.removeVaccines(PercentIdentityData["Orf2b-Orf5a_aa"]["Sequences"])
    fig_Heatmap_orf2b5a = ff.create_annotated_heatmap(z=PercentIdentityData["Orf2b-Orf5a_aa"]["Data"], y=s,
                                                      x=s, colorscale=Heatmap_Color, zmin=Heatmap_MinVal, zmax=100, hoverinfo = "none")
    fig_Heatmap_orf2b5a.layout.update(title="Orf2b through Orf5a Amino Acid Heatmap", width=size, height=size,
                                      margin=go.Margin(l=200,r=100,b=200,t=50,pad=4), xaxis=dict(side='bottom'))
except:
    # NOTE(review): bare except presumably guards a missing "Orf2b-Orf5a_aa"
    # analysis (KeyError), but it will also hide unrelated errors -- confirm.
    fig_Heatmap_orf2b5a = None
# Create Plots
# plot(..., auto_open=False) writes the HTML file and returns a div string.
html_recombination = None;
html_orfDropdown_n = plot(fig_orfDropdown_n, filename=OutputFolder + "/ReportParts/orfgraph_n.html", auto_open=False, config={'showLink': False, 'displayModeBar': False})
html_orfDropdown_a = plot(fig_orfDropdown_a, filename=OutputFolder + "/ReportParts/orfgraph_a.html", auto_open=False, config={'showLink': False, 'displayModeBar': False})
html_vaccinePlots_n = []
html_vaccinePlots_a = []
for i,f in enumerate(fig_vaccines_n):
    html_vaccinePlots_n.append(plot(f, filename=OutputFolder + "/ReportParts/vaccine_n" + str(i+1) + ".html", auto_open=False, config={'showLink': False, 'displayModeBar': False}))
for i,f in enumerate(fig_vaccines_a):
    html_vaccinePlots_a.append(plot(f, filename=OutputFolder + "/ReportParts/vaccine_a" + str(i+1) + ".html", auto_open=False, config={'showLink': False, 'displayModeBar': False}))
html_heatmap_wholegenome = plot(fig_Heatmap_Wholegenome, filename=OutputFolder + "/ReportParts/heatmap_wholegenome.html", auto_open=False, config={'showLink': False, 'displayModeBar': False})
# NOTE(review): fig_Heatmap_orf2b5a may be None here (see except above);
# plot(None, ...) would fail -- confirm the analysis is always present.
html_heatmap_orf2b5a = plot(fig_Heatmap_orf2b5a, filename=OutputFolder + "/ReportParts/heatmap_orf2b5a.html" , auto_open=False, config={'showLink': False, 'displayModeBar': False})
if(fig_recombination != None):
    html_recombination = plot(fig_recombination, filename=OutputFolder + "/ReportParts/recombination.html" , auto_open=False, config={'showLink': False, 'displayModeBar': False})
# Add to html
# Recombination section height scales with the number of entries (min 300 px).
recomb_height = len(RecombinationData)*125
if recomb_height < 300:
    recomb_height = 300
html_string = Graphs.InitalizeHtmlString()
# NOTE(review): the loop variable shadows the builtin `str` for the rest of
# the script; harmless here since str() is not called afterwards.
for str in html_vaccinePlots_n:
    html_string += Graphs.CreateHtmlPlotString(str, width='33%', height=vac_height, min_width=500)
for str in html_vaccinePlots_a:
    html_string += Graphs.CreateHtmlPlotString(str, width='33%', height=vac_height, min_width=500)
html_string += Graphs.CreateHtmlPlotString(html_orfDropdown_n, width='50%', height=orf_height)
html_string += Graphs.CreateHtmlPlotString(html_orfDropdown_a, width='50%', height=orf_height)
html_string += Graphs.CreateHtmlPlotString(html_heatmap_wholegenome, width='50%', padding_top='50%', min_width=size, height=size)
html_string += Graphs.CreateHtmlPlotString(html_heatmap_orf2b5a, width='50%', padding_top='50%', min_width=size, height=size)
# NOTE(review): html_recombination may still be None when there was no
# recombination data -- confirm CreateHtmlPlotString tolerates None.
html_string += Graphs.CreateHtmlPlotString(html_recombination, width=800, height=recomb_height, min_width=800)
html_string += Graphs.CreateImageHtmlString(OutputFolder + "\PhyloGeneticTrees\Wholegenome_tree.png", width=600, height='auto',
                                            title='Whole Genome Phylogenetic Tree', min_width=600)
html_string += Graphs.EndHtmlString()
# Write the final report and open it in the default browser.
f = open(OutputFolder + "/Report.html",'w')
f.write(html_string)
f.close()
webbrowser.open('file://' + os.path.realpath(OutputFolder + "/Report.html"))
# Mobile Report
'''html_string = Graphs.InitalizeHtmlString()
#html_string += Graphs.CreateHtmlPlotString(html_orfDropdown, width='100%', height=bar_height)
for str in html_vaccinePlots:
html_string += Graphs.CreateHtmlPlotString(str, width='100%', height=bar_height)
html_string += Graphs.CreateHtmlPlotString(html_heatmap_wholegenome, width='100%', height=size, min_width=size)
html_string += Graphs.CreateHtmlPlotString(html_heatmap_orf2b5a, width='100%', height=size, min_width=size)
html_string += Graphs.CreateHtmlPlotString(html_recombination, width='100%', height=recomb_height, min_width=200)
html_string += Graphs.CreateImageHtmlString(OutputFolder + "\PhyloGeneticTrees\Wholegenome_tree.png", width='100%', height='auto',
title='Whole Genome Phylogenetic Tree', min_width=200)
html_string += Graphs.EndHtmlString()
f = open(OutputFolder + "/Report_Mobile.html",'w')
f.write(html_string)
f.close()'''
|
988,934 | 6e62369f40f41ff529083d0ed83938c6576d0973 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# $Id: admin.py 433 2009-07-14 04:10:28Z tobias $
# ----------------------------------------------------------------------------
#
# Copyright (C) 2008 Caktus Consulting Group, LLC
#
# This file is part of minibooks.
#
# minibooks is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# minibooks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with minibooks. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from crm import models as crm
class BusinessTypeAdmin(admin.ModelAdmin):
    # Plain admin with Django's default options.
    pass
admin.site.register(crm.BusinessType, BusinessTypeAdmin)
class RelationshipType(admin.ModelAdmin):
    # NOTE(review): class name lacks the conventional "Admin" suffix; this is
    # the ModelAdmin for crm.RelationshipType, not the model itself.
    list_display = ('name', 'slug',)
admin.site.register(crm.RelationshipType, RelationshipType)
class InteractionAdmin(admin.ModelAdmin):
    # Plain admin with Django's default options.
    pass
admin.site.register(crm.Interaction, InteractionAdmin)
def send_account_activation_email(modeladmin, request, queryset):
    """Admin action: redirect to the registration view for the selected contacts.

    The selected primary keys are forwarded as repeated ``ids`` query
    parameters; the target view performs the actual email sending.
    """
    selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
    # POST.getlist returns strings, so interpolate with %s -- the original
    # "ids=%d" % pk raised TypeError on every invocation.
    selected = ["ids=%s" % pk for pk in selected]
    url = reverse('create_registration')
    return HttpResponseRedirect("%s?%s" % (
        url,
        "&".join(selected)
    ))
class ContactAdmin(admin.ModelAdmin):
    """Admin for Contact: searchable, filterable by type, with a bulk action
    to send account-activation emails."""

    search_fields = ('first_name', 'last_name', 'name', 'email')
    raw_id_fields = ('user', 'locations')
    list_display = ('id', 'type', 'name', 'first_name', 'last_name', 'email', 'external_id')
    list_filter = ('type',)
    # 'order_by' is not a ModelAdmin option and was silently ignored;
    # 'ordering' is the supported attribute.
    ordering = ('sortname',)
    actions = [send_account_activation_email]
admin.site.register(crm.Contact, ContactAdmin)
class LoginRegistrationAdmin(admin.ModelAdmin):
    """Admin for LoginRegistration (account-activation invitations)."""

    list_display = ('contact', 'date', 'activation_key', 'activated')
    raw_id_fields = ('contact',)
    list_filter = ('activated', 'date',)
    # Fixed: ModelAdmin uses 'ordering', not 'order_by' (which was ignored).
    ordering = ('date',)
admin.site.register(crm.LoginRegistration, LoginRegistrationAdmin)
class ContactRelationshipAdmin(admin.ModelAdmin):
    """Admin for ContactRelationship (links between two contacts)."""

    list_display = ('id', 'from_contact', 'to_contact', 'start_date',
                    'end_date')
    raw_id_fields = ('from_contact', 'to_contact')
    list_filter = ('start_date', 'end_date',)
    # Fixed: ModelAdmin uses 'ordering', not 'order_by' (which was ignored).
    ordering = ('start_date',)
admin.site.register(crm.ContactRelationship, ContactRelationshipAdmin)
|
988,935 | 8ba39b19da7921775b579b20ac22808351235c35 | """Module to extract data from CMIP NetCDF data files.
This module wraps the area extraction funcionality from
`kcs.utils.coord`. It can run multiple processes in
parallel. Extracted datasets can be saved (by default) to disk, in
subdirectoriees named after the variable and area (given by a template
that follows Python formatted strings with variable names; the default
is given in the `TEMPLATE` constant).
The module can also be used as a executable module, with the `-m
kcs.extraction` option to the `python` executable.
"""
import sys
import argparse
import itertools
import logging
from ..utils.logging import setup as setup_logging
from ..utils.argparse import parser as kcs_parser
from ..utils.atlist import atlist
from ..config import read_config, default_config
from .core import calc
logger = logging.getLogger('extraction') # pylint: disable=invalid-name
def parse_args():
    """Parse the command line arguments.

    Also sets up logging, reads the configuration file, and normalises the
    parsed options before returning the argparse namespace: fills in the
    default output template, converts the negative ``--no-*`` flags into
    positive ``save_result`` / ``average_area`` attributes, and resolves
    area names to their configured definitions.
    """
    areas = list(default_config['areas'].keys())

    class ListAreas(argparse.Action):
        """Helper class for argparse to list available areas and exit"""

        def __call__(self, parser, namespace, values, option_string=None):
            print("\n".join(areas))
            parser.exit()

    parser = argparse.ArgumentParser(parents=[kcs_parser],
                                     conflict_handler='resolve')
    parser.add_argument('files', nargs='+', help="Input files")
    parser.add_argument('--area', action='append', required=True,
                        choices=areas, help="One or more area names")
    parser.add_argument('--template',
                        help="Output path template, including subdirectory")
    parser.add_argument('-v', '--verbosity', action='count',
                        default=0, help="Verbosity level")
    parser.add_argument('-P', '--nproc', type=int, default=1,
                        help="Number of simultaneous processes")
    parser.add_argument('--list-areas', action=ListAreas, nargs=0,
                        help="List available areas and quit")
    parser.add_argument('--regrid', action='store_true',
                        help="Regrid the data (to a 1x1 deg. grid)")
    # Fixed help text: this flag *disables* saving (the original help said
    # the opposite of what the flag does).
    parser.add_argument('--no-save-results', action='store_true',
                        help="Don't store the resulting extracted datasets on disk")
    parser.add_argument('--no-average-area', action='store_true',
                        help="Don't average the extracted areas")
    parser.add_argument('--tempdir')
    parser.add_argument('--subdir-per-realization', action='store_true')
    parser.add_argument('--ignore-common-warnings', action='store_true')
    args = parser.parse_args()
    setup_logging(args.verbosity)
    read_config(args.config)
    if args.template is None:
        args.template = default_config['data']['extraction']['template']
    # Positive flags are easier to pass on to calc() downstream.
    args.save_result = not args.no_save_results
    args.average_area = not args.no_average_area
    # Resolve area names to definitions; 'global' means no extraction box.
    args.area = {name: default_config['areas'][name] for name in args.area}
    args.area = {key: None if value == 'global' else value for key, value in args.area.items()}
    return args
def main():
    """Command-line entry point: parse arguments and run the extraction."""
    args = parse_args()
    logger.debug("%s", " ".join(sys.argv))
    logger.debug("Args: %s", args)
    # Expand any @-lists among the inputs into one flat list of file paths.
    files = [path for pattern in args.files for path in atlist(pattern)]
    calc(
        files,
        args.area,
        regrid=args.regrid,
        save_result=args.save_result,
        average_area=args.average_area,
        nproc=args.nproc,
        template=args.template,
        tempdir=args.tempdir,
        subdir_per_realization=args.subdir_per_realization,
        ignore_common_warnings=args.ignore_common_warnings,
    )
    logger.debug("%s finished", sys.argv[0])
logger.debug("%s finished", sys.argv[0])
if __name__ == "__main__":
main()
|
988,936 | 1bb146cf5bc9d0992121a00b24773d58f6c31083 | import sys, os, numpy
from scipy.stats import mannwhitneyu, wilcoxon
from glbase3 import *
import matplotlib.pyplot as plot
sys.path.append('../../../')
import shared
def bundle_up_by_name(mode, all_genes, tes, _draw_hist=True):
    """Group transcripts by gene symbol and annotate each with TE content.

    mode: 'all', or a coding class value matched against each transcript's
    'coding' field (non-matching transcripts are dropped).
    all_genes: iterable of transcript dicts (requires 'name',
    'transcript_id' and 'coding' keys); mutated in place with 'TEs'/'doms'.
    tes: dict keyed by transcript_id carrying TE domain annotations ('doms').
    _draw_hist: if True, also save a histogram of transcript counts per gene
    to 'transcripts_per_gene-<mode>.pdf'.

    Returns a dict mapping gene symbol -> list of transcript dicts, keeping
    only genes with more than one transcript.
    """
    # First I need to bundle them up by their name;
    bundles = {}
    for gene in all_genes:
        #if gene['expression'] == 'depleted':
        #    continue
        # Gene symbol is the first whitespace-separated token of 'name'.
        symbol = gene['name'].split(' ')[0].strip()
        if symbol not in bundles:
            bundles[symbol] = []
        # Annotate the transcript with its TE domains (if any).
        if gene['transcript_id'] in tes:
            gene['doms'] = tes[gene['transcript_id']]['doms']
            gene['TEs'] = True
        else:
            gene['TEs'] = False
            gene['doms'] = []
        # remove the genes that are only coding/non-coding
        if mode != 'all' and gene['coding'] != mode:
            continue
        bundles[symbol].append(gene)
    print(mode)
    print('Found {0:,} genes'.format(len(bundles)))
    # Keep only genes that have more than one transcript variant.
    bundles = {b: bundles[b] for b in bundles if len(bundles[b]) > 1}
    genes_with_multiple_transcripts = len(bundles)
    print('Found {0:,} genes with >1 transcript'.format(genes_with_multiple_transcripts))
    transcript_variants_per_gene = [len(bundles[gene]) for gene in bundles]
    # cap counts at 20 so the histogram has a closed ">=20" final bin
    transcript_variants_per_gene = [min(b, 20) for b in transcript_variants_per_gene]
    # histogram;
    if _draw_hist:
        fig = plot.figure(figsize=[1.6,1.1])
        ax = fig.add_subplot(111)
        ax.hist(transcript_variants_per_gene, max(transcript_variants_per_gene)-1, range=(0, 20))
        ax.set_xlim([-0.5, 21.5])
        ax.set_xticks([1.5, 10, 19.5])
        ax.set_xticklabels([2, 10, '>=20'])
        [t.set_fontsize(6) for t in ax.get_yticklabels()]
        [t.set_fontsize(6) for t in ax.get_xticklabels()]
        fig.savefig('transcripts_per_gene-{0}.pdf'.format(mode))
    return bundles
# Broad summary:
def process_bundles(bundle):
    """Compute per-TE fold-changes and P values from bundled transcripts.

    For each gene, compares the strongest (max TPM) TE-containing transcript
    against the strongest non-TE transcript, grouped by TE family name (via
    the module-level ``dfam_dict``). Genes lacking either side are counted
    and skipped.

    Returns a tuple ``(res_fcs, ps)``: ``res_fcs`` maps TE name to a list of
    per-gene fold-changes; ``ps`` maps TE name to a two-sided Mann-Whitney U
    P value over the pooled TPMs.
    """
    res_fcs = {}
    # (Removed a dead `ps = {}` initialisation here; `ps` is rebuilt below.)
    gene_with_noTE_and_TE_transcript = 0
    has_no_with_te_transcript = 0
    has_no_nonte_transcript = 0
    # Accumulators for the P calculation: pooled TPMs of transcripts
    # with/without each TE, across all genes.
    tpms_withTE = {}
    tpms_noTE = {}
    for gene in bundle:
        tpms_for_no_te = []
        tpms_for_with_te = {}
        for transcript in bundle[gene]:
            if transcript['TEs']:
                unq_tes = set([t['dom'] for t in transcript['doms']])
                for te in unq_tes:
                    full_name = dfam_dict[te]
                    if full_name not in tpms_for_with_te:
                        tpms_for_with_te[full_name] = []
                    tpms_for_with_te[full_name].append(transcript['TPM'])
            else: # No TE:
                tpms_for_no_te.append(transcript['TPM'])
        # Get FC:
        # A fold-change needs both a TE-containing and a non-TE transcript.
        if not tpms_for_with_te:
            has_no_with_te_transcript += 1
            continue # No paired
        if not tpms_for_no_te: # There is a few!
            has_no_nonte_transcript += 1
            continue
        gene_with_noTE_and_TE_transcript += 1
        for te in tpms_for_with_te:
            # Compare the strongest transcript on each side.
            fc = utils.fold_change(max(tpms_for_no_te), max(tpms_for_with_te[te]), pad=0.01) # correct way around
            # You need to think about this slightly odd way of generating a P value, but it basically keeps all genes in each category
            # that are with or without a specific TE, and then does a MWU against that;
            if te not in tpms_noTE:
                tpms_noTE[te] = []
            if te not in tpms_withTE:
                tpms_withTE[te] = []
            tpms_noTE[te] += tpms_for_no_te
            tpms_withTE[te] += tpms_for_with_te[te]
            if te not in res_fcs:
                res_fcs[te] = []
            res_fcs[te].append(fc)
    # Figure out the P:
    ps = {}
    for te in tpms_withTE:
        ps[te] = mannwhitneyu(tpms_noTE[te], tpms_withTE[te], alternative='two-sided')[1]
    # Q value correct?
    print('{0:,} genes without a non-TE transcript '.format(has_no_nonte_transcript))
    print('{0:,} genes without a TE-containing transcript'.format(has_no_with_te_transcript))
    print('Found {0:,} genes with at least 1 non-TE transcript and 1 TE-containing transcript'.format(gene_with_noTE_and_TE_transcript))
    return res_fcs, ps
|
988,937 | 7af16e351166cde99e46bbd7f47f34417f29789f | """
Utility classes
---------------
"""
from __future__ import annotations
import typing as t
import typing_extensions as te
import warnings
from collections import namedtuple
__all__ = ['NameTitle', 'LabeledEnum', 'InspectableSet', 'classmethodproperty']
NameTitle = namedtuple('NameTitle', ['name', 'title'])
class _LabeledEnumMeta(type):
    """Construct labeled enumeration.

    Rewrites class attributes at creation time: 2/3-tuples become plain
    values with their labels recorded in ``__labels__``, and sets become
    groups of already-processed values. ``__names__`` maps attribute name
    to processed value for introspection.
    """

    def __new__(
        mcs: t.Type,  # noqa: N804
        name: str,
        bases: t.Tuple[t.Type, ...],
        attrs: t.Dict[str, t.Any],
        **kwargs: t.Any,
    ) -> t.Type[LabeledEnum]:
        labels: t.Dict[str, t.Any] = {}  # value -> label (str or NameTitle)
        names: t.Dict[str, t.Any] = {}  # attribute name -> processed value
        for key, value in tuple(attrs.items()):
            if key != '__order__' and isinstance(value, tuple):
                # value = tuple of actual value (0), label/name (1), optional title (2)
                if len(value) == 2:
                    labels[value[0]] = value[1]
                    attrs[key] = names[key] = value[0]
                elif len(value) == 3:
                    labels[value[0]] = NameTitle(value[1], value[2])
                    attrs[key] = names[key] = value[0]
                else:  # pragma: no cover
                    raise AttributeError(f"Unprocessed attribute {key}")
            elif key != '__order__' and isinstance(value, set):
                # value = set of other unprocessed values
                # Unwrap tuples to their bare value so "in" tests work.
                attrs[key] = names[key] = {
                    v[0] if isinstance(v, tuple) else v for v in value
                }
        if '__order__' in attrs:
            warnings.warn(
                "LabeledEnum.__order__ is obsolete in Python >= 3.6", stacklevel=2
            )
        attrs['__labels__'] = labels
        attrs['__names__'] = names
        return type.__new__(mcs, name, bases, attrs)

    def __getitem__(cls, key: t.Union[str, tuple]) -> t.Any:
        # Dict-style access on the enum class returns the label for a value.
        return cls.__labels__[key]  # type: ignore[attr-defined]

    def __contains__(cls, key: t.Union[str, tuple]) -> bool:
        # `value in Enum` tests membership among labeled values only.
        return key in cls.__labels__  # type: ignore[attr-defined]
class LabeledEnum(metaclass=_LabeledEnumMeta):
    """
    Labeled enumerations.

    Declarate an enumeration with values and labels (for use in UI)::

        >>> class MY_ENUM(LabeledEnum):
        ...     FIRST = (1, "First")
        ...     THIRD = (3, "Third")
        ...     SECOND = (2, "Second")

    :class:`LabeledEnum` will convert any attribute that is a 2-tuple into a value and
    label pair. Access values as direct attributes of the enumeration::

        >>> MY_ENUM.FIRST
        1
        >>> MY_ENUM.SECOND
        2
        >>> MY_ENUM.THIRD
        3

    Access labels via dictionary lookup on the enumeration::

        >>> MY_ENUM[MY_ENUM.FIRST]
        'First'
        >>> MY_ENUM[2]
        'Second'
        >>> MY_ENUM.get(3)
        'Third'
        >>> MY_ENUM.get(4) is None
        True

    Retrieve a full list of values and labels with ``.items()``. Definition order is
    preserved::

        >>> MY_ENUM.items()
        [(1, 'First'), (3, 'Third'), (2, 'Second')]
        >>> MY_ENUM.keys()
        [1, 3, 2]
        >>> MY_ENUM.values()
        ['First', 'Third', 'Second']

    Three value tuples are assumed to be (value, name, title) and the name and title are
    converted into NameTitle(name, title)::

        >>> class NAME_ENUM(LabeledEnum):
        ...     FIRST = (1, 'first', "First")
        ...     THIRD = (3, 'third', "Third")
        ...     SECOND = (2, 'second', "Second")

        >>> NAME_ENUM.FIRST
        1
        >>> NAME_ENUM[NAME_ENUM.FIRST]
        NameTitle(name='first', title='First')
        >>> NAME_ENUM[NAME_ENUM.SECOND].name
        'second'
        >>> NAME_ENUM[NAME_ENUM.THIRD].title
        'Third'

    To make it easier to use with forms and to hide the actual values, a list of (name,
    title) pairs is available::

        >>> [tuple(x) for x in NAME_ENUM.nametitles()]
        [('first', 'First'), ('third', 'Third'), ('second', 'Second')]

    Given a name, the value can be looked up::

        >>> NAME_ENUM.value_for('first')
        1
        >>> NAME_ENUM.value_for('second')
        2

    Values can be grouped together using a set, for performing "in" operations. These do
    not have labels and cannot be accessed via dictionary access::

        >>> class RSVP_EXTRA(LabeledEnum):
        ...     RSVP_Y = ('Y', "Yes")
        ...     RSVP_N = ('N', "No")
        ...     RSVP_M = ('M', "Maybe")
        ...     RSVP_U = ('U', "Unknown")
        ...     RSVP_A = ('A', "Awaiting")
        ...     UNCERTAIN = {RSVP_M, RSVP_U, 'A'}

        >>> isinstance(RSVP_EXTRA.UNCERTAIN, set)
        True
        >>> sorted(RSVP_EXTRA.UNCERTAIN)
        ['A', 'M', 'U']
        >>> 'N' in RSVP_EXTRA.UNCERTAIN
        False
        >>> 'M' in RSVP_EXTRA.UNCERTAIN
        True
        >>> RSVP_EXTRA.RSVP_U in RSVP_EXTRA.UNCERTAIN
        True

    Labels are stored internally in a dictionary named ``__labels__``, mapping the value
    to the label. Symbol names are stored in ``__names__``, mapping name to the value.
    The label dictionary will only contain values processed using the tuple syntax,
    which excludes grouped values, while the names dictionary will contain both, but
    will exclude anything else found in the class that could not be processed (use
    ``__dict__`` for everything)::

        >>> list(RSVP_EXTRA.__labels__.keys())
        ['Y', 'N', 'M', 'U', 'A']
        >>> list(RSVP_EXTRA.__names__.keys())
        ['RSVP_Y', 'RSVP_N', 'RSVP_M', 'RSVP_U', 'RSVP_A', 'UNCERTAIN']
    """

    # Populated by _LabeledEnumMeta at class creation time.
    __labels__: t.ClassVar[t.Dict[t.Any, t.Any]]
    __names__: t.ClassVar[t.Dict[str, t.Any]]

    @classmethod
    def get(cls, key: t.Any, default: t.Optional[t.Any] = None) -> t.Any:
        """Get the label for an enum value."""
        return cls.__labels__.get(key, default)

    @classmethod
    def keys(cls) -> t.List[t.Any]:
        """Get all enum values."""
        return list(cls.__labels__.keys())

    @classmethod
    def values(cls) -> t.List[t.Union[str, NameTitle]]:
        """Get all enum labels."""
        return list(cls.__labels__.values())

    @classmethod
    def items(cls) -> t.List[t.Tuple[t.Any, t.Union[str, NameTitle]]]:
        """Get all enum values and associated labels."""
        return list(cls.__labels__.items())

    @classmethod
    def value_for(cls, name: str) -> t.Any:
        """Get enum value given a label name."""
        # Only three-tuple members carry a NameTitle and thus a name;
        # returns None when no member matches.
        for key, value in list(cls.__labels__.items()):
            if isinstance(value, NameTitle) and value.name == name:
                return key
        return None

    @classmethod
    def nametitles(cls) -> t.List[NameTitle]:
        """Get names and titles of labels."""
        # NameTitle is a namedtuple, so the isinstance(tuple) test selects
        # exactly the three-tuple (named) members.
        return [label for label in cls.values() if isinstance(label, tuple)]
_C = t.TypeVar('_C', bound=t.Collection)


class InspectableSet(t.Generic[_C]):
    """
    InspectableSet provides an ``elem in set`` test via attribute or dictionary access.

    For example, if ``permissions`` is an InspectableSet wrapping a regular `set`, a
    test for an element in the set can be rewritten from ``if 'view' in permissions`` to
    ``if permissions.view``. The concise form improves readability for visual inspection
    where code linters cannot help, such as in Jinja2 templates.

    InspectableSet provides a read-only view to the wrapped data source. The mutation
    operators ``+=``, ``-=``, ``&=``, ``|=`` and ``^=`` will be proxied to the
    underlying data source, if supported, while the copy operators ``+``, ``-``, ``&``,
    ``|`` and ``^`` will be proxied and the result re-wrapped with InspectableSet.

    If no data source is supplied to InspectableSet, an empty set is used.

    ::

        >>> myset = InspectableSet({'member', 'other'})
        >>> 'member' in myset
        True
        >>> 'random' in myset
        False
        >>> myset.member
        True
        >>> myset.random
        False
        >>> myset['member']
        True
        >>> myset['random']
        False
        >>> joinset = myset | {'added'}
        >>> isinstance(joinset, InspectableSet)
        True
        >>> joinset = joinset | InspectableSet({'inspectable'})
        >>> isinstance(joinset, InspectableSet)
        True
        >>> 'member' in joinset
        True
        >>> 'other' in joinset
        True
        >>> 'added' in joinset
        True
        >>> 'inspectable' in joinset
        True
        >>> emptyset = InspectableSet()
        >>> len(emptyset)
        0
    """

    __slots__ = ('__members__',)
    __members__: _C

    def __init__(self, members: t.Union[_C, InspectableSet[_C], None] = None) -> None:
        if isinstance(members, InspectableSet):
            members = members.__members__
        # __setattr__ is blocked on this class, so go via object.__setattr__.
        object.__setattr__(
            self, '__members__', members if members is not None else set()
        )

    def __repr__(self) -> str:
        return f'InspectableSet({self.__members__!r})'

    def __hash__(self) -> int:
        return hash(self.__members__)

    def __contains__(self, key: t.Any) -> bool:
        return key in self.__members__

    def __iter__(self) -> t.Iterator:
        yield from self.__members__

    def __len__(self) -> int:
        return len(self.__members__)

    def __bool__(self) -> bool:
        return bool(self.__members__)

    def __getitem__(self, key: t.Any) -> bool:
        return key in self.__members__  # Return True if present, False otherwise

    def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:
        """Prevent accidental attempts to set a value."""
        raise AttributeError(attr)

    def __getattr__(self, attr: str) -> bool:
        return attr in self.__members__  # Return True if present, False otherwise

    def _op_bool(self, op: str, other: t.Any) -> bool:
        """Return result of a boolean operation."""
        if hasattr(self.__members__, op):
            if isinstance(other, InspectableSet):
                other = other.__members__
            return getattr(self.__members__, op)(other)
        return NotImplemented

    def __le__(self, other: t.Any) -> bool:
        """Return self <= other."""
        return self._op_bool('__le__', other)

    def __lt__(self, other: t.Any) -> bool:
        """Return self < other."""
        return self._op_bool('__lt__', other)

    def __eq__(self, other: t.Any) -> bool:
        """Return self == other."""
        return self._op_bool('__eq__', other)

    def __ne__(self, other: t.Any) -> bool:
        """Return self != other."""
        return self._op_bool('__ne__', other)

    def __gt__(self, other: t.Any) -> bool:
        """Return self > other."""
        return self._op_bool('__gt__', other)

    def __ge__(self, other: t.Any) -> bool:
        """Return self >= other."""
        return self._op_bool('__ge__', other)

    def _op_copy(self, op: str, other: t.Any) -> InspectableSet[_C]:
        """Return result of a copy operation, re-wrapped in InspectableSet."""
        if hasattr(self.__members__, op):
            if isinstance(other, InspectableSet):
                other = other.__members__
            retval = getattr(self.__members__, op)(other)
            if retval is not NotImplemented:
                return InspectableSet(retval)
        return NotImplemented

    def __add__(self, other: t.Any) -> InspectableSet[_C]:
        """Return self + other (add)."""
        return self._op_copy('__add__', other)

    def __radd__(self, other: t.Any) -> InspectableSet[_C]:
        """Return other + self (reverse add)."""
        return self._op_copy('__radd__', other)

    def __sub__(self, other: t.Any) -> InspectableSet[_C]:
        """Return self - other (subset)."""
        return self._op_copy('__sub__', other)

    def __rsub__(self, other: t.Any) -> InspectableSet[_C]:
        """Return other - self (reverse subset)."""
        return self._op_copy('__rsub__', other)

    def __and__(self, other: t.Any) -> InspectableSet[_C]:
        """Return self & other (intersection)."""
        return self._op_copy('__and__', other)

    def __rand__(self, other: t.Any) -> InspectableSet[_C]:
        """Return other & self (intersection)."""
        return self._op_copy('__rand__', other)

    def __or__(self, other: t.Any) -> InspectableSet[_C]:
        """Return self | other (union)."""
        return self._op_copy('__or__', other)

    def __ror__(self, other: t.Any) -> InspectableSet[_C]:
        """Return other | self (union)."""
        return self._op_copy('__ror__', other)

    def __xor__(self, other: t.Any) -> InspectableSet[_C]:
        """Return self ^ other (non-intersecting)."""
        return self._op_copy('__xor__', other)

    def __rxor__(self, other: t.Any) -> InspectableSet[_C]:
        """Return other ^ self (non-intersecting)."""
        return self._op_copy('__rxor__', other)

    def _op_inplace(self, op: str, other: t.Any) -> te.Self:
        """Return self after an inplace operation."""
        if hasattr(self.__members__, op):
            if isinstance(other, InspectableSet):
                other = other.__members__
            if getattr(self.__members__, op)(other) is NotImplemented:
                return NotImplemented
            return self
        return NotImplemented

    def __iadd__(self, other: t.Any) -> te.Self:
        """Operate self += other (list/tuple add)."""
        return self._op_inplace('__iadd__', other)

    def __isub__(self, other: t.Any) -> te.Self:
        """Operate self -= other (set.difference_update)."""
        return self._op_inplace('__isub__', other)

    def __iand__(self, other: t.Any) -> te.Self:
        """Operate self &= other (set.intersection_update)."""
        return self._op_inplace('__iand__', other)

    def __ior__(self, other: t.Any) -> te.Self:
        """Operate self |= other (set.update)."""
        return self._op_inplace('__ior__', other)

    def __ixor__(self, other: t.Any) -> te.Self:
        """Operate self ^= other (set.symmetric_difference_update)."""
        # Fixed: previously dispatched to '__isub__', so ^= performed a set
        # difference instead of a symmetric difference.
        return self._op_inplace('__ixor__', other)
class classmethodproperty:  # noqa: N801
    """
    Decorator that exposes a class method as a read-only, property-like
    attribute.

    The wrapped function always receives the class (never an instance), so
    the attribute works identically on the class, on instances, and on
    subclasses::

        >>> class Foo:
        ...     @classmethodproperty
        ...     def test(cls):
        ...         return repr(cls)
        ...

        >>> Foo.test
        "<class 'coaster.utils.classes.Foo'>"
        >>> Foo().test
        "<class 'coaster.utils.classes.Foo'>"
        >>> class Bar(Foo):
        ...     pass
        ...
        >>> Bar.test
        "<class 'coaster.utils.classes.Bar'>"
        >>> Bar().test
        "<class 'coaster.utils.classes.Bar'>"

    As a data descriptor it blocks writes and deletes through an *instance*;
    Python's descriptor protocol cannot intercept assignment on the class
    itself, which simply replaces the descriptor::

        >>> Foo().test = 'bar'
        Traceback (most recent call last):
        AttributeError: test is read-only
        >>> del Foo().test
        Traceback (most recent call last):
        AttributeError: test is read-only
        >>> Foo.test = 'bar'
        >>> Foo.test
        'bar'
    """

    def __init__(self, func: t.Callable) -> None:
        self.func = func

    def __get__(self, _obj: t.Any, cls: t.Type) -> t.Any:
        # The instance (if any) is ignored; the function always gets the class.
        return self.func(cls)

    def __set__(self, _obj: t.Any, _value: t.Any) -> t.NoReturn:
        raise AttributeError(f"{self.func.__name__} is read-only")

    def __delete__(self, _obj: t.Any) -> t.NoReturn:
        raise AttributeError(f"{self.func.__name__} is read-only")
|
988,938 | cbe1f1af179dff15ba3a86f3ab06c09f0fdc3afe | import sys
from traceback import StackSummary
class Traceback(Exception):
__slots__ = ('tb',)
def __init__(self, tb):
self.tb = tb
def __str__(self):
return '\n\nTraceback (most recent call last):\n' + self.tb.rstrip()
def walk_tb(tb):
"""Walk a traceback yielding the frame and line number for each frame.
This will follow tb.tb_next (and thus is in the opposite order to
walk_stack). Usually used with StackSummary.extract.
"""
track = False
result = []
while tb is not None:
if track:
result.append((tb.tb_frame, tb.tb_lineno))
if '__log_tb_start__' in tb.tb_frame.f_locals:
result = []
track = True
tb = tb.tb_next
return result
def extract_log_tb(exc=None):
    """Format the traceback of the exception currently being handled.

    Frames above a ``__log_tb_start__`` marker are dropped (see
    :func:`walk_tb`). If *exc* is given and was chained from a
    :class:`Traceback` carrier (``raise ... from Traceback(...)``), the
    carried traceback text is prepended so the log shows the full
    cross-boundary history.

    Must be called from inside an ``except`` block — it reads the active
    traceback via ``sys.exc_info()``.
    """
    tb = ''.join(StackSummary.extract(walk_tb(sys.exc_info()[-1])).format())
    # Bug fix: `exc` defaults to None, but the original dereferenced
    # `exc.__cause__` unconditionally, raising AttributeError whenever the
    # caller omitted the argument. Guard on `exc` first; the isinstance
    # check already rejects a None __cause__.
    if exc is not None and isinstance(exc.__cause__, Traceback):
        tb = exc.__cause__.tb + tb
    return tb
|
988,939 | a211fc0c920293c87210932fe7fa05ee39d42605 | TABLE = [ 0x39cb44b8, 0x23754f67, 0x5f017211, 0x3ebb24da, 0x351707c6, 0x63f9774b, 0x17827288, 0x0fe74821,
0x5b5f670f, 0x48315ae8, 0x785b7769, 0x2b7a1547, 0x38d11292, 0x42a11b32, 0x35332244, 0x77437b60,
0x1eab3b10, 0x53810000, 0x1d0212ae, 0x6f0377a8, 0x43c03092, 0x2d3c0a8e, 0x62950cbf, 0x30f06ffa,
0x34f710e0, 0x28f417fb, 0x350d2f95, 0x5a361d5a, 0x15cc060b, 0x0afd13cc, 0x28603bcf, 0x3371066b,
0x30cd14e4, 0x175d3a67, 0x6dd66a13, 0x2d3409f9, 0x581e7b82, 0x76526b99, 0x5c8d5188, 0x2c857971,
0x15f51fc0, 0x68cc0d11, 0x49f55e5c, 0x275e4364, 0x2d1e0dbc, 0x4cee7ce3, 0x32555840, 0x112e2e08,
0x6978065a, 0x72921406, 0x314578e7, 0x175621b7, 0x40771dbf, 0x3fc238d6, 0x4a31128a, 0x2dad036e,
0x41a069d6, 0x25400192, 0x00dd4667, 0x6afc1f4f, 0x571040ce, 0x62fe66df, 0x41db4b3e, 0x3582231f,
0x55f6079a, 0x1ca70644, 0x1b1643d2, 0x3f7228c9, 0x5f141070, 0x3e1474ab, 0x444b256e, 0x537050d9,
0x0f42094b, 0x2fd820e6, 0x778b2e5e, 0x71176d02, 0x7fea7a69, 0x5bb54628, 0x19ba6c71, 0x39763a99,
0x178d54cd, 0x01246e88, 0x3313537e, 0x2b8e2d17, 0x2a3d10be, 0x59d10582, 0x37a163db, 0x30d6489a,
0x6a215c46, 0x0e1c7a76, 0x1fc760e7, 0x79b80c65, 0x27f459b4, 0x799a7326, 0x50ba1782, 0x2a116d5c,
0x63866e1b, 0x3f920e3c, 0x55023490, 0x55b56089, 0x2c391fd1, 0x2f8035c2, 0x64fd2b7a, 0x4ce8759a,
0x518504f0, 0x799501a8, 0x3f5b2cad, 0x38e60160, 0x637641d8, 0x33352a42, 0x51a22c19, 0x085c5851,
0x032917ab, 0x2b770ac7, 0x30ac77b3, 0x2bec1907, 0x035202d0, 0x0fa933d3, 0x61255df3, 0x22ad06bf,
0x58b86971, 0x5fca0de5, 0x700d6456, 0x56a973db, 0x5ab759fd, 0x330e0be2, 0x5b3c0ddd, 0x495d3c60,
0x53bd59a6, 0x4c5e6d91, 0x49d9318d, 0x103d5079, 0x61ce42e3, 0x7ed5121d, 0x14e160ed, 0x212d4ef2,
0x270133f0, 0x62435a96, 0x1fa75e8b, 0x6f092fbe, 0x4a000d49, 0x57ae1c70, 0x004e2477, 0x561e7e72,
0x468c0033, 0x5dcc2402, 0x78507ac6, 0x58af24c7, 0x0df62d34, 0x358a4708, 0x3cfb1e11, 0x2b71451c,
0x77a75295, 0x56890721, 0x0fef75f3, 0x120f24f1, 0x01990ae7, 0x339c4452, 0x27a15b8e, 0x0ba7276d,
0x60dc1b7b, 0x4f4b7f82, 0x67db7007, 0x4f4a57d9, 0x621252e8, 0x20532cfc, 0x6a390306, 0x18800423,
0x19f3778a, 0x462316f0, 0x56ae0937, 0x43c2675c, 0x65ca45fd, 0x0d604ff2, 0x0bfd22cb, 0x3afe643b,
0x3bf67fa6, 0x44623579, 0x184031f8, 0x32174f97, 0x4c6a092a, 0x5fb50261, 0x01650174, 0x33634af1,
0x712d18f4, 0x6e997169, 0x5dab7afe, 0x7c2b2ee8, 0x6edb75b4, 0x5f836fb6, 0x3c2a6dd6, 0x292d05c2,
0x052244db, 0x149a5f4f, 0x5d486540, 0x331d15ea, 0x4f456920, 0x483a699f, 0x3b450f05, 0x3b207c6c,
0x749d70fe, 0x417461f6, 0x62b031f1, 0x2750577b, 0x29131533, 0x588c3808, 0x1aef3456, 0x0f3c00ec,
0x7da74742, 0x4b797a6c, 0x5ebb3287, 0x786558b8, 0x00ed4ff2, 0x6269691e, 0x24a2255f, 0x62c11f7e,
0x2f8a7dcd, 0x643b17fe, 0x778318b8, 0x253b60fe, 0x34bb63a3, 0x5b03214f, 0x5f1571f4, 0x1a316e9f,
0x7acf2704, 0x28896838, 0x18614677, 0x1bf569eb, 0x0ba85ec9, 0x6aca6b46, 0x1e43422a, 0x514d5f0e,
0x413e018c, 0x307626e9, 0x01ed1dfa, 0x49f46f5a, 0x461b642b, 0x7d7007f2, 0x13652657, 0x6b160bc5,
0x65e04849, 0x1f526e1c, 0x5a0251b6, 0x2bd73f69, 0x2dbf7acd, 0x51e63e80, 0x5cf2670f, 0x21cd0a03,
0x5cff0261, 0x33ae061e, 0x3bb6345f, 0x5d814a75, 0x257b5df4, 0x0a5c2c5b, 0x16a45527, 0x16f23945
]
DW = 0x100000000  # 2**32 — all accumulator arithmetic is reduced mod 2**32
def user_pro(username, a2, a3, a4):
    """Hash `username` into a 32-bit value using the global TABLE.

    Re-implementation of a decompiled key-derivation routine; the v7/v9/...
    names come straight from the decompiler. `a2` selects one of two mixing
    variants (different TABLE offsets), while `a3` and `a4` seed two of the
    rotating TABLE indices. Returns the final 32-bit round result, or 0 for
    an empty username.
    """
    v16 = 0  # previous round's result, mixed into the next round
    length = len(username)
    i = 0
    if length <= 0:
        result = 0
    else:
        v13 = 0  # TABLE index, +7 per char (used on the a2-falsy path)
        v14 = 0  # TABLE index, +19 per char (used on the a2-truthy path)
        v7 = (15 * a4)%0x100   # TABLE index seeded from a4, +13 per char
        v15 = (17 * a3)%0x100  # TABLE index seeded from a3, +9 per char
        while i < length:
            # NOTE(review): TABLE has 256 entries and the offsets below go
            # up to ord(char)+63, so non-ASCII characters above 0xC0 would
            # raise IndexError — presumably the original binary assumes
            # ASCII input; confirm before hardening.
            upperName_char = ord(username[i].upper())
            v9 = (v16 + TABLE[upperName_char])%DW
            if a2:
                v10 = (TABLE[v7]\
                    + TABLE[v15]\
                    + TABLE[(upperName_char + 47)]\
                    * (TABLE[(upperName_char + 13)] ^ v9))%DW
                result = (TABLE[v14] + v10)%DW
                v16 = (TABLE[v14] + v10)%DW
            else:
                v12 = (TABLE[v7]\
                    + TABLE[v15]\
                    + TABLE[(upperName_char + 23)]\
                    * (TABLE[(upperName_char + 63)] ^ v9))%DW
                result = (TABLE[v13] + v12)%DW
                v16 = (TABLE[v13] + v12)%DW
            # Advance every rotating index for the next character (mod 256).
            v14+= 19
            v14 %= 0x100
            i+=1
            v15 += 9
            v15 %= 0x100
            v7 += 13
            v7 %= 0x100
            v13 += 7
            v13 %= 0x100
    return result
from random import random as rnd
def guess_v8v9():
    """Randomly search 16-bit space for a seed accepted by the keygen.

    Returns ``(i, q)`` where unscrambling ``i`` — xor 0x7892, add 19760,
    xor 0x3421, reduce mod 0x10000 — yields a multiple of 11 whose
    quotient ``q`` is at most 1000.
    """
    while True:
        candidate = int(rnd() * 0x10000)
        unscrambled = (((candidate ^ 0x7892) + 19760) ^ 0x3421) % 0x10000
        if unscrambled % 11 == 0 and unscrambled // 11 <= 1000:
            return candidate, unscrambled // 11
# --- Keygen driver: derive an 8-byte password from the entered name. ---
username = input('Name: ').strip()
p = [0]*8     # password bytes; replaced below by 2-char uppercase hex strings
p[3] = 0x9c   # fixed constant byte (from the decompiled validator)
v8, v9 = guess_v8v9()  # v8: random 16-bit seed, v9: its decoded quotient
def calc():
    # Fill p[4..7] with the bytes of the username hash, low byte first.
    user = user_pro(username, True, 0, v9)
    p[4] = user%0x100
    p[5] = (user>>8)%0x100
    p[6] = (user>>16)%0x100
    p[7] = (user>>24)%0x100
calc()
p[2] = p[5]^(v8%0x100)  # seed low byte xor-masked with a hash byte
p[1] = p[7]^(v8>>8)     # seed high byte xor-masked with a hash byte
while True:
    # Retry random leading bytes until the derived check value is >= 10.
    p[0] = (int)(rnd()*256)
    # NOTE(review): '+' binds tighter than '^' in Python, so this computes
    # ^(0x18+61), i.e. ^77. If the decompiled source meant
    # ((...^0x18)+61) the parentheses are missing — confirm against the
    # original binary before changing anything.
    v10 = (((p[6]^p[0])^0x18+61)%0x100)^0xa7
    if v10>=10: break
for i in range(8):
    # Render each byte as exactly two uppercase hex digits (zero-padded).
    if p[i]<16:
        p[i] = '0'+hex(p[i])[2:3].upper()
    else:
        p[i] = hex(p[i])[2:4].upper()
print('Password: %s%s-%s%s-%s%s-%s%s'%(p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]))
988,940 | 483328f3b2fc411526c9d12d06960e2224a1aa9b | """
Support Vector Machine for handwritten digit classification
SVMs are great for small datasets
Created by : Saranya Rajagopalan
Date : 17-02-2018
"""
import numpy as np
from matplotlib import pyplot as plt
# Five 2-D training samples in homogeneous form: [x1, x2, bias term].
x = np.array([
    [-2, 4, -1],
    [4, 1, -1],
    [1, 6, 1],
    [-2, 4, 1],
    [2, 4, 1],
])
# Class labels for the samples above: first three are -1, last two are +1.
y = np.array([-1, -1, -1, 1, 1])
for d, sample in enumerate(x):
    # Marker matches the label split: '-' for the negative class
    # (indices 0-2), '+' for the positive class (indices 3-4).
    if d <=2:
        plt.scatter(sample[0], sample[1], s=120, marker= '_', linewidths=2)
    else:
        plt.scatter(sample[0], sample[1], s=120, marker='+', linewidths=2)
# Hand-picked candidate separating line (not a fitted decision boundary).
plt.plot([-2, 6],[6, 0.5])
plt.show()
|
988,941 | 7dda29071ef8e9c076de7d7ead238e4b675231b5 |
# -*- coding: utf-8 -*-
"""
ORIGINAL PROGRAM SOURCE CODE:
1: import warnings
2: import functools
3:
4:
5: class MatplotlibDeprecationWarning(UserWarning):
6: '''
7: A class for issuing deprecation warnings for Matplotlib users.
8:
9: In light of the fact that Python builtin DeprecationWarnings are ignored
10: by default as of Python 2.7 (see link below), this class was put in to
11: allow for the signaling of deprecation, but via UserWarnings which are not
12: ignored by default.
13:
14: https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x
15: '''
16: pass
17:
18:
19: mplDeprecation = MatplotlibDeprecationWarning
20:
21:
22: def _generate_deprecation_message(since, message='', name='',
23: alternative='', pending=False,
24: obj_type='attribute',
25: addendum=''):
26:
27: if not message:
28:
29: if pending:
30: message = (
31: 'The %(name)s %(obj_type)s will be deprecated in a '
32: 'future version.')
33: else:
34: message = (
35: 'The %(name)s %(obj_type)s was deprecated in version '
36: '%(since)s.')
37:
38: altmessage = ''
39: if alternative:
40: altmessage = ' Use %s instead.' % alternative
41:
42: message = ((message % {
43: 'func': name,
44: 'name': name,
45: 'alternative': alternative,
46: 'obj_type': obj_type,
47: 'since': since}) +
48: altmessage)
49:
50: if addendum:
51: message += addendum
52:
53: return message
54:
55:
56: def warn_deprecated(
57: since, message='', name='', alternative='', pending=False,
58: obj_type='attribute', addendum=''):
59: '''
60: Used to display deprecation warning in a standard way.
61:
62: Parameters
63: ----------
64: since : str
65: The release at which this API became deprecated.
66:
67: message : str, optional
68: Override the default deprecation message. The format
69: specifier `%(name)s` may be used for the name of the function,
70: and `%(alternative)s` may be used in the deprecation message
71: to insert the name of an alternative to the deprecated
72: function. `%(obj_type)s` may be used to insert a friendly name
73: for the type of object being deprecated.
74:
75: name : str, optional
76: The name of the deprecated object.
77:
78: alternative : str, optional
79: An alternative function that the user may use in place of the
80: deprecated function. The deprecation warning will tell the user
81: about this alternative if provided.
82:
83: pending : bool, optional
84: If True, uses a PendingDeprecationWarning instead of a
85: DeprecationWarning.
86:
87: obj_type : str, optional
88: The object type being deprecated.
89:
90: addendum : str, optional
91: Additional text appended directly to the final message.
92:
93: Examples
94: --------
95:
96: Basic example::
97:
98: # To warn of the deprecation of "matplotlib.name_of_module"
99: warn_deprecated('1.4.0', name='matplotlib.name_of_module',
100: obj_type='module')
101:
102: '''
103: message = _generate_deprecation_message(
104: since, message, name, alternative, pending, obj_type)
105:
106: warnings.warn(message, mplDeprecation, stacklevel=1)
107:
108:
109: def deprecated(since, message='', name='', alternative='', pending=False,
110: obj_type=None, addendum=''):
111: '''
112: Decorator to mark a function or a class as deprecated.
113:
114: Parameters
115: ----------
116: since : str
117: The release at which this API became deprecated. This is
118: required.
119:
120: message : str, optional
121: Override the default deprecation message. The format
122: specifier `%(name)s` may be used for the name of the object,
123: and `%(alternative)s` may be used in the deprecation message
124: to insert the name of an alternative to the deprecated
125: object. `%(obj_type)s` may be used to insert a friendly name
126: for the type of object being deprecated.
127:
128: name : str, optional
129: The name of the deprecated object; if not provided the name
130: is automatically determined from the passed in object,
131: though this is useful in the case of renamed functions, where
132: the new function is just assigned to the name of the
133: deprecated function. For example::
134:
135: def new_function():
136: ...
137: oldFunction = new_function
138:
139: alternative : str, optional
140: An alternative object that the user may use in place of the
141: deprecated object. The deprecation warning will tell the user
142: about this alternative if provided.
143:
144: pending : bool, optional
145: If True, uses a PendingDeprecationWarning instead of a
146: DeprecationWarning.
147:
148: addendum : str, optional
149: Additional text appended directly to the final message.
150:
151: Examples
152: --------
153:
154: Basic example::
155:
156: @deprecated('1.4.0')
157: def the_function_to_deprecate():
158: pass
159:
160: '''
161:
162: def deprecate(obj, message=message, name=name, alternative=alternative,
163: pending=pending, addendum=addendum):
164: import textwrap
165:
166: if not name:
167: name = obj.__name__
168:
169: if isinstance(obj, type):
170: obj_type = "class"
171: old_doc = obj.__doc__
172: func = obj.__init__
173:
174: def finalize(wrapper, new_doc):
175: try:
176: obj.__doc__ = new_doc
177: except (AttributeError, TypeError):
178: # cls.__doc__ is not writeable on Py2.
179: # TypeError occurs on PyPy
180: pass
181: obj.__init__ = wrapper
182: return obj
183: else:
184: obj_type = "function"
185: if isinstance(obj, classmethod):
186: func = obj.__func__
187: old_doc = func.__doc__
188:
189: def finalize(wrapper, new_doc):
190: wrapper = functools.wraps(func)(wrapper)
191: wrapper.__doc__ = new_doc
192: return classmethod(wrapper)
193: else:
194: func = obj
195: old_doc = func.__doc__
196:
197: def finalize(wrapper, new_doc):
198: wrapper = functools.wraps(func)(wrapper)
199: wrapper.__doc__ = new_doc
200: return wrapper
201:
202: message = _generate_deprecation_message(
203: since, message, name, alternative, pending,
204: obj_type, addendum)
205:
206: def wrapper(*args, **kwargs):
207: warnings.warn(message, mplDeprecation, stacklevel=2)
208: return func(*args, **kwargs)
209:
210: old_doc = textwrap.dedent(old_doc or '').strip('\n')
211: message = message.strip()
212: new_doc = (('\n.. deprecated:: %(since)s'
213: '\n %(message)s\n\n' %
214: {'since': since, 'message': message}) + old_doc)
215: if not old_doc:
216: # This is to prevent a spurious 'unexected unindent' warning from
217: # docutils when the original docstring was blank.
218: new_doc += r'\ '
219:
220: return finalize(wrapper, new_doc)
221:
222: return deprecate
223:
"""
# ---------------------------------------------------------------------------
# AUTO-GENERATED by stypy: type-inference program for matplotlib's
# deprecation module (the original source is embedded verbatim in the
# triple-quoted string above). Each statement mirrors one construct of the
# original through the stypy API — do not hand-edit; regenerate instead.
# ---------------------------------------------------------------------------
# Import the stypy library necessary elements
from stypy.type_inference_programs.type_inference_programs_imports import *
# Create the module type store
module_type_store = Context(None, __file__)
# ################# Begin of the type inference program ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 1, 0))
# 'import warnings' statement (line 1)
import warnings
import_module(stypy.reporting.localization.Localization(__file__, 1, 0), 'warnings', warnings, module_type_store)
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 2, 0))
# 'import functools' statement (line 2)
import functools
import_module(stypy.reporting.localization.Localization(__file__, 2, 0), 'functools', functools, module_type_store)
# NOTE: AUTO-GENERATED stypy mirror of the original class declaration
# `class MatplotlibDeprecationWarning(UserWarning)` (original line 5) plus
# the alias `mplDeprecation` (original line 19). Do not hand-edit.
# Declaration of the 'MatplotlibDeprecationWarning' class
# Getting the type of 'UserWarning' (line 5)
UserWarning_273153 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 5, 35), 'UserWarning')
class MatplotlibDeprecationWarning(UserWarning_273153, ):
    str_273154 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 15, (-1)), 'str', '\n A class for issuing deprecation warnings for Matplotlib users.\n\n In light of the fact that Python builtin DeprecationWarnings are ignored\n by default as of Python 2.7 (see link below), this class was put in to\n allow for the signaling of deprecation, but via UserWarnings which are not\n ignored by default.\n\n https://docs.python.org/dev/whatsnew/2.7.html#the-future-for-python-2-x\n ')
    pass
    @norecursion
    def __init__(type_of_self, localization, *varargs, **kwargs):
        global module_type_store
        # Assign values to the parameters with defaults
        defaults = []
        # Create a new context for function '__init__'
        module_type_store = module_type_store.open_function_context('__init__', 5, 0, False)
        # Assigning a type to the variable 'self' (line 6)
        module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 6, 0), 'self', type_of_self)
        # Passed parameters checking function
        arguments = process_argument_values(localization, type_of_self, module_type_store, 'MatplotlibDeprecationWarning.__init__', [], None, None, defaults, varargs, kwargs)
        if is_error_type(arguments):
            # Destroy the current context
            module_type_store = module_type_store.close_function_context()
            return
        # Initialize method data
        init_call_information(module_type_store, '__init__', localization, [], arguments)
        # Default return type storage variable (SSA)
        # Assigning a type to the variable 'stypy_return_type'
        module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
        # ################# Begin of '__init__(...)' code ##################
        pass
        # ################# End of '__init__(...)' code ##################
        # Teardown call information
        teardown_call_information(localization, arguments)
        # Destroy the current context
        module_type_store = module_type_store.close_function_context()
# Assigning a type to the variable 'MatplotlibDeprecationWarning' (line 5)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 5, 0), 'MatplotlibDeprecationWarning', MatplotlibDeprecationWarning)
# Assigning a Name to a Name (line 19):
# Getting the type of 'MatplotlibDeprecationWarning' (line 19)
MatplotlibDeprecationWarning_273155 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 19, 17), 'MatplotlibDeprecationWarning')
# Assigning a type to the variable 'mplDeprecation' (line 19)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 19, 0), 'mplDeprecation', MatplotlibDeprecationWarning_273155)
# NOTE: AUTO-GENERATED stypy mirror of the original function
# `_generate_deprecation_message(since, message, name, alternative,
# pending, obj_type, addendum)` (original lines 22-53). Each SSA
# context corresponds to one `if` statement of the original. Do not
# hand-edit; regenerate with stypy instead.
@norecursion
def _generate_deprecation_message(localization, *varargs, **kwargs):
    global module_type_store
    # Assign values to the parameters with defaults
    str_273156 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 49), 'str', '')
    str_273157 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 22, 58), 'str', '')
    str_273158 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 23, 46), 'str', '')
    # Getting the type of 'False' (line 23)
    False_273159 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 23, 58), 'False')
    str_273160 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 24, 43), 'str', 'attribute')
    str_273161 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 25, 43), 'str', '')
    defaults = [str_273156, str_273157, str_273158, False_273159, str_273160, str_273161]
    # Create a new context for function '_generate_deprecation_message'
    module_type_store = module_type_store.open_function_context('_generate_deprecation_message', 22, 0, False)
    # Passed parameters checking function
    _generate_deprecation_message.stypy_localization = localization
    _generate_deprecation_message.stypy_type_of_self = None
    _generate_deprecation_message.stypy_type_store = module_type_store
    _generate_deprecation_message.stypy_function_name = '_generate_deprecation_message'
    _generate_deprecation_message.stypy_param_names_list = ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum']
    _generate_deprecation_message.stypy_varargs_param_name = None
    _generate_deprecation_message.stypy_kwargs_param_name = None
    _generate_deprecation_message.stypy_call_defaults = defaults
    _generate_deprecation_message.stypy_call_varargs = varargs
    _generate_deprecation_message.stypy_call_kwargs = kwargs
    arguments = process_argument_values(localization, None, module_type_store, '_generate_deprecation_message', ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], None, None, defaults, varargs, kwargs)
    if is_error_type(arguments):
        # Destroy the current context
        module_type_store = module_type_store.close_function_context()
        return arguments
    # Initialize method data
    init_call_information(module_type_store, '_generate_deprecation_message', localization, ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], arguments)
    # Default return type storage variable (SSA)
    # Assigning a type to the variable 'stypy_return_type'
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
    # ################# Begin of '_generate_deprecation_message(...)' code ##################
    # Getting the type of 'message' (line 27)
    message_273162 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 27, 11), 'message')
    # Applying the 'not' unary operator (line 27)
    result_not__273163 = python_operator(stypy.reporting.localization.Localization(__file__, 27, 7), 'not', message_273162)
    # Testing the type of an if condition (line 27)
    if_condition_273164 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 27, 4), result_not__273163)
    # Assigning a type to the variable 'if_condition_273164' (line 27)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 27, 4), 'if_condition_273164', if_condition_273164)
    # SSA begins for if statement (line 27)
    module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
    # Getting the type of 'pending' (line 29)
    pending_273165 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 29, 11), 'pending')
    # Testing the type of an if condition (line 29)
    if_condition_273166 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 29, 8), pending_273165)
    # Assigning a type to the variable 'if_condition_273166' (line 29)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 29, 8), 'if_condition_273166', if_condition_273166)
    # SSA begins for if statement (line 29)
    module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
    # Assigning a Str to a Name (line 30):
    str_273167 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 31, 16), 'str', 'The %(name)s %(obj_type)s will be deprecated in a future version.')
    # Assigning a type to the variable 'message' (line 30)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 30, 12), 'message', str_273167)
    # SSA branch for the else part of an if statement (line 29)
    module_type_store.open_ssa_branch('else')
    # Assigning a Str to a Name (line 34):
    str_273168 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 35, 16), 'str', 'The %(name)s %(obj_type)s was deprecated in version %(since)s.')
    # Assigning a type to the variable 'message' (line 34)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 34, 12), 'message', str_273168)
    # SSA join for if statement (line 29)
    module_type_store = module_type_store.join_ssa_context()
    # SSA join for if statement (line 27)
    module_type_store = module_type_store.join_ssa_context()
    # Assigning a Str to a Name (line 38):
    str_273169 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 38, 17), 'str', '')
    # Assigning a type to the variable 'altmessage' (line 38)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 38, 4), 'altmessage', str_273169)
    # Getting the type of 'alternative' (line 39)
    alternative_273170 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 39, 7), 'alternative')
    # Testing the type of an if condition (line 39)
    if_condition_273171 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 39, 4), alternative_273170)
    # Assigning a type to the variable 'if_condition_273171' (line 39)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 39, 4), 'if_condition_273171', if_condition_273171)
    # SSA begins for if statement (line 39)
    module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
    # Assigning a BinOp to a Name (line 40):
    str_273172 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 40, 21), 'str', ' Use %s instead.')
    # Getting the type of 'alternative' (line 40)
    alternative_273173 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 40, 42), 'alternative')
    # Applying the binary operator '%' (line 40)
    result_mod_273174 = python_operator(stypy.reporting.localization.Localization(__file__, 40, 21), '%', str_273172, alternative_273173)
    # Assigning a type to the variable 'altmessage' (line 40)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 40, 8), 'altmessage', result_mod_273174)
    # SSA join for if statement (line 39)
    module_type_store = module_type_store.join_ssa_context()
    # Assigning a BinOp to a Name (line 42):
    # Getting the type of 'message' (line 42)
    message_273175 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 42, 16), 'message')
    # Obtaining an instance of the builtin type 'dict' (line 42)
    dict_273176 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 42, 26), 'dict')
    # Adding type elements to the builtin type 'dict' instance (line 42)
    # Adding element type (key, value) (line 42)
    str_273177 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 43, 8), 'str', 'func')
    # Getting the type of 'name' (line 43)
    name_273178 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 43, 16), 'name')
    set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 26), dict_273176, (str_273177, name_273178))
    # Adding element type (key, value) (line 42)
    str_273179 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 44, 8), 'str', 'name')
    # Getting the type of 'name' (line 44)
    name_273180 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 44, 16), 'name')
    set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 26), dict_273176, (str_273179, name_273180))
    # Adding element type (key, value) (line 42)
    str_273181 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 45, 8), 'str', 'alternative')
    # Getting the type of 'alternative' (line 45)
    alternative_273182 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 45, 23), 'alternative')
    set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 26), dict_273176, (str_273181, alternative_273182))
    # Adding element type (key, value) (line 42)
    str_273183 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 46, 8), 'str', 'obj_type')
    # Getting the type of 'obj_type' (line 46)
    obj_type_273184 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 46, 20), 'obj_type')
    set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 26), dict_273176, (str_273183, obj_type_273184))
    # Adding element type (key, value) (line 42)
    str_273185 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 47, 8), 'str', 'since')
    # Getting the type of 'since' (line 47)
    since_273186 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 47, 17), 'since')
    set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 42, 26), dict_273176, (str_273185, since_273186))
    # Applying the binary operator '%' (line 42)
    result_mod_273187 = python_operator(stypy.reporting.localization.Localization(__file__, 42, 16), '%', message_273175, dict_273176)
    # Getting the type of 'altmessage' (line 48)
    altmessage_273188 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 48, 8), 'altmessage')
    # Applying the binary operator '+' (line 42)
    result_add_273189 = python_operator(stypy.reporting.localization.Localization(__file__, 42, 15), '+', result_mod_273187, altmessage_273188)
    # Assigning a type to the variable 'message' (line 42)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 42, 4), 'message', result_add_273189)
    # Getting the type of 'addendum' (line 50)
    addendum_273190 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 50, 7), 'addendum')
    # Testing the type of an if condition (line 50)
    if_condition_273191 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 50, 4), addendum_273190)
    # Assigning a type to the variable 'if_condition_273191' (line 50)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 50, 4), 'if_condition_273191', if_condition_273191)
    # SSA begins for if statement (line 50)
    module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
    # Getting the type of 'message' (line 51)
    message_273192 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 8), 'message')
    # Getting the type of 'addendum' (line 51)
    addendum_273193 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 51, 19), 'addendum')
    # Applying the binary operator '+=' (line 51)
    result_iadd_273194 = python_operator(stypy.reporting.localization.Localization(__file__, 51, 8), '+=', message_273192, addendum_273193)
    # Assigning a type to the variable 'message' (line 51)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 51, 8), 'message', result_iadd_273194)
    # SSA join for if statement (line 50)
    module_type_store = module_type_store.join_ssa_context()
    # Getting the type of 'message' (line 53)
    message_273195 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 53, 11), 'message')
    # Assigning a type to the variable 'stypy_return_type' (line 53)
    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 53, 4), 'stypy_return_type', message_273195)
    # ################# End of '_generate_deprecation_message(...)' code ##################
    # Teardown call information
    teardown_call_information(localization, arguments)
    # Storing the return type of function '_generate_deprecation_message' in the type store
    # Getting the type of 'stypy_return_type' (line 22)
    stypy_return_type_273196 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 22, 0), 'stypy_return_type')
    module_type_store.store_return_type_of_current_context(stypy_return_type_273196)
    # Destroy the current context
    module_type_store = module_type_store.close_function_context()
    # Return type of the function '_generate_deprecation_message'
    return stypy_return_type_273196
# Assigning a type to the variable '_generate_deprecation_message' (line 22)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 22, 0), '_generate_deprecation_message', _generate_deprecation_message)
# ---------------------------------------------------------------------------
# stypy-generated type-inference shadow of matplotlib's `warn_deprecated`
# (original source lines 56-106).  It does not emit a warning itself; it
# simulates the call in the type store: builds the default parameter types,
# opens a function context, type-checks the arguments, then models the calls
# to `_generate_deprecation_message` and `warnings.warn`.
# ---------------------------------------------------------------------------
@norecursion
def warn_deprecated(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
# Default types for: message='', name='', alternative='', pending=False,
# obj_type='attribute', addendum=''  (original line 57-58).
str_273197 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 57, 23), 'str', '')
str_273198 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 57, 32), 'str', '')
str_273199 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 57, 48), 'str', '')
# Getting the type of 'False' (line 57)
False_273200 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 57, 60), 'False')
str_273201 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 58, 17), 'str', 'attribute')
str_273202 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 58, 39), 'str', '')
defaults = [str_273197, str_273198, str_273199, False_273200, str_273201, str_273202]
# Create a new context for function 'warn_deprecated'
module_type_store = module_type_store.open_function_context('warn_deprecated', 56, 0, False)
# Passed parameters checking function
# Bookkeeping attributes consumed by the stypy runtime when re-analyzing this call.
warn_deprecated.stypy_localization = localization
warn_deprecated.stypy_type_of_self = None
warn_deprecated.stypy_type_store = module_type_store
warn_deprecated.stypy_function_name = 'warn_deprecated'
warn_deprecated.stypy_param_names_list = ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum']
warn_deprecated.stypy_varargs_param_name = None
warn_deprecated.stypy_kwargs_param_name = None
warn_deprecated.stypy_call_defaults = defaults
warn_deprecated.stypy_call_varargs = varargs
warn_deprecated.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'warn_deprecated', ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Argument type-check failed: unwind the context and propagate the error type.
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'warn_deprecated', localization, ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'warn_deprecated(...)' code ##################
str_273203 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 102, (-1)), 'str', '\n    Used to display deprecation warning in a standard way.\n\n    Parameters\n    ----------\n    since : str\n        The release at which this API became deprecated.\n\n    message : str, optional\n        Override the default deprecation message.  The format\n        specifier `%(name)s` may be used for the name of the function,\n        and `%(alternative)s` may be used in the deprecation message\n        to insert the name of an alternative to the deprecated\n        function.  `%(obj_type)s` may be used to insert a friendly name\n        for the type of object being deprecated.\n\n    name : str, optional\n        The name of the deprecated object.\n\n    alternative : str, optional\n        An alternative function that the user may use in place of the\n        deprecated function.  The deprecation warning will tell the user\n        about this alternative if provided.\n\n    pending : bool, optional\n        If True, uses a PendingDeprecationWarning instead of a\n        DeprecationWarning.\n\n    obj_type : str, optional\n        The object type being deprecated.\n\n    addendum : str, optional\n        Additional text appended directly to the final message.\n\n    Examples\n    --------\n\n        Basic example::\n\n            # To warn of the deprecation of "matplotlib.name_of_module"\n            warn_deprecated(\'1.4.0\', name=\'matplotlib.name_of_module\',\n                            obj_type=\'module\')\n\n    ')
# Assigning a Call to a Name (line 103):
# Call to _generate_deprecation_message(...): (line 103)
# Processing the call arguments (line 103)
# Getting the type of 'since' (line 104)
since_273205 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 16), 'since', False)
# Getting the type of 'message' (line 104)
message_273206 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 23), 'message', False)
# Getting the type of 'name' (line 104)
name_273207 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 32), 'name', False)
# Getting the type of 'alternative' (line 104)
alternative_273208 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 38), 'alternative', False)
# Getting the type of 'pending' (line 104)
pending_273209 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 51), 'pending', False)
# Getting the type of 'obj_type' (line 104)
obj_type_273210 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 104, 60), 'obj_type', False)
# Processing the call keyword arguments (line 103)
kwargs_273211 = {}
# Getting the type of '_generate_deprecation_message' (line 103)
_generate_deprecation_message_273204 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 103, 14), '_generate_deprecation_message', False)
# Calling _generate_deprecation_message(args, kwargs) (line 103)
# NOTE(review): 'addendum' is accepted by warn_deprecated but not forwarded to
# _generate_deprecation_message here — this mirrors the analyzed source
# (line 103), not a transcription loss; confirm against the original module.
_generate_deprecation_message_call_result_273212 = invoke(stypy.reporting.localization.Localization(__file__, 103, 14), _generate_deprecation_message_273204, *[since_273205, message_273206, name_273207, alternative_273208, pending_273209, obj_type_273210], **kwargs_273211)
# Assigning a type to the variable 'message' (line 103)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 103, 4), 'message', _generate_deprecation_message_call_result_273212)
# Call to warn(...): (line 106)
# Models: warnings.warn(message, mplDeprecation, stacklevel=<int>).
# Processing the call arguments (line 106)
# Getting the type of 'message' (line 106)
message_273215 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 106, 18), 'message', False)
# Getting the type of 'mplDeprecation' (line 106)
mplDeprecation_273216 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 106, 27), 'mplDeprecation', False)
# Processing the call keyword arguments (line 106)
int_273217 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 106, 54), 'int')
keyword_273218 = int_273217
kwargs_273219 = {'stacklevel': keyword_273218}
# Getting the type of 'warnings' (line 106)
warnings_273213 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 106, 4), 'warnings', False)
# Obtaining the member 'warn' of a type (line 106)
warn_273214 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 106, 4), warnings_273213, 'warn')
# Calling warn(args, kwargs) (line 106)
warn_call_result_273220 = invoke(stypy.reporting.localization.Localization(__file__, 106, 4), warn_273214, *[message_273215, mplDeprecation_273216], **kwargs_273219)
# ################# End of 'warn_deprecated(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'warn_deprecated' in the type store
# Getting the type of 'stypy_return_type' (line 56)
stypy_return_type_273221 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 56, 0), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_273221)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'warn_deprecated'
return stypy_return_type_273221
# Register 'warn_deprecated' in the module-level type store for later lookups.
# Assigning a type to the variable 'warn_deprecated' (line 56)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 56, 0), 'warn_deprecated', warn_deprecated)
@norecursion
def deprecated(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
str_273222 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 109, 30), 'str', '')
str_273223 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 109, 39), 'str', '')
str_273224 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 109, 55), 'str', '')
# Getting the type of 'False' (line 109)
False_273225 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 109, 67), 'False')
# Getting the type of 'None' (line 110)
None_273226 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 110, 24), 'None')
str_273227 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 110, 39), 'str', '')
defaults = [str_273222, str_273223, str_273224, False_273225, None_273226, str_273227]
# Create a new context for function 'deprecated'
module_type_store = module_type_store.open_function_context('deprecated', 109, 0, False)
# Passed parameters checking function
deprecated.stypy_localization = localization
deprecated.stypy_type_of_self = None
deprecated.stypy_type_store = module_type_store
deprecated.stypy_function_name = 'deprecated'
deprecated.stypy_param_names_list = ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum']
deprecated.stypy_varargs_param_name = None
deprecated.stypy_kwargs_param_name = None
deprecated.stypy_call_defaults = defaults
deprecated.stypy_call_varargs = varargs
deprecated.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'deprecated', ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'deprecated', localization, ['since', 'message', 'name', 'alternative', 'pending', 'obj_type', 'addendum'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'deprecated(...)' code ##################
str_273228 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 160, (-1)), 'str', "\n Decorator to mark a function or a class as deprecated.\n\n Parameters\n ----------\n since : str\n The release at which this API became deprecated. This is\n required.\n\n message : str, optional\n Override the default deprecation message. The format\n specifier `%(name)s` may be used for the name of the object,\n and `%(alternative)s` may be used in the deprecation message\n to insert the name of an alternative to the deprecated\n object. `%(obj_type)s` may be used to insert a friendly name\n for the type of object being deprecated.\n\n name : str, optional\n The name of the deprecated object; if not provided the name\n is automatically determined from the passed in object,\n though this is useful in the case of renamed functions, where\n the new function is just assigned to the name of the\n deprecated function. For example::\n\n def new_function():\n ...\n oldFunction = new_function\n\n alternative : str, optional\n An alternative object that the user may use in place of the\n deprecated object. The deprecation warning will tell the user\n about this alternative if provided.\n\n pending : bool, optional\n If True, uses a PendingDeprecationWarning instead of a\n DeprecationWarning.\n\n addendum : str, optional\n Additional text appended directly to the final message.\n\n Examples\n --------\n\n Basic example::\n\n @deprecated('1.4.0')\n def the_function_to_deprecate():\n pass\n\n ")
@norecursion
def deprecate(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
# Getting the type of 'message' (line 162)
message_273229 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 162, 31), 'message')
# Getting the type of 'name' (line 162)
name_273230 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 162, 45), 'name')
# Getting the type of 'alternative' (line 162)
alternative_273231 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 162, 63), 'alternative')
# Getting the type of 'pending' (line 163)
pending_273232 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 163, 26), 'pending')
# Getting the type of 'addendum' (line 163)
addendum_273233 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 163, 44), 'addendum')
defaults = [message_273229, name_273230, alternative_273231, pending_273232, addendum_273233]
# Create a new context for function 'deprecate'
module_type_store = module_type_store.open_function_context('deprecate', 162, 4, False)
# Passed parameters checking function
deprecate.stypy_localization = localization
deprecate.stypy_type_of_self = None
deprecate.stypy_type_store = module_type_store
deprecate.stypy_function_name = 'deprecate'
deprecate.stypy_param_names_list = ['obj', 'message', 'name', 'alternative', 'pending', 'addendum']
deprecate.stypy_varargs_param_name = None
deprecate.stypy_kwargs_param_name = None
deprecate.stypy_call_defaults = defaults
deprecate.stypy_call_varargs = varargs
deprecate.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'deprecate', ['obj', 'message', 'name', 'alternative', 'pending', 'addendum'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'deprecate', localization, ['obj', 'message', 'name', 'alternative', 'pending', 'addendum'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'deprecate(...)' code ##################
stypy.reporting.localization.Localization.set_current(stypy.reporting.localization.Localization(__file__, 164, 8))
# 'import textwrap' statement (line 164)
import textwrap
import_module(stypy.reporting.localization.Localization(__file__, 164, 8), 'textwrap', textwrap, module_type_store)
# Getting the type of 'name' (line 166)
name_273234 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 166, 15), 'name')
# Applying the 'not' unary operator (line 166)
result_not__273235 = python_operator(stypy.reporting.localization.Localization(__file__, 166, 11), 'not', name_273234)
# Testing the type of an if condition (line 166)
if_condition_273236 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 166, 8), result_not__273235)
# Assigning a type to the variable 'if_condition_273236' (line 166)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 166, 8), 'if_condition_273236', if_condition_273236)
# SSA begins for if statement (line 166)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
# Assigning a Attribute to a Name (line 167):
# Getting the type of 'obj' (line 167)
obj_273237 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 167, 19), 'obj')
# Obtaining the member '__name__' of a type (line 167)
name___273238 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 167, 19), obj_273237, '__name__')
# Assigning a type to the variable 'name' (line 167)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 167, 12), 'name', name___273238)
# SSA join for if statement (line 166)
module_type_store = module_type_store.join_ssa_context()
# Type idiom detected: calculating its left and rigth part (line 169)
# Getting the type of 'type' (line 169)
type_273239 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 169, 27), 'type')
# Getting the type of 'obj' (line 169)
obj_273240 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 169, 22), 'obj')
(may_be_273241, more_types_in_union_273242) = may_be_subtype(type_273239, obj_273240)
if may_be_273241:
if more_types_in_union_273242:
# Runtime conditional SSA (line 169)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'idiom if')
else:
module_type_store = module_type_store
# Assigning a type to the variable 'obj' (line 169)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 169, 8), 'obj', remove_not_subtype_from_union(obj_273240, type))
# Assigning a Str to a Name (line 170):
str_273243 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 170, 23), 'str', 'class')
# Assigning a type to the variable 'obj_type' (line 170)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 170, 12), 'obj_type', str_273243)
# Assigning a Attribute to a Name (line 171):
# Getting the type of 'obj' (line 171)
obj_273244 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 171, 22), 'obj')
# Obtaining the member '__doc__' of a type (line 171)
doc___273245 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 171, 22), obj_273244, '__doc__')
# Assigning a type to the variable 'old_doc' (line 171)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 171, 12), 'old_doc', doc___273245)
# Assigning a Attribute to a Name (line 172):
# Getting the type of 'obj' (line 172)
obj_273246 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 172, 19), 'obj')
# Obtaining the member '__init__' of a type (line 172)
init___273247 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 172, 19), obj_273246, '__init__')
# Assigning a type to the variable 'func' (line 172)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 172, 12), 'func', init___273247)
# ---------------------------------------------------------------------------
# stypy-generated shadow of the nested `finalize(wrapper, new_doc)` defined
# in the class branch of `deprecated.deprecate` (original source line 174):
# it models setting obj.__doc__ = new_doc (best-effort, errors swallowed),
# obj.__init__ = wrapper, and returning obj.
# ---------------------------------------------------------------------------
@norecursion
def finalize(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
defaults = []
# Create a new context for function 'finalize'
module_type_store = module_type_store.open_function_context('finalize', 174, 12, False)
# Passed parameters checking function
# Bookkeeping attributes consumed by the stypy runtime when re-analyzing this call.
finalize.stypy_localization = localization
finalize.stypy_type_of_self = None
finalize.stypy_type_store = module_type_store
finalize.stypy_function_name = 'finalize'
finalize.stypy_param_names_list = ['wrapper', 'new_doc']
finalize.stypy_varargs_param_name = None
finalize.stypy_kwargs_param_name = None
finalize.stypy_call_defaults = defaults
finalize.stypy_call_varargs = varargs
finalize.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'finalize', ['wrapper', 'new_doc'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Argument type-check failed: unwind the context and propagate the error type.
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'finalize', localization, ['wrapper', 'new_doc'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'finalize(...)' code ##################
# Models: try: obj.__doc__ = new_doc  except <tuple of exceptions>: pass
# SSA begins for try-except statement (line 175)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'try-except')
# Assigning a Name to a Attribute (line 176):
# Getting the type of 'new_doc' (line 176)
new_doc_273248 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 176, 34), 'new_doc')
# Getting the type of 'obj' (line 176)
obj_273249 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 176, 20), 'obj')
# Setting the type of the member '__doc__' of a type (line 176)
module_type_store.set_type_of_member(stypy.reporting.localization.Localization(__file__, 176, 20), obj_273249, '__doc__', new_doc_273248)
# SSA branch for the except part of a try statement (line 175)
# SSA branch for the except 'Tuple' branch of a try statement (line 175)
module_type_store.open_ssa_branch('except')
pass
# SSA join for try-except statement (line 175)
module_type_store = module_type_store.join_ssa_context()
# Models: obj.__init__ = wrapper  (line 181)
# Assigning a Name to a Attribute (line 181):
# Getting the type of 'wrapper' (line 181)
wrapper_273250 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 181, 31), 'wrapper')
# Getting the type of 'obj' (line 181)
obj_273251 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 181, 16), 'obj')
# Setting the type of the member '__init__' of a type (line 181)
module_type_store.set_type_of_member(stypy.reporting.localization.Localization(__file__, 181, 16), obj_273251, '__init__', wrapper_273250)
# Models: return obj  (line 182)
# Getting the type of 'obj' (line 182)
obj_273252 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 182, 23), 'obj')
# Assigning a type to the variable 'stypy_return_type' (line 182)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 182, 16), 'stypy_return_type', obj_273252)
# ################# End of 'finalize(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'finalize' in the type store
# Getting the type of 'stypy_return_type' (line 174)
stypy_return_type_273253 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 174, 12), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_273253)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'finalize'
return stypy_return_type_273253
# Register this branch's 'finalize' in the type store.
# Assigning a type to the variable 'finalize' (line 174)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 174, 12), 'finalize', finalize)
if more_types_in_union_273242:
# Runtime conditional SSA for else branch (line 169)
module_type_store.open_ssa_branch('idiom else')
if ((not may_be_273241) or more_types_in_union_273242):
# Assigning a type to the variable 'obj' (line 169)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 169, 8), 'obj', remove_subtype_from_union(obj_273240, type))
# Assigning a Str to a Name (line 184):
str_273254 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 184, 23), 'str', 'function')
# Assigning a type to the variable 'obj_type' (line 184)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 184, 12), 'obj_type', str_273254)
# Type idiom detected: calculating its left and rigth part (line 185)
# Getting the type of 'classmethod' (line 185)
classmethod_273255 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 185, 31), 'classmethod')
# Getting the type of 'obj' (line 185)
obj_273256 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 185, 26), 'obj')
(may_be_273257, more_types_in_union_273258) = may_be_subtype(classmethod_273255, obj_273256)
if may_be_273257:
if more_types_in_union_273258:
# Runtime conditional SSA (line 185)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'idiom if')
else:
module_type_store = module_type_store
# Assigning a type to the variable 'obj' (line 185)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 185, 12), 'obj', remove_not_subtype_from_union(obj_273256, classmethod))
# Assigning a Attribute to a Name (line 186):
# Getting the type of 'obj' (line 186)
obj_273259 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 186, 23), 'obj')
# Obtaining the member '__func__' of a type (line 186)
func___273260 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 186, 23), obj_273259, '__func__')
# Assigning a type to the variable 'func' (line 186)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 186, 16), 'func', func___273260)
# Assigning a Attribute to a Name (line 187):
# Getting the type of 'func' (line 187)
func_273261 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 187, 26), 'func')
# Obtaining the member '__doc__' of a type (line 187)
doc___273262 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 187, 26), func_273261, '__doc__')
# Assigning a type to the variable 'old_doc' (line 187)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 187, 16), 'old_doc', doc___273262)
# ---------------------------------------------------------------------------
# stypy-generated shadow of the nested `finalize(wrapper, new_doc)` defined
# in the classmethod branch of `deprecated.deprecate` (original source line
# 189): it models wrapper = functools.wraps(func)(wrapper),
# wrapper.__doc__ = new_doc, and returning classmethod(wrapper).
# ---------------------------------------------------------------------------
@norecursion
def finalize(localization, *varargs, **kwargs):
global module_type_store
# Assign values to the parameters with defaults
defaults = []
# Create a new context for function 'finalize'
module_type_store = module_type_store.open_function_context('finalize', 189, 16, False)
# Passed parameters checking function
# Bookkeeping attributes consumed by the stypy runtime when re-analyzing this call.
finalize.stypy_localization = localization
finalize.stypy_type_of_self = None
finalize.stypy_type_store = module_type_store
finalize.stypy_function_name = 'finalize'
finalize.stypy_param_names_list = ['wrapper', 'new_doc']
finalize.stypy_varargs_param_name = None
finalize.stypy_kwargs_param_name = None
finalize.stypy_call_defaults = defaults
finalize.stypy_call_varargs = varargs
finalize.stypy_call_kwargs = kwargs
arguments = process_argument_values(localization, None, module_type_store, 'finalize', ['wrapper', 'new_doc'], None, None, defaults, varargs, kwargs)
if is_error_type(arguments):
# Argument type-check failed: unwind the context and propagate the error type.
# Destroy the current context
module_type_store = module_type_store.close_function_context()
return arguments
# Initialize method data
init_call_information(module_type_store, 'finalize', localization, ['wrapper', 'new_doc'], arguments)
# Default return type storage variable (SSA)
# Assigning a type to the variable 'stypy_return_type'
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
# ################# Begin of 'finalize(...)' code ##################
# Models: wrapper = functools.wraps(func)(wrapper)  (line 190) — note the
# two-stage invoke: wraps(func) yields a decorator, then that is applied
# to wrapper.
# Assigning a Call to a Name (line 190):
# Call to (...): (line 190)
# Processing the call arguments (line 190)
# Getting the type of 'wrapper' (line 190)
wrapper_273268 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 190, 52), 'wrapper', False)
# Processing the call keyword arguments (line 190)
kwargs_273269 = {}
# Call to wraps(...): (line 190)
# Processing the call arguments (line 190)
# Getting the type of 'func' (line 190)
func_273265 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 190, 46), 'func', False)
# Processing the call keyword arguments (line 190)
kwargs_273266 = {}
# Getting the type of 'functools' (line 190)
functools_273263 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 190, 30), 'functools', False)
# Obtaining the member 'wraps' of a type (line 190)
wraps_273264 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 190, 30), functools_273263, 'wraps')
# Calling wraps(args, kwargs) (line 190)
wraps_call_result_273267 = invoke(stypy.reporting.localization.Localization(__file__, 190, 30), wraps_273264, *[func_273265], **kwargs_273266)
# Calling (args, kwargs) (line 190)
_call_result_273270 = invoke(stypy.reporting.localization.Localization(__file__, 190, 30), wraps_call_result_273267, *[wrapper_273268], **kwargs_273269)
# Assigning a type to the variable 'wrapper' (line 190)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 190, 20), 'wrapper', _call_result_273270)
# Models: wrapper.__doc__ = new_doc  (line 191)
# Assigning a Name to a Attribute (line 191):
# Getting the type of 'new_doc' (line 191)
new_doc_273271 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 191, 38), 'new_doc')
# Getting the type of 'wrapper' (line 191)
wrapper_273272 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 191, 20), 'wrapper')
# Setting the type of the member '__doc__' of a type (line 191)
module_type_store.set_type_of_member(stypy.reporting.localization.Localization(__file__, 191, 20), wrapper_273272, '__doc__', new_doc_273271)
# Models: return classmethod(wrapper)  (line 192)
# Call to classmethod(...): (line 192)
# Processing the call arguments (line 192)
# Getting the type of 'wrapper' (line 192)
wrapper_273274 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 192, 39), 'wrapper', False)
# Processing the call keyword arguments (line 192)
kwargs_273275 = {}
# Getting the type of 'classmethod' (line 192)
classmethod_273273 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 192, 27), 'classmethod', False)
# Calling classmethod(args, kwargs) (line 192)
classmethod_call_result_273276 = invoke(stypy.reporting.localization.Localization(__file__, 192, 27), classmethod_273273, *[wrapper_273274], **kwargs_273275)
# Assigning a type to the variable 'stypy_return_type' (line 192)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 192, 20), 'stypy_return_type', classmethod_call_result_273276)
# ################# End of 'finalize(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'finalize' in the type store
# Getting the type of 'stypy_return_type' (line 189)
stypy_return_type_273277 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 189, 16), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_273277)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'finalize'
return stypy_return_type_273277
# Register this branch's 'finalize' in the type store.
# Assigning a type to the variable 'finalize' (line 189)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 189, 16), 'finalize', finalize)
if more_types_in_union_273258:
# Runtime conditional SSA for else branch (line 185)
module_type_store.open_ssa_branch('idiom else')
if ((not may_be_273257) or more_types_in_union_273258):
# Assigning a type to the variable 'obj' (line 185)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 185, 12), 'obj', remove_subtype_from_union(obj_273256, classmethod))
# Assigning a Name to a Name (line 194):
# Getting the type of 'obj' (line 194)
obj_273278 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 194, 23), 'obj')
# Assigning a type to the variable 'func' (line 194)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 194, 16), 'func', obj_273278)
# Assigning a Attribute to a Name (line 195):
# Getting the type of 'func' (line 195)
func_273279 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 195, 26), 'func')
# Obtaining the member '__doc__' of a type (line 195)
doc___273280 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 195, 26), func_273279, '__doc__')
# Assigning a type to the variable 'old_doc' (line 195)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 195, 16), 'old_doc', doc___273280)
                # Machine-generated (stypy) type-inference shadow of the nested
                # 'finalize' closure (source line 197) from what appears to be
                # matplotlib's deprecation helper -- TODO confirm origin. Do
                # not hand-edit the store/SSA bookkeeping order.
                @norecursion
                def finalize(localization, *varargs, **kwargs):
                    """Type-inference proxy for finalize(wrapper, new_doc):
                    applies functools.wraps(func) to `wrapper`, sets its
                    __doc__ to `new_doc`, and returns the wrapper type."""
                    global module_type_store
                    # Assign values to the parameters with defaults
                    defaults = []
                    # Create a new context for function 'finalize'
                    module_type_store = module_type_store.open_function_context('finalize', 197, 16, False)
                    # Passed parameters checking function
                    finalize.stypy_localization = localization
                    finalize.stypy_type_of_self = None
                    finalize.stypy_type_store = module_type_store
                    finalize.stypy_function_name = 'finalize'
                    finalize.stypy_param_names_list = ['wrapper', 'new_doc']
                    finalize.stypy_varargs_param_name = None
                    finalize.stypy_kwargs_param_name = None
                    finalize.stypy_call_defaults = defaults
                    finalize.stypy_call_varargs = varargs
                    finalize.stypy_call_kwargs = kwargs
                    arguments = process_argument_values(localization, None, module_type_store, 'finalize', ['wrapper', 'new_doc'], None, None, defaults, varargs, kwargs)
                    if is_error_type(arguments):
                        # Destroy the current context
                        module_type_store = module_type_store.close_function_context()
                        return arguments
                    # Initialize method data
                    init_call_information(module_type_store, 'finalize', localization, ['wrapper', 'new_doc'], arguments)
                    # Default return type storage variable (SSA)
                    # Assigning a type to the variable 'stypy_return_type'
                    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
                    # ################# Begin of 'finalize(...)' code ##################
                    # Infers: wrapper = functools.wraps(func)(wrapper)
                    # Assigning a Call to a Name (line 198):
                    # Call to (...): (line 198)
                    # Processing the call arguments (line 198)
                    # Getting the type of 'wrapper' (line 198)
                    wrapper_273286 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 198, 52), 'wrapper', False)
                    # Processing the call keyword arguments (line 198)
                    kwargs_273287 = {}
                    # Call to wraps(...): (line 198)
                    # Processing the call arguments (line 198)
                    # Getting the type of 'func' (line 198)
                    func_273283 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 198, 46), 'func', False)
                    # Processing the call keyword arguments (line 198)
                    kwargs_273284 = {}
                    # Getting the type of 'functools' (line 198)
                    functools_273281 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 198, 30), 'functools', False)
                    # Obtaining the member 'wraps' of a type (line 198)
                    wraps_273282 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 198, 30), functools_273281, 'wraps')
                    # Calling wraps(args, kwargs) (line 198)
                    wraps_call_result_273285 = invoke(stypy.reporting.localization.Localization(__file__, 198, 30), wraps_273282, *[func_273283], **kwargs_273284)
                    # Calling (args, kwargs) (line 198)
                    _call_result_273288 = invoke(stypy.reporting.localization.Localization(__file__, 198, 30), wraps_call_result_273285, *[wrapper_273286], **kwargs_273287)
                    # Assigning a type to the variable 'wrapper' (line 198)
                    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 198, 20), 'wrapper', _call_result_273288)
                    # Infers: wrapper.__doc__ = new_doc
                    # Assigning a Name to a Attribute (line 199):
                    # Getting the type of 'new_doc' (line 199)
                    new_doc_273289 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 199, 38), 'new_doc')
                    # Getting the type of 'wrapper' (line 199)
                    wrapper_273290 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 199, 20), 'wrapper')
                    # Setting the type of the member '__doc__' of a type (line 199)
                    module_type_store.set_type_of_member(stypy.reporting.localization.Localization(__file__, 199, 20), wrapper_273290, '__doc__', new_doc_273289)
                    # Infers: return wrapper
                    # Getting the type of 'wrapper' (line 200)
                    wrapper_273291 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 200, 27), 'wrapper')
                    # Assigning a type to the variable 'stypy_return_type' (line 200)
                    module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 200, 20), 'stypy_return_type', wrapper_273291)
                    # ################# End of 'finalize(...)' code ##################
                    # Teardown call information
                    teardown_call_information(localization, arguments)
                    # Storing the return type of function 'finalize' in the type store
                    # Getting the type of 'stypy_return_type' (line 197)
                    stypy_return_type_273292 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 197, 16), 'stypy_return_type')
                    module_type_store.store_return_type_of_current_context(stypy_return_type_273292)
                    # Destroy the current context
                    module_type_store = module_type_store.close_function_context()
                    # Return type of the function 'finalize'
                    return stypy_return_type_273292
# Assigning a type to the variable 'finalize' (line 197)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 197, 16), 'finalize', finalize)
if (may_be_273257 and more_types_in_union_273258):
# SSA join for if statement (line 185)
module_type_store = module_type_store.join_ssa_context()
if (may_be_273241 and more_types_in_union_273242):
# SSA join for if statement (line 169)
module_type_store = module_type_store.join_ssa_context()
# Assigning a Call to a Name (line 202):
# Call to _generate_deprecation_message(...): (line 202)
# Processing the call arguments (line 202)
# Getting the type of 'since' (line 203)
since_273294 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 203, 20), 'since', False)
# Getting the type of 'message' (line 203)
message_273295 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 203, 27), 'message', False)
# Getting the type of 'name' (line 203)
name_273296 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 203, 36), 'name', False)
# Getting the type of 'alternative' (line 203)
alternative_273297 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 203, 42), 'alternative', False)
# Getting the type of 'pending' (line 203)
pending_273298 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 203, 55), 'pending', False)
# Getting the type of 'obj_type' (line 204)
obj_type_273299 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 204, 20), 'obj_type', False)
# Getting the type of 'addendum' (line 204)
addendum_273300 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 204, 30), 'addendum', False)
# Processing the call keyword arguments (line 202)
kwargs_273301 = {}
# Getting the type of '_generate_deprecation_message' (line 202)
_generate_deprecation_message_273293 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 202, 18), '_generate_deprecation_message', False)
# Calling _generate_deprecation_message(args, kwargs) (line 202)
_generate_deprecation_message_call_result_273302 = invoke(stypy.reporting.localization.Localization(__file__, 202, 18), _generate_deprecation_message_273293, *[since_273294, message_273295, name_273296, alternative_273297, pending_273298, obj_type_273299, addendum_273300], **kwargs_273301)
# Assigning a type to the variable 'message' (line 202)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 202, 8), 'message', _generate_deprecation_message_call_result_273302)
        # Machine-generated (stypy) type-inference shadow of the 'wrapper'
        # closure (source line 206): emits the deprecation warning, then
        # forwards *args/**kwargs to the wrapped func -- TODO confirm origin
        # (looks like matplotlib's cbook deprecation machinery).
        @norecursion
        def wrapper(localization, *varargs, **kwargs):
            """Type-inference proxy for wrapper(*args, **kwargs): warns with
            mplDeprecation, then returns the type of func(*args, **kwargs)."""
            global module_type_store
            # Assign values to the parameters with defaults
            defaults = []
            # Create a new context for function 'wrapper'
            module_type_store = module_type_store.open_function_context('wrapper', 206, 8, False)
            # Passed parameters checking function
            wrapper.stypy_localization = localization
            wrapper.stypy_type_of_self = None
            wrapper.stypy_type_store = module_type_store
            wrapper.stypy_function_name = 'wrapper'
            wrapper.stypy_param_names_list = []
            wrapper.stypy_varargs_param_name = 'args'
            wrapper.stypy_kwargs_param_name = 'kwargs'
            wrapper.stypy_call_defaults = defaults
            wrapper.stypy_call_varargs = varargs
            wrapper.stypy_call_kwargs = kwargs
            arguments = process_argument_values(localization, None, module_type_store, 'wrapper', [], 'args', 'kwargs', defaults, varargs, kwargs)
            if is_error_type(arguments):
                # Destroy the current context
                module_type_store = module_type_store.close_function_context()
                return arguments
            # Initialize method data
            init_call_information(module_type_store, 'wrapper', localization, [], arguments)
            # Default return type storage variable (SSA)
            # Assigning a type to the variable 'stypy_return_type'
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 0, 0), 'stypy_return_type', None)
            # ################# Begin of 'wrapper(...)' code ##################
            # Infers: warnings.warn(message, mplDeprecation, stacklevel=<int>)
            # Call to warn(...): (line 207)
            # Processing the call arguments (line 207)
            # Getting the type of 'message' (line 207)
            message_273305 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 207, 26), 'message', False)
            # Getting the type of 'mplDeprecation' (line 207)
            mplDeprecation_273306 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 207, 35), 'mplDeprecation', False)
            # Processing the call keyword arguments (line 207)
            int_273307 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 207, 62), 'int')
            keyword_273308 = int_273307
            kwargs_273309 = {'stacklevel': keyword_273308}
            # Getting the type of 'warnings' (line 207)
            warnings_273303 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 207, 12), 'warnings', False)
            # Obtaining the member 'warn' of a type (line 207)
            warn_273304 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 207, 12), warnings_273303, 'warn')
            # Calling warn(args, kwargs) (line 207)
            warn_call_result_273310 = invoke(stypy.reporting.localization.Localization(__file__, 207, 12), warn_273304, *[message_273305, mplDeprecation_273306], **kwargs_273309)
            # Infers: return func(*args, **kwargs)
            # Call to func(...): (line 208)
            # Getting the type of 'args' (line 208)
            args_273312 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 208, 25), 'args', False)
            # Processing the call keyword arguments (line 208)
            # Getting the type of 'kwargs' (line 208)
            kwargs_273313 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 208, 33), 'kwargs', False)
            kwargs_273314 = {'kwargs_273313': kwargs_273313}
            # Getting the type of 'func' (line 208)
            func_273311 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 208, 19), 'func', False)
            # Calling func(args, kwargs) (line 208)
            func_call_result_273315 = invoke(stypy.reporting.localization.Localization(__file__, 208, 19), func_273311, *[args_273312], **kwargs_273314)
            # Assigning a type to the variable 'stypy_return_type' (line 208)
            module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 208, 12), 'stypy_return_type', func_call_result_273315)
            # ################# End of 'wrapper(...)' code ##################
            # Teardown call information
            teardown_call_information(localization, arguments)
            # Storing the return type of function 'wrapper' in the type store
            # Getting the type of 'stypy_return_type' (line 206)
            stypy_return_type_273316 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 206, 8), 'stypy_return_type')
            module_type_store.store_return_type_of_current_context(stypy_return_type_273316)
            # Destroy the current context
            module_type_store = module_type_store.close_function_context()
            # Return type of the function 'wrapper'
            return stypy_return_type_273316
# Assigning a type to the variable 'wrapper' (line 206)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 206, 8), 'wrapper', wrapper)
# Assigning a Call to a Name (line 210):
# Call to strip(...): (line 210)
# Processing the call arguments (line 210)
str_273325 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 210, 55), 'str', '\n')
# Processing the call keyword arguments (line 210)
kwargs_273326 = {}
# Call to dedent(...): (line 210)
# Processing the call arguments (line 210)
# Evaluating a boolean operation
# Getting the type of 'old_doc' (line 210)
old_doc_273319 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 210, 34), 'old_doc', False)
str_273320 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 210, 45), 'str', '')
# Applying the binary operator 'or' (line 210)
result_or_keyword_273321 = python_operator(stypy.reporting.localization.Localization(__file__, 210, 34), 'or', old_doc_273319, str_273320)
# Processing the call keyword arguments (line 210)
kwargs_273322 = {}
# Getting the type of 'textwrap' (line 210)
textwrap_273317 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 210, 18), 'textwrap', False)
# Obtaining the member 'dedent' of a type (line 210)
dedent_273318 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 210, 18), textwrap_273317, 'dedent')
# Calling dedent(args, kwargs) (line 210)
dedent_call_result_273323 = invoke(stypy.reporting.localization.Localization(__file__, 210, 18), dedent_273318, *[result_or_keyword_273321], **kwargs_273322)
# Obtaining the member 'strip' of a type (line 210)
strip_273324 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 210, 18), dedent_call_result_273323, 'strip')
# Calling strip(args, kwargs) (line 210)
strip_call_result_273327 = invoke(stypy.reporting.localization.Localization(__file__, 210, 18), strip_273324, *[str_273325], **kwargs_273326)
# Assigning a type to the variable 'old_doc' (line 210)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 210, 8), 'old_doc', strip_call_result_273327)
# Assigning a Call to a Name (line 211):
# Call to strip(...): (line 211)
# Processing the call keyword arguments (line 211)
kwargs_273330 = {}
# Getting the type of 'message' (line 211)
message_273328 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 211, 18), 'message', False)
# Obtaining the member 'strip' of a type (line 211)
strip_273329 = module_type_store.get_type_of_member(stypy.reporting.localization.Localization(__file__, 211, 18), message_273328, 'strip')
# Calling strip(args, kwargs) (line 211)
strip_call_result_273331 = invoke(stypy.reporting.localization.Localization(__file__, 211, 18), strip_273329, *[], **kwargs_273330)
# Assigning a type to the variable 'message' (line 211)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 211, 8), 'message', strip_call_result_273331)
# Assigning a BinOp to a Name (line 212):
str_273332 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 212, 20), 'str', '\n.. deprecated:: %(since)s\n %(message)s\n\n')
# Obtaining an instance of the builtin type 'dict' (line 214)
dict_273333 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 214, 20), 'dict')
# Adding type elements to the builtin type 'dict' instance (line 214)
# Adding element type (key, value) (line 214)
str_273334 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 214, 21), 'str', 'since')
# Getting the type of 'since' (line 214)
since_273335 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 214, 30), 'since')
set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 214, 20), dict_273333, (str_273334, since_273335))
# Adding element type (key, value) (line 214)
str_273336 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 214, 37), 'str', 'message')
# Getting the type of 'message' (line 214)
message_273337 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 214, 48), 'message')
set_contained_elements_type(stypy.reporting.localization.Localization(__file__, 214, 20), dict_273333, (str_273336, message_273337))
# Applying the binary operator '%' (line 212)
result_mod_273338 = python_operator(stypy.reporting.localization.Localization(__file__, 212, 20), '%', str_273332, dict_273333)
# Getting the type of 'old_doc' (line 214)
old_doc_273339 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 214, 60), 'old_doc')
# Applying the binary operator '+' (line 212)
result_add_273340 = python_operator(stypy.reporting.localization.Localization(__file__, 212, 19), '+', result_mod_273338, old_doc_273339)
# Assigning a type to the variable 'new_doc' (line 212)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 212, 8), 'new_doc', result_add_273340)
# Getting the type of 'old_doc' (line 215)
old_doc_273341 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 215, 15), 'old_doc')
# Applying the 'not' unary operator (line 215)
result_not__273342 = python_operator(stypy.reporting.localization.Localization(__file__, 215, 11), 'not', old_doc_273341)
# Testing the type of an if condition (line 215)
if_condition_273343 = is_suitable_condition(stypy.reporting.localization.Localization(__file__, 215, 8), result_not__273342)
# Assigning a type to the variable 'if_condition_273343' (line 215)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 215, 8), 'if_condition_273343', if_condition_273343)
# SSA begins for if statement (line 215)
module_type_store = SSAContext.create_ssa_context(module_type_store, 'if')
# Getting the type of 'new_doc' (line 218)
new_doc_273344 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 218, 12), 'new_doc')
str_273345 = get_builtin_python_type_instance(stypy.reporting.localization.Localization(__file__, 218, 23), 'str', '\\ ')
# Applying the binary operator '+=' (line 218)
result_iadd_273346 = python_operator(stypy.reporting.localization.Localization(__file__, 218, 12), '+=', new_doc_273344, str_273345)
# Assigning a type to the variable 'new_doc' (line 218)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 218, 12), 'new_doc', result_iadd_273346)
# SSA join for if statement (line 215)
module_type_store = module_type_store.join_ssa_context()
# Call to finalize(...): (line 220)
# Processing the call arguments (line 220)
# Getting the type of 'wrapper' (line 220)
wrapper_273348 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 220, 24), 'wrapper', False)
# Getting the type of 'new_doc' (line 220)
new_doc_273349 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 220, 33), 'new_doc', False)
# Processing the call keyword arguments (line 220)
kwargs_273350 = {}
# Getting the type of 'finalize' (line 220)
finalize_273347 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 220, 15), 'finalize', False)
# Calling finalize(args, kwargs) (line 220)
finalize_call_result_273351 = invoke(stypy.reporting.localization.Localization(__file__, 220, 15), finalize_273347, *[wrapper_273348, new_doc_273349], **kwargs_273350)
# Assigning a type to the variable 'stypy_return_type' (line 220)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 220, 8), 'stypy_return_type', finalize_call_result_273351)
# ################# End of 'deprecate(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'deprecate' in the type store
# Getting the type of 'stypy_return_type' (line 162)
stypy_return_type_273352 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 162, 4), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_273352)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'deprecate'
return stypy_return_type_273352
# Assigning a type to the variable 'deprecate' (line 162)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 162, 4), 'deprecate', deprecate)
# Getting the type of 'deprecate' (line 222)
deprecate_273353 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 222, 11), 'deprecate')
# Assigning a type to the variable 'stypy_return_type' (line 222)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 222, 4), 'stypy_return_type', deprecate_273353)
# ################# End of 'deprecated(...)' code ##################
# Teardown call information
teardown_call_information(localization, arguments)
# Storing the return type of function 'deprecated' in the type store
# Getting the type of 'stypy_return_type' (line 109)
stypy_return_type_273354 = module_type_store.get_type_of(stypy.reporting.localization.Localization(__file__, 109, 0), 'stypy_return_type')
module_type_store.store_return_type_of_current_context(stypy_return_type_273354)
# Destroy the current context
module_type_store = module_type_store.close_function_context()
# Return type of the function 'deprecated'
return stypy_return_type_273354
# Assigning a type to the variable 'deprecated' (line 109)
module_type_store.set_type_of(stypy.reporting.localization.Localization(__file__, 109, 0), 'deprecated', deprecated)
# ################# End of the type inference program ##################
# Collect every error and warning that stypy accumulated while the
# type-inference program above executed, for later reporting.
module_errors = stypy.errors.type_error.StypyTypeError.get_error_msgs()
module_warnings = stypy.errors.type_warning.TypeWarning.get_warning_msgs()
|
988,942 | 765e7c4b81a598a413322c1f030fbc14180e2a3e | import nemaktis as nm
import numpy as np
import os.path
# Compute (or reload) the optical fields of a double-twist liquid-crystal
# droplet with nemaktis, caching the result on disk as a VTI file.
if os.path.isfile("optical_fields.vti"):
    # If the optical field were already calculated and exported by this
    # script, we directly load them
    output_fields = nm.OpticalFields(vti_file="optical_fields.vti")
else:
    # Else, we need to load a director field and propagate fields
    # through it. We use a simple ansatz for a double twist droplet.
    q = 2*np.pi/20  # twist wavevector; 20 is presumably the pitch -- TODO confirm
    def nx(x,y,z):
        """x-component of the director ansatz at (x, y, z)."""
        r = np.sqrt(x**2+y**2)
        return -q*y*np.sinc(q*r)
    def ny(x,y,z):
        """y-component of the director ansatz at (x, y, z)."""
        r = np.sqrt(x**2+y**2)
        return q*x*np.sinc(q*r)
    def nz(x,y,z):
        """z-component of the director ansatz at (x, y, z)."""
        r = np.sqrt(x**2+y**2)
        return np.cos(q*r)
    # 10x10x10 length units sampled on an 80x80x80 grid.
    nfield = nm.DirectorField(
        mesh_lengths=(10, 10, 10), mesh_dimensions=(80, 80, 80))
    nfield.init_from_funcs(nx,ny,nz)
    nfield.normalize()
    nfield.rotate_90deg("x")
    nfield.extend(2,2)
    nfield.set_mask(mask_type="droplet")
    # The LCMaterial object contains the details of the materials
    # of the LC sample: LC layer + possible isotropic layers above it
    # (a glass plate for example). We also assume an index-matched objective
    # by setting nout to 1.51
    mat = nm.LCMaterial(
        lc_field=nfield,
        ne="1.6933+0.0078/lambda^2+0.0028/lambda^4",
        no="1.4990+0.0072/lambda^2+0.0003/lambda^4",
        nhost=1.55, nin=1.51, nout=1.51)
    mat.add_isotropic_layer(nlayer=1.51, thickness=1000)
    # Since we assumed an index-matched objective, we can set NA above 1
    wavelengths = np.linspace(0.4, 0.8, 11)
    sim = nm.LightPropagator(
        material=mat, wavelengths=wavelengths, max_NA_objective=1.1)
    output_fields = sim.propagate_fields(method="bpm")
    # We save the optical fields in a vti file
    output_fields.save_to_vti("optical_fields")
# Finally, the optical fields are visualized as in a real microscope
viewer = nm.FieldViewer(output_fields)
# BUG FIX: removed a stray trailing "|" character after this call, which
# made the line a syntax error.
viewer.plot()
988,943 | 3b187a83070a05fac3c0728ddc31d3086eaf8c22 | #!/usr/bin/env python
import time
import threading
import RPi.GPIO as gpio
# --- HC-SR04-style ultrasonic distance measurement (BCM numbering) ---
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)

trig = 14  # trigger output pin
echo = 2   # echo input pin

gpio.setup(trig, gpio.OUT)
gpio.setup(echo, gpio.IN)

# Hold the trigger low while the sensor settles.
gpio.output(trig, False)
print("Waiting...")
time.sleep(2)

# Fire a ~10 microsecond trigger pulse.
gpio.output(trig, 1)
time.sleep(0.00001)
gpio.output(trig, 0)

# BUG FIX: initialise both timestamps first. If the echo line never
# toggles (disconnected/faulty sensor), the original while-loops left
# pulse_start/pulse_end undefined and the subtraction raised NameError.
pulse_start = time.time()
pulse_end = pulse_start

while gpio.input(echo) == 0:
    pulse_start = time.time()
while gpio.input(echo) == 1:
    pulse_end = time.time()

# Half the round-trip at the speed of sound: ~17150 cm/s one way.
duration = pulse_end - pulse_start
distance = round(duration * 17150, 2)
print("distance: %s cm" % distance)
gpio.cleanup()
|
988,944 | a3e2d319e3d78b96bfeb348e2b5d6df4da44c7ee | import sys
import numpy as np
import random
def create_sample(path, num):
    """Write `num` labelled samples to `path`, one per line.

    Each line has the form ``[a, label]`` where ``a`` is a random integer
    in [1, 1000] and ``label`` is 1 if ``a`` is prime, else 0.

    Fixes over the original:
      * the divisor a // 2 is now tested -- the old ``range(2, int(a / 2))``
        excluded it, so 4 was misclassified as prime;
      * the output file is closed via a context manager.
    """
    with open(path, 'w') as out:
        for _ in range(num):
            a = random.randint(1, 1000)
            # a is prime iff a > 1 and no j in [2, a // 2] divides it.
            is_prime = 1 if a != 1 and all(a % j for j in range(2, a // 2 + 1)) else 0
            # Preserve the original on-disk format: str() of a 2-element list.
            out.write(str([a, is_prime]) + '\n')
# Command-line entry point: <script> <output-path> <sample-count>
if len(sys.argv) > 1:
    pathname = sys.argv[1]
    totalsample = sys.argv[2]
    print("Create file: " + str(pathname))
    print("total size: " + str(totalsample))
    create_sample(pathname, int(totalsample))
print("Done")
988,945 | 57b8e9671a065c24836b7d4096f10f6aa1bd42ab | from __future__ import unicode_literals
from django.db import models
class Newsletter(models.Model):
    """A single newsletter entry with a short text and a numeric status."""
    # Short newsletter text / title, at most 30 characters.
    txt = models.CharField(max_length=30)
    # Integer status flag; semantics not defined in this file --
    # presumably an enabled/disabled or workflow state. TODO confirm.
    status = models.IntegerField()
    def __str__(self):
        # Human-readable representation (admin lists, shell, templates).
        return self.txt
|
988,946 | 51bfadfe0a9ebe3974e4b87f10e6b4fa00351ec2 | import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import os
from wordcloud import WordCloud, STOPWORDS
from collections import defaultdict
from nltk.corpus import stopwords
from collections import defaultdict
import string
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
import eli5
# print(os.listdir('./Quora-Insincere-Questions-Classification'))  # optional: inspect input dir
# Load the Quora Insincere Questions train/test sets and report their shapes.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
print('Train data: \nRows:{} \ncolumns: {}' .format(train.shape[0],train.shape[1]) )
print(train.columns)
print('Test data: \nRows:{} \ncolumns: {}' .format(test.shape[0],test.shape[1]) )
print(test.columns)
#check for the number of positive and negative classes
pd.crosstab(index=train.target,columns='count')
# Observed class balance (heavily skewed toward sincere, target == 0):
#col_0 count
#target
#0 1225312
#1 80810
#Define the word cloud function with a max of 200 words
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0,16.0),
                   title = None, title_size=40, image_color=False):
    """Render a word cloud for `text` with matplotlib.

    Parameters
    ----------
    text : str or array-like -- content fed (via str()) to WordCloud.
    mask : ndarray, optional -- shape mask; also the colour source when
        `image_color` is True.
    max_words, max_font_size : WordCloud limits.
    figure_size : matplotlib figure size in inches.
    title, title_size : plot title and its font size.
    image_color : bool -- recolour the cloud from `mask` pixel colours.
    """
    stopwords = set(STOPWORDS)
    # Extra stop words not present in the wordcloud default dictionary.
    more_stopwords = {'one', 'br', 'Po', 'th', 'sayi', 'fo', 'Unknown'}
    stopwords = stopwords.union(more_stopwords)

    wordcloud = WordCloud(background_color='black',
                    stopwords = stopwords,
                    max_words = max_words,
                    max_font_size = max_font_size,
                    random_state = 42,
                    width=800,
                    height=400,
                    mask = mask)
    wordcloud.generate(str(text))

    plt.figure(figsize=figure_size)
    if image_color:
        # BUG FIX: ImageColorGenerator was used without ever being imported,
        # so this branch raised NameError. Import it from the already-used
        # wordcloud package.
        from wordcloud import ImageColorGenerator
        image_colors = ImageColorGenerator(mask)
        plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation="bilinear")
        plt.title(title, fontdict={'size': title_size,
                                   'verticalalignment': 'bottom'})
    else:
        plt.imshow(wordcloud)
        plt.title(title, fontdict={'size': title_size, 'color': 'black',
                                   'verticalalignment': 'bottom'})
    plt.axis('off')
    plt.tight_layout()
# Select insincere questions from training set
insincere = train.loc[train['target']==1]
plot_wordcloud(insincere['question_text'],title='Word Cloud of Insincere Questions')
#Select sincere questions from training dataset
sincere = train.loc[train['target']==0]
plot_wordcloud(sincere['question_text'],title='Word Cloud of sincere Questions')
# Side-by-side plot comparison using N-grams (helpers defined below).
def ngram_extractor(text, n_gram):
    """Return the `n_gram`-grams of `text` as strings.

    The text is lower-cased and split on single spaces; empty tokens and
    stop words (module-level STOPWORDS) are dropped before the grams are
    formed and re-joined with spaces.
    """
    tokens = [tok for tok in text.lower().split(" ") if tok and tok not in STOPWORDS]
    windows = [tokens[offset:] for offset in range(n_gram)]
    return [" ".join(gram) for gram in zip(*windows)]
# Function to generate a dataframe with n_gram and top max_row frequencies
def generate_ngrams(df, col, n_gram, max_row):
    """Count every n-gram in df[col] and return the `max_row` most
    frequent ones as a DataFrame with columns ["word", "wordcount"]."""
    counts = defaultdict(int)
    for question in df[col]:
        for gram in ngram_extractor(question, n_gram):
            counts[gram] += 1
    # Sort ascending by count, then reverse -- kept exactly as the
    # original did it so tied counts come out in the same order.
    ranked = sorted(counts.items(), key=lambda item: item[1])[::-1]
    top = pd.DataFrame(ranked).head(max_row)
    top.columns = ["word", "wordcount"]
    return top
#Function to construct side by side comparison plots
def comparison_plot(df_1, df_2, col_1, col_2, space):
    """Draw two horizontal bar charts side by side: sincere-question word
    counts on the left, insincere on the right, separated by `space`."""
    fig, axes = plt.subplots(1, 2, figsize=(20, 10))
    titles = ('Top words in sincere questions',
              'Top words in insincere questions')
    for axis, frame, heading in zip(axes, (df_1, df_2), titles):
        sns.barplot(x=col_2, y=col_1, data=frame, ax=axis, color="royalblue")
        axis.set_xlabel('Word count', size=14)
        axis.set_ylabel('Words', size=14)
        axis.set_title(heading, size=18)
    fig.subplots_adjust(wspace=space)
    plt.show()
#Obtain sincere and insincere ngram based on 1 gram (top 20)
# BUG FIX: this first call was corrupted in the source
# ("generate_ngrtrain[\"question_text\"]ams(...)"); restored to a plain
# generate_ngrams call matching the insincere line below.
sincere_1gram = generate_ngrams(train[train["target"]==0], 'question_text', 1, 20)
insincere_1gram = generate_ngrams(train[train["target"]==1], 'question_text', 1, 20)
#compare the bar plots
comparison_plot(sincere_1gram,insincere_1gram,'word','wordcount', 0.25)

#Obtain sincere and insincere ngram based on 2 gram (top 20)
# (variables renamed from *_1gram -- the originals reused the 1-gram names)
sincere_2gram = generate_ngrams(train[train["target"]==0], 'question_text', 2, 20)
insincere_2gram = generate_ngrams(train[train["target"]==1], 'question_text', 2, 20)
#compare the bar plots
comparison_plot(sincere_2gram,insincere_2gram,'word','wordcount', 0.25)

#Obtain sincere and insincere ngram based on 3 gram (top 20)
sincere_3gram = generate_ngrams(train[train["target"]==0], 'question_text', 3, 20)
insincere_3gram = generate_ngrams(train[train["target"]==1], 'question_text', 3, 20)
#compare the bar plots
comparison_plot(sincere_3gram,insincere_3gram,'word','wordcount', 0.25)
# Number of words in the questions
#Insincere questions have more words per question
train['word_count']= train["question_text"].apply(lambda x:len(str(x).split()))
test['word_count']= test["question_text"].apply(lambda x:len(str(x).split()))
fig, ax = plt.subplots(figsize=(15,2))
sns.boxplot(x='word_count',y='target',data=train,ax=ax,palette=sns.color_palette("RdYlGn_r", 10),orient='h')
ax.set_xlabel('Word Count', size=10, color="#0D47A1")
ax.set_ylabel('Target', size=10, color="#0D47A1")
ax.set_title('[Horizontal Box Plot] Word Count distribution', size=12, color="#0D47A1")
plt.gca().xaxis.grid(True)
plt.show()
# Number of characters in the questions
# Insincere questions have more characters than sincere questions
train["char_length"] = train["question_text"].apply(lambda x: len(str(x)))
test["char_length"] = test["question_text"].apply(lambda x: len(str(x)))
fig, ax = plt.subplots(figsize=(15,2))
sns.boxplot(x="char_length", y="target", data=train, ax=ax, palette=sns.color_palette("RdYlGn_r", 10), orient='h')
ax.set_xlabel('Character Length', size=10, color="#0D47A1")
ax.set_ylabel('Target', size=10, color="#0D47A1")
ax.set_title('[Horizontal Box Plot] Character Length distribution', size=12, color="#0D47A1")
plt.gca().xaxis.grid(True)
plt.show()
# Number of stop words in the questions
# Insincere questions have more stop words than sincere questions
train["stop_words_count"] = train["question_text"].apply(lambda x:len([ w for w in str(x).lower().split() if w in STOPWORDS ]))
test["stop_words_count"] = test["question_text"].apply(lambda x:len([ w for w in str(x).lower().split() if w in STOPWORDS ]))
fig, ax = plt.subplots(figsize=(15,2))
sns.boxplot(x="stop_words_count", y="target", data=train, ax=ax, palette=sns.color_palette("RdYlGn_r", 10), orient='h')
ax.set_xlabel('Number of stop words', size=10, color="#0D47A1")
ax.set_ylabel('Target', size=10, color="#0D47A1")
ax.set_title('[Horizontal Box Plot] Number of Stop Words distribution', size=12, color="#0D47A1")
plt.gca().xaxis.grid(True)
plt.show()
# Mean word length in the questions
# (rows above the 99th percentile are dropped from the plot only)
train["word_length"] = train["question_text"].apply(lambda x: np.mean([len(w) for w in str(x).split()]))
test["word_length"] = test["question_text"].apply(lambda x: np.mean([len(w) for w in str(x).split()]))
fig, ax = plt.subplots(figsize=(15,2))
sns.boxplot(x="word_length", y="target", data=train[train['word_length']<train['word_length'].quantile(.99)], ax=ax, palette=sns.color_palette("RdYlGn_r", 10), orient='h')
ax.set_xlabel('Mean word length', size=10, color="#0D47A1")
ax.set_ylabel('Target', size=10, color="#0D47A1")
ax.set_title('[Horizontal Box Plot] Distribution of mean word length', size=12, color="#0D47A1")
plt.gca().xaxis.grid(True)
plt.show()
# Get the tfidf vectors
# Fit the vocabulary on train + test questions, then transform each split.
tfidf_vec = TfidfVectorizer(stop_words='english',ngram_range=(1,3))
tfidf_vec.fit_transform(train['question_text'].values.tolist() + test['question_text'].values.tolist())
train_tfidf= tfidf_vec.transform(train['question_text'].values.tolist())
test_tfidf = tfidf_vec.transform(test['question_text'].values.tolist())
y_train = train["target"].values
x_train = train_tfidf
x_test = test_tfidf
model = linear_model.LogisticRegression(C=5., solver='sag')
model.fit(x_train, y_train)
# NOTE(review): despite the name, y_test holds *predicted* insincere
# probabilities for the test set, not ground-truth labels.
y_test = model.predict_proba(x_test)[:,1]
# Visualize the strongest learned features (excluding the bias term).
eli5.show_weights(model, vec=tfidf_vec, top=100, feature_filter=lambda x: x != '<BIAS>')
|
988,947 | e541126105ee4b869050a207706449cb8dfe8bb2 | from airhockey import AirHockeySim
# Import tkinter under either its Python 2 or Python 3 name.
# BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
# and unrelated errors; only an ImportError should trigger the fallback.
try:
    import Tkinter as tk  # Python 2
except ImportError:
    import tkinter as tk  # Python 3
'''
Used to verify tkinter works properly
'''
def test_gui():
    """Smoke test: open a 400x400 window with a circle drifting diagonally."""
    root = tk.Tk()
    canvas = tk.Canvas(root,width=400,height=400)
    canvas.pack()
    circle = canvas.create_oval(50,50,80,80,outline="white",fill="blue")
    def redraw():
        # Re-schedule itself every 100 ms, then nudge the circle by (2, 2).
        canvas.after(100,redraw)
        canvas.move(circle,2,2)
    canvas.after(100,redraw)
    root.mainloop()
'''
Used to verify physics engine working properly
'''
def test_airhockey():
    """Visual test: render the AirHockeySim rink, players and puck in tkinter."""
    sim = AirHockeySim()
    # Canvas Boilerplate
    width, height = sim._rink_dim
    BORDER = 25  # pixel margin around the rink
    root = tk.Tk()
    canvas = tk.Canvas(root, width=width+2*BORDER, height=height+2*BORDER)
    canvas.pack()
    # Draw Rink
    rect = canvas.create_rectangle(BORDER, BORDER, width+BORDER, height+BORDER)
    g1 = canvas.create_line(BORDER+width/2-sim._net_width/2, BORDER, BORDER+width/2+sim._net_width/2, BORDER, fill='red', width='5')
    g2 = canvas.create_line(BORDER+width/2-sim._net_width/2, BORDER+height, BORDER+width/2+sim._net_width/2, BORDER+height, fill='red', width='5')
    #Draw Puck and Players
    p1 = canvas.create_oval(sim._p1.get_corner_pos(shift=BORDER))
    p2 = canvas.create_oval(sim._p2.get_corner_pos(shift=BORDER))
    puck = canvas.create_oval(sim._puck.get_corner_pos(shift=BORDER))
    def redraw():
        # Step the simulation and move the sprites; re-schedules itself.
        canvas.after(1,redraw) # wait time (animation speed)
        sim.update()
        canvas.coords(p1, sim._p1.get_corner_pos(shift=BORDER))
        canvas.coords(p2, sim._p2.get_corner_pos(shift=BORDER))
        canvas.coords(puck, sim._puck.get_corner_pos(shift=BORDER))
    redraw()
    root.mainloop()
if __name__ == '__main__':
    # NOTE(review): this instance is unused — test_airhockey() builds its own.
    ah = AirHockeySim()
    #test_gui()
    test_airhockey()
|
988,948 | 4bc735748322aa3a930f812ee6095a1dcb4c58c3 | from re import match
import pytest
from hyp3lib import GranuleError
from hyp3_insar_gamma import ifm_sentinel
def test_get_copol():
    """get_copol maps a granule's polarization field to its co-pol channel."""
    # SV/DV granules -> 'vv'; SH/DH granules -> 'hh'
    assert ifm_sentinel.get_copol('S1B_WV_SLC__1SSV_20200923T184541_20200923T185150_023506_02CA71_AABB') == 'vv'
    assert ifm_sentinel.get_copol('S1B_IW_GRDH_1SDV_20200924T092954_20200924T093026_023515_02CABC_6C62') == 'vv'
    assert ifm_sentinel.get_copol('S1B_IW_GRDH_1SSH_20200924T112903_20200924T112932_023516_02CAC7_D003') == 'hh'
    assert ifm_sentinel.get_copol('S1B_IW_OCN__2SDH_20200924T090450_20200924T090519_023515_02CAB8_917B') == 'hh'
    # The remaining polarization codes are expected to be rejected.
    with pytest.raises(GranuleError):
        ifm_sentinel.get_copol('S1A_EW_GRDM_1SHH_20150513T080355_20150513T080455_005900_007994_35D2')
    with pytest.raises(GranuleError):
        ifm_sentinel.get_copol('S1A_EW_GRDM_1SHV_20150509T230833_20150509T230912_005851_00787D_3BE5')
    with pytest.raises(GranuleError):
        ifm_sentinel.get_copol('S1A_IW_SLC__1SVH_20150706T015744_20150706T015814_006684_008EF7_9B69')
    with pytest.raises(GranuleError):
        ifm_sentinel.get_copol('S1A_IW_GRDH_1SVV_20150706T015720_20150706T015749_006684_008EF7_54BA')
def test_least_precise_orbit_of():
    """Orbit quality letter degrades: POEORB -> 'P', RESORB -> 'R', missing -> 'O'."""
    precise = 'S1A_OPER_AUX_POEORB_OPOD_20160616T121500_V20160526T225943_20160528T005943'
    restituted = 'S1B_OPER_AUX_RESORB_OPOD_20200907T115242_V20200906T042511_20200906T074241'
    predicted = None
    assert ifm_sentinel.least_precise_orbit_of([precise, precise]) == 'P'
    assert ifm_sentinel.least_precise_orbit_of([precise, restituted]) == 'R'
    assert ifm_sentinel.least_precise_orbit_of([precise, predicted]) == 'O'
    assert ifm_sentinel.least_precise_orbit_of([restituted, restituted]) == 'R'
    assert ifm_sentinel.least_precise_orbit_of([restituted, predicted]) == 'O'
    assert ifm_sentinel.least_precise_orbit_of([predicted, predicted]) == 'O'
def test_get_product_name():
    """Product names encode platforms, dates, polarization, orbit letter and spacing."""
    # Both orbits precise (POEORB) -> orbit letter 'P'
    payload = {
        'reference_name': 'S1A_IW_SLC__1SSV_20160527T014319_20160527T014346_011438_011694_26B0',
        'secondary_name': 'S1A_IW_SLC__1SSV_20160714T014322_20160714T014349_012138_012CE7_96A0',
        'orbit_files': [
            'S1A_OPER_AUX_POEORB_OPOD_20160616T121500_V20160526T225943_20160528T005943.EOF',
            'S1A_OPER_AUX_POEORB_OPOD_20160616T121500_V20160526T225943_20160528T005943.EOF',
        ],
        'pixel_spacing': 80,
    }
    name = ifm_sentinel.get_product_name(**payload)
    assert match(r'S1AA_20160527T014319_20160714T014322_VVP049_INT80_G_ueF_[0-9A-F]{4}$', name)
    # One restituted orbit (RESORB) -> orbit letter 'R'
    payload = {
        'reference_name': 'S1B_IW_SLC__1SDH_20200918T073646_20200918T073716_023426_02C7FC_6374',
        'secondary_name': 'S1A_IW_SLC__1SDH_20200906T073646_20200906T073716_023251_02C278_AE75',
        'orbit_files': [
            'S1B_OPER_AUX_RESORB_OPOD_20200907T115242_V20200906T042511_20200906T074241.EOF',
            'S1A_OPER_AUX_POEORB_OPOD_20160616T121500_V20160526T225943_20160528T005943.EOF',
        ],
        'pixel_spacing': 40
    }
    name = ifm_sentinel.get_product_name(**payload)
    assert match(r'S1BA_20200918T073646_20200906T073646_HHR012_INT40_G_ueF_[0-9A-F]{4}$', name)
    # Missing orbit file -> orbit letter 'O'
    payload = {
        'reference_name': 'S1A_IW_SLC__1SSV_20150101T230038_20150101T230114_003984_004CC1_0481',
        'secondary_name': 'S1B_IW_SLC__1SDV_20200924T005722_20200924T005750_023510_02CA91_4873',
        'orbit_files': [
            'S1B_OPER_AUX_RESORB_OPOD_20200907T115242_V20200906T042511_20200906T074241.EOF',
            None,
        ],
        'pixel_spacing': 40
    }
    name = ifm_sentinel.get_product_name(**payload)
    assert match(r'S1AB_20150101T230038_20200924T005722_VVO2093_INT40_G_ueF_[0-9A-F]{4}$', name)
|
988,949 | 14694ce17b4d6f3e7da5482d28560d91a67ddaf0 | from collections import Counter
import fst
from hsst.decoding.OpenFST import OpenFST
from hsst.decoding import helpers
class AlignmentOpenFST(OpenFST):
    """OpenFST helper that builds alignment transducers for HSST decoding (Python 2)."""
    def __init__(self, wmap_filename, lm_vcb_filename, rule_id_offset=0):
        """
        Initialize AlignmentOpenFST object
        :param wmap_filename: Path to the word map
        :param lm_vcb_filename: Path to the LM vocabulary
        :param rule_id_offset: Starting rule id for HSST rules
        :return:
        """
        super(AlignmentOpenFST, self).__init__(wmap_filename, lm_vcb_filename)
        self.rule_id_offset = rule_id_offset
    def fst_tostring(self, fst_1, idx=False):
        """
        Construct a string describing the FST.
        :param fst_1: Input FST object
        :param idx: Whether to not map labels using word map
        :return: String representation of the FST
        """
        fst_string = 'Transducer\n'
        for state in fst_1.states:
            for arc in state.arcs:
                # Map output labels through the word map unless idx is set.
                olabel = self.wmap[arc.olabel].encode('utf-8') if not idx and arc.olabel in self.wmap else arc.olabel
                fst_string += '{} -> {} / {} : {} / {}\n'.format(state.stateid, arc.nextstate, arc.ilabel, olabel,
                                                                float(arc.weight))
            if state.final:
                fst_string += '%s / %s\n' % (state.stateid, state.final)
        fst_string += '-------\n'
        return fst_string
    @staticmethod
    def create_empty_fst():
        # Single epsilon:epsilon arc; used as a placeholder transducer.
        empty_fst = fst.Transducer()
        empty_fst.add_arc(0, 1, 0, 0)
        empty_fst[1].final = True
        return empty_fst
    @staticmethod
    def create_root_fst(label, int_coverage_cells):
        """
        Create a root FST consisting of a single (nonterminal) transition
        :param label: Nonterminal transition label
        :param int_coverage_cells: Dictionary of integer coverages and associated FSTs
        :return: Root FST
        """
        root_fst = fst.Transducer(isyms=fst.SymbolTable(), osyms=fst.SymbolTable())
        root_fst.osyms[label] = int(label)
        # Adding epsilon input label using symbol table lookup for id=0
        root_fst.add_arc(0, 1, root_fst.isyms.find(0), label)
        root_fst[1].final = True
        # Create root FST symbol table
        for int_coverage, cell_fst in int_coverage_cells.items():
            root_fst.osyms[int_coverage] = int(int_coverage)
        return root_fst
    def create_rule_fst(self, rule, feature_weights_dict):
        """
        Create rule FST accepting the sequence of target side tokens.
        :param rule: Rule object
        :param feature_weights_dict: Dictionary of feature names and their weights
        :return: Rule FST
        """
        # Determine whether to use word insertion penalty
        if 'word_insertion_penalty' in feature_weights_dict and not rule.hiero_intersection_rule:
            wip = feature_weights_dict['word_insertion_penalty']
        else:
            wip = None
        # Offset rule_id by rule_id_offset to prevent clashes with Hiero rule id space
        rule_id = rule.id
        if not rule.hiero_intersection_rule:
            rule_id += self.rule_id_offset
        rule_fst = fst.Transducer()
        # Insert rule arc at the start of the transducer (rule_id:epsilon)
        rule_fst.add_arc(0, 1, int(rule_id), 0)
        # Add arcs representing target tokens one after the other
        # Index is adjusted to account for rule arc
        index = 1  # keeps this value when rule.target_side is empty
        for index, token in enumerate(rule.target_side, 1):
            self.add_arc(rule_fst, index, token, rule.nonterminal_coverages, weight=wip)
        # Compute rule weight in a log linear model
        rule_weight = helpers.loglinear_rule_weight(rule.feature_dict, feature_weights_dict)
        # Add the rule weight to the final state in the FST
        rule_fst[index + 1].final = rule_weight
        if rule.hiero_intersection_rule:
            # NOTE(review): Python 2 debug prints — consider removing or routing
            # through a logger.
            print rule_weight
            print self.fst_tostring(rule_fst)
        return rule_fst
    @staticmethod
    def add_arc(rule_fst, index, token, nonterminal_coverages, weight=None):
        """
        Add an arc to rule FST
        :param rule_fst: Rule FST being built
        :param index: Token index
        :param token: Token
        :param nonterminal_coverages: Dictionary of nonterminal symbols mapped to their bit coverages
        :param weight: Arc weight if specified (e.g. word insertion penalty)
        :return:
        """
        # Add arc of the form epsilon:token
        # Nonterminal symbol
        if token in nonterminal_coverages:
            rule_fst.add_arc(index, index + 1, 0, int(nonterminal_coverages[token]))
        elif int(token) == OpenFST.DR:
            rule_fst.add_arc(index, index + 1, 0, OpenFST.DR)
        elif weight is None:
            rule_fst.add_arc(index, index + 1, 0, int(token))
        # Terminal symbol
        else:
            rule_fst.add_arc(index, index + 1, 0, int(token), weight=-weight)
    @staticmethod
    def add_start_and_end_of_sentence_symbols(fst_1):
        """
        Concatenate start (beginning) and end (end) of sentence symbols to the FST.
        :param fst_1: FST object
        :return: FST with prepended start of sentence symbol and appended end of sentence symbol.
        """
        # Create start of sentence FSA
        # 1 is start of sentence label
        start_of_sentence = fst.Transducer()
        start_of_sentence.add_arc(0, 1, 0, 1)
        start_of_sentence[1].final = True
        # Create end of sentence FSA
        # 2 is end of sentence label
        end_of_sentence = fst.Transducer()
        end_of_sentence.add_arc(0, 1, 0, 2)
        end_of_sentence[1].final = True
        # Modify start_of_sentence by concatenating fst_1
        start_of_sentence.concatenate(fst_1)
        # Modify joint start_of_sentence and fst_1 by concatenating end_of_sentence
        start_of_sentence.concatenate(end_of_sentence)
        return start_of_sentence
    @staticmethod
    def count_nonterminal_arcs(fst_1):
        # Return (total nonterminal arcs, distinct nonterminal labels).
        # NOTE(review): labels of >= 10 characters are assumed to be coverage
        # encodings (nonterminals) — confirm against the coverage id scheme.
        nt_arcs = Counter()
        for state in fst_1.states:
            for arc in state.arcs:
                label = str(arc.olabel)
                if len(label) >= 10:
                    nt_arcs[label] += 1
        return sum(nt_arcs.values()), len(nt_arcs)
|
988,950 | d99d0c703742471d898fbe069030a3d3ad701368 | #author:秦大粤
import random
import numpy as np
import matplotlib.pyplot as plt
class clusters:
    """Grow a diffusion-limited aggregation (DLA) cluster on a square lattice.

    size: lattice side length; number: particles to attach to the seed.
    """
    def __init__(self,size=100,number=10):
        self.l=size
        self.n=number
        # Occupancy grid: 1 = site occupied by the cluster, 0 = empty.
        self._fig=[[0]*self.l for i in range(self.l)]
    def growth(self):
        """Attach self.n random walkers to the central seed."""
        #initialization: seed in the middle of the lattice
        self._fig[int(self.l/2)][int(self.l/2)]=1
        counter=0
        while counter<self.n:
            # randomly generate a particle (rejected if born on the border)
            tempx=random.randint(0,self.l-1)
            tempy=random.randint(0,self.l-1)
            if tempx==0:
                continue
            elif tempx==self.l-1:
                continue
            elif tempy==0:
                continue
            elif tempy==self.l-1:
                continue
            self._fig[tempx][tempy]=1
            # NOTE(review): if the particle spawns adjacent to the cluster it is
            # left marked as occupied but the counter is not incremented —
            # confirm this is the intended sticking rule.
            if self._fig[tempx-1][tempy]==1:
                continue
            elif self._fig[tempx+1][tempy]==1:
                continue
            elif self._fig[tempx][tempy-1]==1:
                continue
            elif self._fig[tempx][tempy+1]==1:
                continue
            #walk: unbiased random walk until the border or the cluster is hit
            while(1):
                self._fig[tempx][tempy]=0
                temp=random.random()
                if temp<0.25:
                    tempx-=1
                elif 0.25<temp<0.5:
                    tempy-=1
                elif 0.5<temp<0.75:
                    tempx+=1
                elif temp>0.75:
                    tempy+=1
                self._fig[tempx][tempy]=1
                # Walker escaped through the border: discard it.
                if tempx==0:
                    self._fig[tempx][tempy]=0
                    break
                elif tempx==self.l-1:
                    self._fig[tempx][tempy]=0
                    break
                elif tempy==0:
                    self._fig[tempx][tempy]=0
                    break
                elif tempy==self.l-1:
                    self._fig[tempx][tempy]=0
                    break
                # Walker touched the cluster: it sticks and counts.
                if self._fig[tempx-1][tempy]==1:
                    counter+=1
                    break
                elif self._fig[tempx+1][tempy]==1:
                    counter+=1
                    break
                elif self._fig[tempx][tempy-1]==1:
                    counter+=1
                    break
                elif self._fig[tempx][tempy+1]==1:
                    counter+=1
                    break
    def show(self):
        """Scatter-plot occupied sites; does NOT call plt.show() itself."""
        for i in range(self.l):
            for j in range(self.l):
                if self._fig[i][j]==1:
                    plt.plot(i,j,'g.')
        plt.xlim(0,self.l)
        plt.ylim(0,self.l)
        plt.grid(True)
        plt.xlabel('x')
        plt.ylabel('y')
        plt.title('DLA cluster,number of particles=%.f'%self.n)
# Build a DLA cluster with the default parameters and display it.
a = clusters()
a.growth()
a.show()
plt.show()  # fix: clusters.show() only draws; without this the figure is never rendered
|
988,951 | e371001d76cd0aef56efa63be3ef7b7c409d18ac | #
# File: database_operations.py
#
# Author: Andreas Skielboe (skielboe@gmail.com)
# Date: August 2012
#
# Summary of File:
#
# Functions that connect to and modify the database at the lowest levels in SQLalchemy
#
import settings as s
def create_session():
    """Create and return a new SQLAlchemy session bound to the project database.

    The engine points at the SQLite file configured in settings (s.dbPath).
    """
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    # Build the engine first, then bind a session factory to it.
    engine = create_engine('sqlite:///' + s.dbPath)
    session_factory = sessionmaker(bind=engine)
    return session_factory()
|
988,952 | 27dceb2dfed29c17673a7bee1d510981159258a4 |
def metodo():
    """Announce execution on stdout, preceded by a blank line."""
    for linha in ("", "Método executado!"):
        print(linha)
def main():
    """Program entry point: run the single demo method."""
    metodo()


# Run the program (execution starts here)
if __name__ == "__main__":
    main()
|
988,953 | 5f1e7e6a6e60f3352055d384a916d42ed410132d | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:chunxiusun
import requests,unittest,pexpect,re,json,random,time,os
IP = "192.168.2.16"
PORT = "8900"
TYPE = "mock"
mock_config = "mock_1.json"
front_port = 1111
def creat_instance():
    """Create a mock instance via the olympus API and block until it is RUNNING.

    Returns the new instance id.
    """
    print "##creat instance##"
    url = "http://%s:%s/olympus/v1/instance?type=%s"%(IP,PORT,TYPE)
    config = open(mock_config,'r').read()
    pre = [{"pre_fetch_cmd": "echo hello framework!!"}]
    data = {}
    data["config_json"] = config
    data["pre_executor"] = json.dumps(pre)
    r = requests.post(url,data=data)
    print r.status_code
    print r.content
    print r.elapsed.microseconds
    resp = json.loads(r.content)
    instance_id = resp["Data"]["instance_id"]
    #get instance
    state = ""
    # NOTE(review): busy-polls with no sleep or timeout.
    while state!= "RUNNING":
        url = "http://%s:%s/olympus/v1/instance?iid=%s"%(IP,PORT,instance_id)
        r1 = requests.get(url)
        resp1 = json.loads(r1.content)
        code1 = resp1["Code"]
        state = resp1["Data"]["State"]
    return instance_id
def creat_group():
    """Create a proxy group fronted by front_port; return its id or '' on failure."""
    print "##creat group##"
    url = "http://%s:%s/olympus/v1/group"%(IP,PORT)
    data = {}
    data["frontend"] = front_port
    r = requests.post(url,data=data)
    print r.status_code
    print r.content
    print r.elapsed.microseconds
    resp = json.loads(r.content)
    code = resp["Code"]
    if code == 0:
        group_id = resp["Data"]["group_id"]
    else:
        group_id = ""
    return group_id
def group_add_instance(gid,iid):
    """Attach instance *iid* to group *gid*, using this host:front_port as backend."""
    print "##group add instance##"
    url = "http://%s:%s/olympus/v1/group/add/instance?gid=%s"%(IP,PORT,gid)
    data = {}
    data["iid"] = iid
    data["backend"] = "%s:%s"%(IP,front_port)
    r = requests.post(url,data=data)
    print r.status_code
    print r.content
    print r.elapsed.microseconds
def delete_instance(iid):
    """Delete instance *iid* and poll until the GET endpoint returns 400 (gone)."""
    print "##delete instance##"
    url = "http://%s:%s/olympus/v1/instance/delete?iid=%s"%(IP,PORT,iid)
    r = requests.post(url)
    print r.status_code
    print r.content
    print r.elapsed.microseconds
    resp_code = 0
    # NOTE(review): busy-polls with no sleep or timeout.
    while resp_code != 400:
        url = "http://%s:%s/olympus/v1/instance?iid=%s"%(IP,PORT,iid)
        r1 = requests.get(url)
        resp_code = r1.status_code
def delete_group(gid):
    """Delete group *gid* and poll until the GET endpoint returns 500 (gone)."""
    print "##delete group##"
    url = "http://%s:%s/olympus/v1/group/delete?gid=%s"%(IP,PORT,gid)
    r = requests.post(url)
    print r.status_code
    print r.content
    print r.elapsed.microseconds
    resp_code = 0
    # NOTE(review): busy-polls with no sleep or timeout.
    while resp_code != 500:
        url = "http://%s:%s/olympus/v1/group?gid=%s"%(IP,PORT,gid)
        r1 = requests.get(url)
        print r1.status_code
        resp_code = r1.status_code
def run():
    """Driver: currently only creates an instance; the other steps are disabled."""
    instance_id = creat_instance()
    #group_id = creat_group()
    #group_add_instance(group_id,instance_id)
    #delete_instance(instance_id)
    #delete_group(group_id)
# Script entry point.
if __name__ == '__main__':
    run()
|
988,954 | d5243a833c752667f09fe9befd6076893cc21c7f | import pygame, sys, pymunk
SCREEN_SIZE = 600,600
class Screen:
    """Minimal pygame window wrapper with a blocking render loop."""
    def __init__(self):
        self.name = "SCREEN"
        self.running = True
        self.screen = pygame.display.set_mode(SCREEN_SIZE)
        self.screen_color = 255,255,255  # RGB white background
        pygame.display.set_caption(self.name)
    def quit_event(self, events ):
        # anticipate a quit event
        for ev in events:
            if ev.type == pygame.QUIT:
                pygame.quit()
                self.running = False
                sys.exit()
    def display(self):
        # Drain pending events and handle QUIT.
        # NOTE(review): display() is never called from run(), so the QUIT event
        # is never processed in the loop as written — confirm intent.
        ev = pygame.event.get()
        keys = pygame.key.get_pressed()
        self.quit_event(ev)
    def run(self):
        """Blocking loop: clear and flip the display until self.running is cleared."""
        while self.running:
            # NOTE(review): set_mode() re-creates the window every frame; it is
            # already called once in __init__ — likely unintended.
            self.screen = pygame.display.set_mode(SCREEN_SIZE)
            self.screen.fill(self.screen_color)
            pygame.display.update()
# Create the window and enter the blocking render loop.
Screen().run()
988,955 | c8d1d49d2aedc21ea5a198235029433a7e707480 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
from os import listdir
from os.path import isfile, join
from py2neo import Graph, Node, Relationship, authenticate
from Fiche import Fiche
from Source import Source
def analyseFiche(nom_fichier, dossier, graph):
    """Parse one fiche text file and create its node in the graph.

    :param nom_fichier: file name of the fiche
    :param dossier: directory containing the fiche files
    :param graph: py2neo Graph in which the node is created
    :return: the populated Fiche object
    """
    with open(join(dossier, nom_fichier)) as f:
        content = f.readlines()
    content = [x.strip('\n') for x in content]

    fiche_contenu_list = []
    fiche_id = ''
    fiche_titre = ''
    fiche_contenu = ''
    fiche_date = ''
    fiche_auteur = ''
    fiche_references = ''
    for line in content:
        # BUG FIX: the original used line.strip('<prefix>'), but str.strip()
        # removes any of the given *characters* from both ends of the string,
        # so values beginning or ending with those characters were mangled
        # (e.g. a title ending in 'e' or ':' lost it). Slice the literal
        # prefix off instead.
        if line.startswith('fiche n°'):
            fiche_id = line[len('fiche n°'):].strip()
        elif line.startswith('titre:'):
            fiche_titre = line[len('titre:'):].strip()
        elif line.startswith('Références associées:'):
            fiche_references = line[len('Références associées:'):].strip().split(' ')
        elif line.startswith('auteur:'):
            fiche_auteur = line[len('auteur:'):].strip()
        elif line.startswith('date:'):
            fiche_date = line[len('date:'):].strip()
        else:
            # Any other non-empty line is body text.
            if line != '\n':
                fiche_contenu_list.append(line)
    fiche_contenu = '\n'.join(fiche_contenu_list)
    fiche = Fiche(fiche_id, fiche_titre, fiche_auteur,
                  fiche_contenu, fiche_date, fiche_references)
    fiche.create_node(graph)
    return fiche
def ficheDocumentation(fiche, type_doc, dossier, nom_fichier, graph):
    """Attach documentation sources (references or images) to *fiche*.

    :param fiche: Fiche whose sources are being linked
    :param type_doc: either "references" or "images"
    :param dossier: directory holding the index file
    :param nom_fichier: name of the index file to read
    :param graph: py2neo Graph used to create source nodes and relations
    """
    with open(join(dossier, nom_fichier)) as ref_file:
        content = ref_file.readlines()
    content = [x.strip('\n') for x in content]
    for line in content:
        if (type_doc == "references"):
            # Reference lines look like 'id@legend'.
            ref_id, _, ref_leg = line.partition('@')
            if ref_id in fiche.get_references():
                reference_trouvee = Source('Reference', ref_leg)
                reference_trouvee.create_source(graph)
                fiche.create_doc(graph, reference_trouvee, '')
        elif (type_doc == "images"):
            # Image lines look like 'fiche_id@filename@legend'.
            infos = line.split('@')
            fiche_id = infos[0]
            filename = infos[1]
            legende = infos[2]
            if fiche_id == fiche.get_tmp_id():
                reference_trouvee = Source('Image', legende, filename)
                reference_trouvee.create_source(graph)
                fiche.create_doc(graph, reference_trouvee, '')
def main(project_directory, ignore_files):
    """Build graph nodes from the project's fiche pages (or raw .txt files).

    :param project_directory: root of the project; fiches live in '<root>/pages'
    :param ignore_files: [references_index, images_index] — index files to skip
        as fiches but to use for documentation links
    """
    authenticate("localhost:7474", "neo4j", "haruspex")
    graph_db = Graph()
    dossier = os.path.join(project_directory + "/pages")
    if os.path.exists(dossier):
        # For each fiche, parse its content and create the
        # corresponding nodes/relations.
        files = [f for f in listdir(dossier) if isfile(join(dossier, f))]
        for fiche in files:
            if (fiche not in ignore_files):
                fiche_analysee = analyseFiche(fiche, dossier, graph_db)
                ficheDocumentation(fiche_analysee, "references", dossier,
                                   ignore_files[0], graph_db)
                ficheDocumentation(fiche_analysee, "images", dossier,
                                   ignore_files[1], graph_db)
    else:
        files = [f for f in listdir(project_directory) if (isfile(join(project_directory, f)) and f.endswith('.txt'))]
        # TODO: fetch the omeka metadata for the documents
        for document in files:
            # BUG FIX: str.strip(project_directory) removes *characters*, not a
            # prefix; use replace() exactly as the Fiche id line below does.
            print(document.replace(project_directory, ''))
            fiche = Fiche(document.replace(project_directory, '').replace('.txt', ''), '', '',
                          '', '', '')
            fiche.create_node(graph_db)
if __name__ == "__main__":
    # CLI: first argument is the project directory; the two index files are
    # skipped as fiches but read for documentation links.
    project_directory = sys.argv[1]
    ignore_files = ["references", "images"]
    main(project_directory, ignore_files)
|
988,956 | 5568c16f1c72e6340980fde4c3102188b0fa5dcd | import matplotlib.pyplot as plt
import networkx as nx
class MyGraph(nx.Graph):
    """networkx Graph with per-component diameter reporting and DFS spanning tree."""
    def Diameter(self):
        """Return one shortest path realizing the diameter of self.component."""
        self.diamlen = nx.diameter(self.component)
        for somenode in self.component.nodes:
            for anothernode in self.component.nodes:
                if nx.shortest_path_length(self.component, somenode, anothernode) == self.diamlen:
                    diametern = nx.shortest_path(self.component, source = somenode, target = anothernode)
                    return diametern
    def dfs(self, v):
        # Depth-first search collecting visited nodes (self.N) and tree
        # edges (self.rebra_kist). NOTE(review): recursive — may hit the
        # recursion limit on large components.
        self.N.append(v)
        for w in self.neighbors(v):
            if(self.pozn[w] == -1):
                self.rebra_kist.append((w, v))
                self.pozn[w] = 0
                self.dfs(w)
    def Kist(self):
        """Build a DFS spanning tree of self.component starting at any node."""
        self.pozn = dict.fromkeys(self.component.nodes, -1)
        self.v = next(iter(self.component.nodes))
        self.pozn[self.v] = 0
        self.dfs(self.v)
    def Supgraph(self):
        # NOTE(review): name looks like a typo for 'Subgraph' but is part of
        # the public interface (called at module level) — kept as is.
        """Analyse every connected component: stats, diameter path, spanning tree."""
        self.rebra_kist = []
        self.N = []
        self.diameter_nodes = []
        self.diameter_edges = []
        for numberofcomponent, c in enumerate(nx.connected_components(self)):
            self.component = self.subgraph(c)
            self.diam = self.Diameter()
            self.diameter_nodes.extend(self.diam)
            self.numberofcomponent = numberofcomponent
            self.Subgraph_info()
            self.Output_Diameter()
            self.Kist()
    def Output_Diameter(self):
        """Print and record the edges along the current diameter path."""
        print(" Diameter edges:")
        for i in range(len(self.diam)-1):
            self.diameter_edges.append((self.diam[i], self.diam[i+1]))
            print(" {}-{}".format(self.diam[i], self.diam[i+1]), end = " ")
        print()
    def Nodes_Edges(self):
        print(" has:\n nodes:", len(self.component.nodes), "\n edges:", len(self.component.edges))
    def Degrees(self):
        print(" Degrees:")
        for j in self.component.degree():
            print(" {}:{}".format(j[0], j[1]))
    def Eccentricitys(self):
        print(" Eccentricity:")
        for i in nx.eccentricity(self.component).items():
            print(" {}:{}".format(i[0], i[1]))
    def Subgraph_Radius(self):
        print(" Radius of component:", nx.radius(self.component))
    def Subgraph_Diameter(self):
        print("Diameter of component:", self.diamlen)
    def Subgraph_info(self):
        """Print the full statistics block for the current component."""
        print("{} component".format(self.numberofcomponent + 1), end = "")
        self.Nodes_Edges()
        self.Degrees()
        self.Eccentricitys()
        self.Subgraph_Radius()
        self.Subgraph_Diameter()
# Drawing palette (color5 'r' is defined but unused below).
color1 = 'w'
color2 = 'black'
color3 = 'b'
color4 = 'g'
color5 = 'r'
color6 = 'y'
# Read the edge list into our analysing graph subclass.
G = nx.read_edgelist("data.txt", create_using = MyGraph(), nodetype = str)
# Figure 1: default layout.
plt.figure(1)
nx.draw(G, node_color = color1, edgecolors = color2, with_labels = True, font_color = color2)
# Fixed coordinates for the manual layout used by figures 2-4.
nodes_coords = {'A' : (1, 1), 'B' : (6, 1), 'C' : (9, 2), 'D' : (7, 2), 'E' : (4, 2), 'F' : (5, 4), 'G' : (-4, 4),
                'J' : (-5, 1), 'I' : (-2, 2), 'K' : (11, 4), 'L' : (13, 1), 'M' : (0, 2), 'N' : (10, 2), 'O' : (0, 4)}
plt.figure(2)
nx.draw(G, pos = nodes_coords, node_color = color1, edgecolors = color2, with_labels = True, font_color = color2)
plt.savefig("Graph2.png", format = "PNG")
# Analyse components; fills G.diameter_nodes/edges and G.rebra_kist.
G.Supgraph()
# Figure 3: highlight diameter nodes (blue) and diameter edges (green).
plt.figure(3)
nx.draw(G, pos = nodes_coords, node_color = color1, edgecolors = color2, with_labels = True, font_color = color2)
nx.draw_networkx_nodes(G, nodelist = G.diameter_nodes, pos = nodes_coords, node_color = color3)
nx.draw_networkx_edges(G, edgelist = G.diameter_edges, pos = nodes_coords, edge_color = color4, width = 5)
plt.savefig("Graph3.png", format = "PNG")
# Figure 4: highlight the DFS spanning-tree edges (yellow).
plt.figure(4)
nx.draw(G, pos = nodes_coords, node_color = color1, edgecolors = color2, with_labels = True, font_color = color2)
nx.draw_networkx_edges(G, edgelist = G.rebra_kist, pos = nodes_coords, edge_color = color6, width = 5)
plt.savefig("Graph4.png", format = "PNG")
plt.show(block = False)
input()
|
988,957 | 10d1b8de395bdb0f63ea0f462bc63ac44d16a879 | from datetime import datetime
import globals as g
from utils import get_videogame_type, get_week_info, get_match_cet_time
from PIL import Image, ImageFont, ImageDraw
import random
import os
import uuid
from dadjokes import Dadjoke
import json
dadjoke = Dadjoke()
def get_schedule_header_footer():
    """Build the weekly schedule header and footer lines for Discord."""
    week_number, week_start, week_end = get_week_info()
    # NOTE(review): '%-d' (no zero padding) is a glibc strftime extension and
    # fails on Windows — confirm the deployment platform.
    sched_header = f"**{g.EMOJIS.ASTRALIS} Schedule week {week_number} for Astralis Esport {week_start.strftime('%B %-d')} - {week_end.strftime('%B %-d')} {g.EMOJIS.ASTRALIS}** \n\n"
    sched_footer = f"See the full schedule here :point_down_tone2: \n<https://astralis.gg/schedule>"
    return sched_header, sched_footer
def get_schedule_message_for_type(sched, type):
    """Format the weekly schedule section for one game.

    :param sched: list of match dicts (possibly empty) for the week
    :param type: 'csgo', 'lol' or 'talent' (shadows the builtin 'type')
    """
    message = ''
    type_emoji = g.EMOJIS.CSGO if type == 'csgo' or type == 'talent' else g.EMOJIS.LOL
    type_header = 'Counter Strike: Global Offensive' if type == 'csgo' else 'League of Legends' if type == 'lol' else 'CS:GO - Astralis Talent'
    type_channel = f'- <#{g.CHANNELS.CSGO.id}>' if type == 'csgo' else f'- <#{g.CHANNELS.LOL.id}>' if type == 'lol' else ''
    message += f"{type_emoji} **__{type_header}__** {type_channel}\n\n"
    if not sched:
        message += f"{type_emoji} No games this week. \n"
    else:
        # NOTE(review): assumes every match shares the first match's
        # league/serie — confirm with the API caller.
        event = sched[0]['league']['name'] + ' ' + sched[0]['serie']['full_name']
        message += f"**{event}** \n"
        for match in sched:
            scheduled = get_match_cet_time(match).strftime('%b %-d - %H:%M')
            match_text = f"{type_emoji} VS {match['opponent']} on {scheduled}"
            # Strike through matches whose scheduled date is already past.
            if datetime.strptime(match['scheduled_at'], '%Y-%m-%dT%H:%M:%SZ').date() < datetime.now().date():
                match_text = f"~~{match_text}~~"
            message += f"{match_text}\n"
    return message
def get_match_starts_message(match):
    """Build the going-live announcement mentioning the game's role and live channel."""
    type = get_videogame_type(match)  # shadows the builtin 'type'
    type_channel = g.CHANNELS.CSGO_LIVE.id if type == 'csgo' else g.CHANNELS.LOL_LIVE.id
    type_role_id = g.ROLES.CSGO.id if type == 'csgo' else g.ROLES.LOL.id
    return f"""<@&{type_role_id}>
{g.EMOJIS.ASTRALIS}We're going live against {match['opponent']}. The live discussion is now open at <#{type_channel}>. Come cheer with the other fans.{g.EMOJIS.ASTRALIS}
:point_down:
:tv: <{match['official_stream_url']}>"""
def get_match_score_update_message(match):
    """Render a spoiler-safe score image for *match*.

    Returns (announcement_text, path_to_generated_png).
    """
    type = get_videogame_type(match)  # shadows the builtin 'type'
    if match['status'] == 'finished':
        text = 'The match has ended! Click the spoiler to show the result'
    else:
        text = 'The score has been updated! Click the spoiler to show the result'
    # Split the results into our score vs. the opponent's.
    team_id = g.type2pandaid[type]
    ast_score = next(result['score'] for result in match['results'] if result['team_id'] == team_id)
    other_score = next(result['score'] for result in match['results'] if result['team_id'] != team_id)
    line = f'{str(ast_score)} - {str(other_score)}'
    opponent = f"{match['opponent']}".upper()
    # Pick a random background image matching the current standing.
    path = f"Graphics/{type}/score"
    img_path = f'{path}/ahead' if ast_score > other_score else f'{path}/behind' if ast_score < other_score else f'{path}/equal'
    to_path = f"temp/{str(uuid.uuid4())}.png"
    pic = random.choice(os.listdir(img_path))
    img = Image.open(f"{img_path}/{pic}")
    draw = ImageDraw.Draw(img)
    font_little = ImageFont.truetype("Fonts/RiformaLL-Bold.otf", 126)
    font_big = ImageFont.truetype("Fonts/RiformaLL-Bold.otf", 200)
    # Score in big type, opponent name beneath in smaller type.
    draw.text((40, 110), line, (255, 255, 255), font=font_big)
    draw.text((40, 625), opponent, (255, 255, 255), font=font_little)
    img.save(to_path)
    return text, to_path
def get_match_end_text(match):
    """Build the channel-closing announcement posted when a match ends.

    The middle sentence depends on whether the Astralis team won.
    """
    astralis_team_id = g.type2pandaid[get_videogame_type(match)]
    parts = ["The match has ended. This channel will remain open for an hour. "]
    if match['winner_id'] == astralis_team_id:
        parts.append("Celebrate the victory with the other fans. ")
    else:
        parts.append("Spend the time discussing the game with the other fans. ")
    parts.append("See you at the next match. ")
    return "".join(parts)
def get_match_graphics_text(csgo_matches, lol_matches, is_monday):
    """Compose the match-graphics announcement, opened with a dad joke.

    :param is_monday: weekly post (True) vs. mid-week update (False)
    """
    # NOTE(review): 'dadjoke' is a module-level singleton — confirm .joke
    # actually yields a fresh joke per call rather than a fixed one.
    joke = dadjoke.joke
    def get_string(match):
        # One download line per match graphic.
        videogame_type = get_videogame_type(match)
        emoji = g.EMOJIS.CSGO if videogame_type == 'csgo' else g.EMOJIS.LOL
        if is_monday:
            return f"{emoji} VS {match['opponent']} {match['dl_url']} \n"
        else:
            return f"{emoji} VS {match['opponent']} ({match['update_reason']}) {match['dl_url']} \n"
    strings = "".join(get_string(match) for match in csgo_matches + lol_matches)
    text = f"On a serious note, <@&{g.ROLES.COMCOORDS.id}> & <@&{g.ROLES.SOME.id}>; "
    if is_monday:
        text += f"Match Graphics for this weeks games are now up! Download them here:"
    else:
        text += f"There's an update to the match graphics for this week! Download it here:"
    return f"{joke}\n\n{text}\n\n{strings}\nIf you find any issues with the match graphics, please let Nicolaj know. "
988,958 | 43480cb0209074e63e591cf0a4266f3612893ec2 | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
# Display/style setup.
pd.set_option('display.width', None)
matplotlib.style.use('ggplot')
# Input counts table and output figure path.
inpath = '~/Documents/tbl_20180621_01_trg/heppy/set_EWK_QCD/sel_030/020_counts/tbl_n.process.htbin.mhtbin.min4Dphi.txt'
outpath = 'log_n.process.htbin.mhtbin.min4Dphi.png'
d = pd.read_table(inpath, delim_whitespace=True)
# Plot counts on a log scale.
d['log10n'] = np.log10(d['n'])
# Drop unwanted mhtbin edge values before plotting.
d = d[d.mhtbin != 0]
d = d[d.mhtbin != 260]
d = d[d.mhtbin != 270]
d = d[d.mhtbin != 280]
d = d[d.mhtbin != 290]
d = d[d.mhtbin != 300]
# Drop unwanted htbin edge values.
d = d[d.htbin != 50]
d = d[d.htbin != 100]
d = d[d.htbin != 150]
d = d[d.htbin != 850]
d = d[d.htbin != 900]
# One panel per (htbin, mhtbin), one colored step line per process.
g = sns.FacetGrid(d, col="mhtbin", row="htbin", hue='process', margin_titles=True, legend_out = False, sharey=False)
g.map(plt.step, 'min4Dphi', 'log10n')
g.add_legend()
plt.savefig(outpath)
|
988,959 | 243d336d508971f6fbd49b4206456b2a71e2fbf6 | import numpy as np
from scipy.linalg import sqrtm
def calculate_fid(act1, act2):
    """
    Calculate the Frechet Inception Distance between two activation sets.

    act1, act2: 2-D arrays of shape (n_samples, n_features).
    Returns FID = ||mu1 - mu2||^2 + Tr(C1 + C2 - 2*sqrt(C1 C2)).
    """
    mu1 = act1.mean(axis=0)
    mu2 = act2.mean(axis=0)
    sigma1 = np.cov(act1, rowvar=False)
    sigma2 = np.cov(act2, rowvar=False)
    # Squared Euclidean distance between the two means.
    mean_term = np.sum((mu1 - mu2) ** 2.0)
    # Matrix square root of the covariance product; numerical noise can make
    # the result complex, so keep only the real part in that case.
    covmean = sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    trace_term = np.trace(sigma1 + sigma2 - 2.0 * covmean)
    return mean_term + trace_term
if __name__ == "__main__":
    # Test with 2 random collections of activations
    # NOTE(review): 10 samples in 2048 dims gives a rank-deficient covariance;
    # fine for a smoke test, and sqrtm on 2048x2048 is slow.
    act1 = np.random.random(10*2048)
    act1 = act1.reshape((10, 2048))
    act2 = np.random.random(10*2048)
    act2 = act2.reshape((10, 2048))
    # calculate fid between act1 and act1
    # should return 0
    fid = calculate_fid(act1, act1)
    print("FID (same) :{:.3f}".format(fid))
    # FID between act1 and act2
    # Should return large number...
    fid = calculate_fid(act1, act2)
    print("FID (different) : {:.3f}".format(fid))
988,960 | e3c82819a0a926e9153ae1a80463d92d14b94a20 | from helper import *
if __name__ == '__main__':
    #beam_width_list = [3,5,10,15,20,25]
    # Compare beam-search output against the baseline over a range of beam
    # widths; write match-rate / match-count / unk-count tables (Python 2).
    out_file_match_rate = open('out_match_rate_with_bw/ana_match_rate.txt','w')
    out_file_match_count = open('out_match_rate_with_bw/ana_match_count.txt','w')
    out_file_unk_count = open('out_match_rate_with_bw/ana_unk_count.txt','w')
    print >> out_file_match_rate, 'Beam Width & Beam Search & Base Line'
    print >> out_file_match_count, 'Beam Width & Beam Search & Base Line'
    print >> out_file_unk_count, 'Beam Width & Beam Search & Base Line'
    # NOTE(review): output files are never closed; relies on interpreter exit.
    for bw in xrange(5,55,5):
        beamsearch = '../updated/updated_out.bw' + str(bw) + '.ns0.sfw0.type33.txt'
        baseline = '../baseline/baseline_result_33per_' + str(bw) + 'width.txt'
        beamsearch_dict = GetDataDict(beamsearch)
        baseline_dict = GetDataDict(baseline)
        res_bms = match_rate(beamsearch_dict)
        res_bl = match_rate(baseline_dict)
        '''
        print result
        '''
        print_result(out_file_match_rate, out_file_match_count, out_file_unk_count, bw, res_bms, res_bl)
988,961 | fcc4232f3ac7a1f4c80f282066aa206e59ce270e | import smtplib
# Bulk-mails the contents of file.txt to every address listed in 'out'.
# NOTE(review): credentials are hard-coded placeholders — never commit real
# credentials; read them from the environment or a secrets store instead.
# open a file with the list of emails.
with open("out","r") as f:
    l = f.read()
l = l.split("\n")
l = [i for i in l if "@" in i]  # keep only lines that look like addresses
# open the file that has the message you want to send.
with open("file.txt","r") as f:
    msg = f.read()
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()  # upgrade to TLS before authenticating
s.ehlo()
s.login("TYPE YOUR EMAIL HERE","TYPE YOUR PASSWORD HERE")
# One sendmail call per recipient.
for i in l:
    print (i)
    s.sendmail("TYPE YOUR EMAIL HERE",i,"Subject: TYPE THE SUBJECT HERE\n\n" + msg)
# NOTE(review): the SMTP connection is never closed (missing s.quit()).
print("Done")
988,962 | bb769c04be6f60c1af1a549b1a44593cd5ee0e5c | import sys
stdout = sys.stdout
# Temporarily redirect stdout to stderr so that any import-time printing from
# the ontology module does not pollute this script's generated wiki output.
sys.stdout = sys.stderr
from smart.common.rdf_ontology import *
sys.stdout = stdout
def strip_smart(s):
    """Return *s* with every occurrence of the SMART base URL removed."""
    base_url = "http://smartplatforms.org"
    return s.replace(base_url, "")
def type_start(t):
    """Print the wiki section header, parent link, description and example for *t*."""
    name = type_name_string(t)
    description = t.description
    example = t.example
    print "==%s RDF==\n"%name
    if len(t.parents) == 1:
        print "%s is a subtype of and inherits properties from: [[#%s RDF| %s]]\n"%(type_name_string(t), type_name_string(t.parents[0]),type_name_string(t.parents[0]))
    if description: print "%s"%description+"\n"
    if example:
        print "<pre>%s</pre>\n"%example
def properties_start(type):
    """Open the wiki predicates table for *type* (parameter shadows the builtin)."""
    print """'''%s Properties'''\n{| border="1" cellpadding="20" cellspacing="0"
|+ align="bottom" style="color:#e76700;" |''%s Predicates''
|-""" % (type, type)
def properties_row(property, name, description):
    """Emit one table row: predicate URI, name, description ('property' shadows a builtin)."""
    print "|%s\n|%s\n|%s\n|-"%(property,name, description)
def properties_end():
    """Close the wiki table opened by properties_start()."""
    print """|}"""
def wiki_batch_start(batch):
    """Print a top-level wiki heading for a batch of types."""
    print "\n=%s=\n"%batch
def type_name_string(t):
    """Best-effort human-readable name for an ontology type node.

    Prefers t.name; otherwise falls back to the fragment after '#' in the
    node URI, then to a path component.
    """
    if t.name:
        return str(t.name)
    # NOTE(review): rsplit() without maxsplit makes [1] the *second* piece,
    # not the last — only correct for URIs with a single '#'/'/'; confirm.
    try:
        return str(t.node).rsplit("#")[1]
    except IndexError:  # narrowed from a bare except: no '#' fragment present
        return str(t.node).rsplit("/")[1]
def wiki_payload_for_type(t):
    """Print the full payload documentation (header + predicate table) for *t*."""
    type_start(t)
    wiki_properties_for_type(t)
def wiki_properties_for_type(t):
    """Print the predicate table for *t*; prints nothing when it has no restrictions."""
    if len(t.restrictions) == 0:
        return
    properties_start(t.node)
    # Sort predicates by node URI for a stable table order.
    for c in sorted(t.restrictions, key=lambda r: str(r.node)):
        name = c.doc.name and c.doc.name or ""
        desc = c.doc.description and c.doc.description or ""
        # Object properties link to the section documenting their range class.
        if c.on_class != None:
            desc = desc + "[[#%s RDF | (details...)]]"%(type_name_string(ontology[c.on_class]))
        properties_row(str(c.property), name, desc)
    properties_end()
def wiki_api_for_type(t):
    """Print the REST API documentation (GET calls only) for type *t*."""
    print "=== %s ==="%t.name
    print "[[Developers_Documentation:_RDF_Data#%s_RDF | RDF Payload description]]\n"%t.name
    calls_for_t = sorted(t.calls)
    last_description = ""
    for call in calls_for_t:
        if (str(call.method) != "GET"): continue # Document only the GET calls for now!
        # Print each distinct description once, before its group of calls.
        if (str(call.description) != last_description):
            print str(call.description)
        print " ", strip_smart(str(call.method)), str(call.path)
        # NOTE(review): this re-checks the same condition as above, so the
        # blank line prints after the first call of each group — presumably
        # intended as a group separator; confirm.
        if (str(call.description) != last_description):
            print ""
        last_description = str(call.description)
main_types = []
helper_types = []
for t in api_types:
if (t.base_path == None):
helper_types.append(t)
else:
main_types.append(t)
def type_sort_order(x):
    """Sort key: capitalized first '_'-token of the first call's category."""
    category = str(x.calls[0].category)
    head, _, _ = category.partition("_")
    return head.capitalize()
def call_sort_order(x):
    """Sort key for API calls: HTTP method rank (GET < POST < PUT < DELETE);
    'items' calls sort just before their non-items counterpart."""
    rank = {"GET": 10, "POST": 20, "PUT": 30, "DELETE": 40}[x.method]
    return rank - 1 if "items" in x.category else rank
main_types = sorted(main_types, key=lambda x: type_sort_order(x) + str(x.name))
helper_types = sorted(helper_types, key=lambda x: str(x.node))
import sys
if __name__=="__main__":
if "payload" in sys.argv:
current_batch = None
for t in main_types:
if type_sort_order(t) != current_batch:
current_batch = type_sort_order(t)
wiki_batch_start(current_batch+" Types") # e.g. "Record Items" or "Container Items"
wiki_payload_for_type(t)
wiki_batch_start("Core Data Types") # e.g. "Record Items" or "Container Items"
for t in helper_types:
wiki_payload_for_type(t)
if "api" in sys.argv:
current_batch = None
for t in main_types:
if type_sort_order(t) != current_batch:
current_batch = type_sort_order(t)
wiki_batch_start(current_batch+" Calls")
wiki_api_for_type(t)
|
988,963 | bf7b0399f94dbf15fa07223b4264643429bbe50b | from mingus.midi import fluidsynth
from mingus.containers.note import Note
from mingus.containers.note_container import NoteContainer
import time
fluidsynth.init("../assets/sound_fonts/Drama Piano.sf2", 'alsa')
# # and b with notes
#fluidsynth.play_Note(Note("A"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A##"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("B"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("Bb"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("Bbb"))
#time.sleep(0.25)
# Transpose note
#fluidsynth.play_Note(Note("A-1"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A-2"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A-3"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A-4"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A-5"))
#time.sleep(0.25)
# Multiple notes at once
#fluidsynth.play_NoteContainer(NoteContainer(["C", "E"]))
#time.sleep(0.25)
# Fur elise
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("B"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("C"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A"))
#time.sleep(1)
#
#fluidsynth.play_Note(Note("C"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("B"))
#time.sleep(1)
#
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("G#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("B"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("C"))
#time.sleep(1.0)
#
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D#"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("E"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("B"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("D"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("C"))
#time.sleep(0.25)
#fluidsynth.play_Note(Note("A"))
#time.sleep(1)
|
988,964 | 290560a4431604c984eb453860bcc4c9ddcafb6a | i = 0
# while True:
# if i == 3 or i == 7:
# i += 1
# continue
# if i == 10:
# break
# print(f"test {i}")
# i += 1
# else:
# print("hello from the else")
while i < 10:
i += 1
if i == 3 or i == 7:
continue
print(f"test {i}")
else:
print("hello from the else\n\n\n")
x = [1,2.5,3,"4","test"]
for i in x:
# if i == "4":
# break
if i == 2.5:
continue
print(i)
else:
print("hello from the else\n\n\n")
print(i)
d = {1: "one", 2: "two"}
for d_key, d_value in d.items():
print(d_key, d_value)
print("\n\n\n")
for i in range(5, 12, 2):
print(i)
print("\n\n\n")
for i, e in enumerate(x):
print(i, e)
print("\n\n\n")
x = [[1,2,3], [4,5,6], [7,8,9]]
for k, i in enumerate(x):
print(f"{k+1} row")
for j in i:
if j % 3 == 0:
continue
if j == 5:
break
print(j)
print(f"end of {k+1} row")
if True:
for i in [1,2,3,4]:
print(i) |
988,965 | 40ec4d03027f94977bbbd1f175f505fd1af38d9a | import spacy
from functools import reduce
from typing import List
nlp = spacy.load("en_core_web_sm")
def merge_sentences_min_len(text: List[str], min_len: int) -> List[str]:
    """
    Combine multiple sentences to ensure
    every one has at least a length of `min_len`
    """
    def accumulate(groups, sentence):
        # Keep appending to the current group while its combined
        # character count is still below the threshold.
        if groups and sum(len(s) for s in groups[-1]) < min_len:
            groups[-1].append(sentence)
            return groups
        return groups + [[sentence]]

    grouped = reduce(accumulate, text, [])
    return ['. '.join(group) for group in grouped]
def merge_sentences(text: List[str], min_len: int) -> List[str]:
    """
    Combine multiple sentences to ensure
    every one has a minimum number of tokens
    """
    def reducer(acc, x):
        x = x.strip()
        # Token count of the current merged chunk, as measured by the
        # module-level spaCy pipeline `nlp`.
        if acc and (len(nlp(acc[-1])) < min_len):
            # Join with '. ' unless the previous chunk already ends in
            # sentence-final punctuation ('.' or ':').
            if acc[-1] and (acc[-1][-1]) not in ['.', ':']:
                acc[-1] += '. {}'.format(x)
            else:
                acc[-1] += ' {}'.format(x)
            return acc
        else:
            return acc + [x]
    new_text = reduce(reducer, text, [])
    return new_text
|
988,966 | 6e34b374a61411e2d73309737ddcb2227cb2cb52 | n = int(input())
s = input()  # text to shift; appears to assume uppercase A-Z — TODO confirm
ans = ""
# Shift every character forward by n code points, wrapping past 'Z'.
# NOTE(review): the single `t -= 26` only handles shifts of at most 26.
for i in s:
    t = ord(i)
    t += n
    if t > 90:  # 90 == ord('Z')
        t -= 26
    ans += chr(t)
print(ans)
|
988,967 | 2eb65d7e981408ccd21b7225717e56dd4976bd12 | # Copyright VeHoSoft - Vertical & Horizontal Software
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Facturación Electrónica CFDI v3.3 FacturaTool',
'summary': 'Permite emitir Facturas CFDI v3.3 validas para el SAT',
'version': '12.0.1.0.1',
'category': 'Invoicing Management',
'author': 'VeHoSoft',
'website': 'http://www.vehosoft.com',
'license': 'AGPL-3',
'depends': [
'facturatool_account',
],
'data': [
'security/ir.model.access.csv',
'views/partner_views.xml',
'views/product_views.xml',
'views/account_views.xml',
'views/report_invoice.xml',
],
'qweb': [
],
'application': False,
'installable': True,
"external_dependencies": {
"python": ["zeep"],
},
}
|
988,968 | 88e274663a3e1d693a670c064769b3be6c87ffd0 | from scabbard import get_client
import json
import datetime
def test__v3_4_0_shop_altairports_flights_post():
client = get_client()
today = datetime.datetime.now()
departure1_datetime = (today + datetime.timedelta(days=10)).strftime("%Y-%m-%dT%H:%M:%S")
departure2_datetime = (today + datetime.timedelta(days=11)).strftime("%Y-%m-%dT%H:%M:%S")
j = json.loads('''{
"OTA_AirLowFareSearchRQ": {
"OriginDestinationInformation": [
{
"DepartureDateTime": "''' + departure1_datetime + '''",
"DestinationLocation": {
"LocationCode": "LAX"
},
"OriginLocation": {
"LocationCode": "DFW"
},
"RPH": "1"
},
{
"DepartureDateTime": "''' + departure2_datetime + '''",
"DestinationLocation": {
"LocationCode": "DFW"
},
"OriginLocation": {
"LocationCode": "LAX"
},
"RPH": "2"
}
],
"POS": {
"Source": [
{
"PseudoCityCode":"F9CE",
"RequestorID": {
"CompanyName": {
"Code": "TN"
},
"ID": "REQ.ID",
"Type": "0.AAA.X"
}
}
]
},
"TPA_Extensions": {
"IntelliSellTransaction": {
"RequestType": {
"Name": "50ITINS"
}
}
},
"TravelerInfoSummary": {
"AirTravelerAvail": [
{
"PassengerTypeQuantity": [
{
"Code": "ADT",
"Quantity": 1
}
]
}
]
}
}
}''')
itineraries = client.Air_Search\
.V3_4_0ShopAltairportsFlightsPost(bargainfindermaxalternateairportrequest=j,
mode='live',
limit='50',
offset=1
)\
.result()
assert '3.4.0' == itineraries['OTA_AirLowFareSearchRS']['Version']
|
988,969 | 384bbd6cbe8e81dc123e71a144e8068fe6d31c8d | # 인스턴스 메서드
# - 인스턴스 변수(self)들을 이용해 기능을 구현하는 메서드
# 클래스 메서드
# - 클래스 변수(cls)들을 활용해 기능을 구현하는 메서드
# - 메서드 바로 위에 @classmethod를 추가해서 표시한다
# 문제 1. 승객의 화물을 bus_trunk 딕셔너리에 추가하는 메서드를 만들어보세요
# 승객의 좌석번호가 Key값이 되고, value는 승객의 화물 리스트입니다.
# bus_trunk에 물건을 실었다면 승객의 cargo리스트는 텅 비어야 합니다.
class Passenger:
    """A bus passenger whose cargo can be moved into the shared bus trunk."""

    # Class-level trunk shared by every passenger: seat number -> cargo list.
    bus_trunk = {}

    def __init__(self, name, seat, cargo):
        self.name = name
        self.seat = seat
        self.cargo = cargo

    def information(self):
        """Describe this passenger and the current state of the shared trunk."""
        return (f'승객명 - {self.name} / 좌석 - {self.seat} / '
                f'트렁크 - {Passenger.bus_trunk}')

    def load(self):
        """Move this passenger's cargo into the bus trunk, emptying their hands."""
        Passenger.addToBusTrunk(self.seat, self.cargo)
        self.cargo = []

    @classmethod
    def addToBusTrunk(cls, seat, cargo):
        """Register *cargo* under *seat* in the class-wide trunk.

        Class methods only see class-level state, not instance attributes.
        """
        cls.bus_trunk[seat] = cargo
pass01 = Passenger('개똥이', 'A1', ['치약', '칫솔', '물', '화장품'])
pass01.load()
print(Passenger.bus_trunk) |
988,970 | 2c625343d3bd23c95d1d74345f5739368f8d39ca | name = "pywebcollect"
from pywebcollect.pywebcollect import WebCollect
__all__ = ["WebCollect"]
|
988,971 | b81d425d2cb7e60022348da4b1065c446d3f42fa | import os
import io
import re
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Return the text of the file at *parts*, joined under this setup.py's directory."""
    return io.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
    """Extract the `__version__ = "..."` value from the given file.

    Raises RuntimeError when no version assignment is found.
    """
    version_file = read(*file_paths)
    # Match a module-level __version__ assignment with either quote style.
    version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest."""
    user_options = [('pytest-args=', 'a', 'Arguments to pass into py.test')]

    def initialize_options(self):
        TestCommand.initialize_options(self)
        # Extra CLI arguments forwarded verbatim to pytest.
        self.pytest_args = []

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here: pytest may not be installed when setup.py is parsed.
        import pytest
        errno = pytest.main(self.pytest_args.split(' '))
        sys.exit(errno)
setup(
name='pyglotz',
version='0.1.2',
description='Python interface to the Glotz API (www.glotz.info)',
url='https://github.com/3OW/pyglotz',
author='3OW',
author_email='',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
keywords='glotz',
packages=find_packages(),
include_package_data=True,
install_requires=['requests'],
cmdclass={'test': PyTest},
tests_require = [
'flake8>=3.7.7',
'flake8-docstrings>=1.3.0',
'flake8-import-order>=0.18',
'flake8-quotes>=1.0.0',
'pep8-naming>=0.7.0',
'pycodestyle>=2.4.0',
'pytest>=5.0.0 ; python_version >= "3.5"',
'pytest-cov>=2.6.1',
'pytest-flake8>=1.0.2'
],
)
|
988,972 | dab4ca66150263c243bc956b977191ee131a285b | import numpy as np
from random import seed
from .node import Node
class AritificalNeuralNetwork:
    """Feed-forward neural network with random weights and sigmoid activations.

    NOTE(review): the class name misspells "Artificial"; kept as-is for
    interface compatibility with existing callers.
    """

    def __init__(self, num_inputs, num_hidden_layers, num_nodes_hidden, num_nodes_output):
        # num_nodes_hidden is indexed per hidden layer, so it is presumably a
        # sequence of layer widths — TODO confirm with callers.
        self.num_inputs = num_inputs;
        self.network = self.__initialize_network__(num_inputs, num_hidden_layers, num_nodes_hidden, num_nodes_output)

    def __compute_weighted_sum__(self, inputs, weights, bias):
        # Elementwise product summed, plus the bias term.
        return np.sum(inputs * weights) + bias

    def __node_activation__(self, weighted_sum):
        # Logistic sigmoid: 1 / (1 + e^-x).
        return 1.0 / (1.0 + np.exp(-1 * weighted_sum))

    def __initialize_network__(self, num_inputs, num_hidden_layers, num_nodes_hidden, num_nodes_output):
        """Build a {layer_name: {node_name: Node}} dict with random weights/biases."""
        num_nodes_previous = num_inputs
        network = {}
        # One pass per hidden layer, plus a final pass for the output layer.
        for layer in range(num_hidden_layers + 1):
            if layer == num_hidden_layers:
                layer_name = 'output'
                num_nodes = num_nodes_output
            else:
                layer_name = 'layer_{}'.format(layer + 1)
                num_nodes = num_nodes_hidden[layer]
            network[layer_name] = {}
            for node in range(num_nodes):
                node_name = 'node_{}'.format(node+1)
                # Each Node holds one weight per node of the previous layer
                # and a single bias, all rounded to 2 decimals.
                network[layer_name][node_name] = Node(
                    np.around(np.random.uniform(size=num_nodes_previous), decimals=2),
                    np.around(np.random.uniform(size=1), decimals=2))
            num_nodes_previous = num_nodes
        return network

    def generate_random_inputs(self):
        # Fixed seed makes the generated inputs reproducible across runs.
        np.random.seed(12)
        return np.around(np.random.uniform(size=self.num_inputs), decimals=2)

    def forward_propagate(self, inputs):
        """Propagate *inputs* through every layer and return the output activations."""
        layer_inputs = list(inputs) # start with the input layer as the input to the first hidden layer
        for layer in self.network:
            layer_data = self.network[layer]
            layer_outputs = []
            for layer_node in layer_data:
                node_data = layer_data[layer_node]
                # compute the weighted sum and the output of each node at the same time
                node_output = self.__node_activation__(self.__compute_weighted_sum__(layer_inputs, node_data.weights, node_data.bias))
                layer_outputs.append(np.around(node_output[0], decimals=4))
            if layer != 'output':
                print('The outputs of the nodes in hidden layer number {} is {}'.format(layer.split('_')[1], layer_outputs))
            layer_inputs = layer_outputs # set the output of this layer to be the input to next layer
        network_predictions = layer_outputs
        return network_predictions
|
988,973 | 734cc8035d7bc3c708936a0312e11324158cca51 | # ==ElementTreeでRSSをパースする==
from xml.etree import ElementTree
# parse() reads the file and returns an ElementTree object.
tree = ElementTree.parse('rss2.xml')
# getroot() returns the Element for the XML root (here, the RSS element).
root = tree.getroot()
# Print the title and link text of every channel/item element.
for item in root.findall('channel/item'):
    title = item.find('title').text
    link = item.find('link').text
    print(link, title)
988,974 | 08a00c81184f0bd8ae89c434839713bfd1332a45 | #!/usr/bin/env python
"""
This module contains functions for fitting models to the numerically
generated average levenshtein distances between random strings.
"""
import numpy as np
import json
from importlib_resources import files
codegolf_ref = """https://codegolf.stackexchange.com/questions/197565/
can-you-calculate-the-average-levenshtein-distance-exactly/197576#197576"""
_precomputed = {20: files("expected_levenshtein.models").joinpath(
"k20_n6k_r10k_models.json")}
def load_precomputed(k: int):
    """Load precomputed models that come with the package.

    The models describe polynomials of degree 5 that were fitted
    to average levenshtein distances generated for a specific
    alphabet size k. The values of k for which models are
    available are listed in expected_levenshtein.fit._precomputed.

    Args:
        k (int): Alphabet size

    Returns:
        The parsed JSON model data packaged for alphabet size *k*
        (row indices, polynomial coefficients and mean squared errors,
        as stored in the bundled file).

    Raises:
        AssertionError: if no packaged model exists for *k*.
    """
    k = int(k)
    # BUG FIX: the assertion message used to be a *tuple* of strings with
    # .format() called on the tuple, which raised AttributeError instead of
    # showing the intended message whenever the assertion failed.
    assert k in _precomputed, (
        'The current version of expected-levenshtein '
        'does not come with models for k={}. '
        'k values of available models are listed in '
        'expected_levenshtein.fit._precomputed'.format(k))
    with open(_precomputed[k], "r") as fin:
        return json.load(fin)
def poly(x, coeffs):
    """Evaluate a polynomial with the given `coeffs` at `x`.

    Args:
        x (array_like, float or int): x-positions at which
            to evaluate the polynomial
        coeffs (array_like): array of polynomial coefficients,
            e.g. [c0, c1, c2] for the polynomial
            c0 + c1 * x + c2 * x ** 2

    Returns:
        array_like: y-values for the polynomial at the given x positions
    """
    terms = [c * x ** power for power, c in enumerate(coeffs)]
    return np.sum(terms, axis=0)
def _fit_poly(y_data, deg=5):
    """Fit polynomial of degree `deg` to the given y_data.
    x-values are assumed to be the integers in the interval [1, len(y_data)].
    Args:
        y_data (array_like): data to fit the model to.
        deg (int, optional): degree of the polynomial to fit.
    Returns:
        array_like: array of the deg + 1 coefficients of the fitted polynomial
        array_like: mean squared error of the model
            in the interval [1, len(y_data)].
    """
    # Fit on the 1-based integer grid, then score the fit on that same grid.
    x = np.arange(1, len(y_data) + 1)
    coeffs = np.polynomial.polynomial.polyfit(
        x, y_data, deg=deg)
    # Mean squared deviation between the data and the fitted polynomial.
    y_pred = poly(x, coeffs)
    return coeffs, np.mean((y_data - y_pred) ** 2)
def model_average_levenshtein(sampled_levenshtein, model_rows='all', deg=5):
    """Fit polynomial models to rows obtained from a
    sample.random_average_levenshtein() run.

    For a particular length n, the model is fitted only to the
    data for lengths <= n. DO NOT use a model generated for a length
    n to predict an expected distance between a string of length n
    a longer one!

    Args:
        sampled_levenshtein (array_like): distance matrix as returned by
                                          sample.random_average_levenshtein()
        model_rows (str, optional): the rows in the distance matrix to which
                                    models should be fitted. Only rows >= 25
                                    are accepted. If not specified, models
                                    will be generated for all rows
                                    with index >= 25.
        deg (int, optional): Degree of the polynomials that will be fitted.

    Returns:
        array_like: row indices for which models were computed
        array_like: coefficients of the fitted polynomials
        array_like: mean squared deviations between values predicted by the
                    models and the input data.
    """
    n = sampled_levenshtein.shape[0]
    assert n >= 25, """Modeling is not supported for n < 25.
    The exact expected distances are known for these lengths: {}""".format(
        codegolf_ref)
    if model_rows == 'all':
        model_rows = np.arange(25, n)
    else:
        model_rows = np.array(model_rows)
        model_rows = model_rows[model_rows >= 25]
        assert len(model_rows) > 0, """Modeling is not
        supported for n < 25. The exact expected distances
        are known for these lengths: {}""".format(
            codegolf_ref)
    n_rows = len(model_rows)
    coeffs = np.empty(shape=(n_rows, deg + 1))
    mses = np.empty(n_rows)
    for i, row in enumerate(model_rows):
        # Fit only on lengths <= row. BUG FIX: `deg` was previously not
        # forwarded to _fit_poly, so any deg != 5 produced a shape mismatch
        # against the preallocated `coeffs` array.
        c, m = _fit_poly(sampled_levenshtein[row, :row + 1], deg=deg)
        coeffs[i] = c
        mses[i] = m
    return model_rows, coeffs, mses
|
988,975 | 92081a8a847592240ba5e03f4c28c8ec857a1ec5 | import PIL.Image
import PIL.ImageDraw
import face_recognition
# Detect faces in a test image and draw a red box around each one.
image = face_recognition.load_image_file("testImage.jpg")
# Find all the faces in the image.
face_locations = face_recognition.face_locations(image)
number_of_faces = len(face_locations)
print("Found {} faces in this image".format(number_of_faces))
# Load the image into a Python Imaging Library (PIL) object.
pil_image = PIL.Image.fromarray(image)
for face_location in face_locations:
    # Each location is a (top, right, bottom, left) tuple of pixel coordinates.
    top, right, bottom, left = face_location
    print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {},".format(
        top,left,bottom,right))
    # Draw a box around the face.
    draw = PIL.ImageDraw.Draw(pil_image)
    draw.rectangle([left,top,right,bottom],outline="red")
print('Done Looping')
pil_image.show()
|
988,976 | 993c2535e0ebe74a095232a5521c79159c38f273 | print("counting freq. of element in list")
print(" Different Ways:")
print("Taking list as input:")
# Read a space-separated list of integers from stdin.
s = list(map(int, input().split(" ")))
print("Using Dictionary")
# using traditional way
freq = {}
for i in s:
    if i in freq:   # seen before: bump the count
        freq[i] += 1
    else:           # first occurrence
        freq[i] = 1
for k, v in freq.items():
    print(k, v)
# using short way
counter = {}
for i in s:
    # BUG FIX: was `counter.get(0,1) + 1`, which always read key 0 and
    # ignored the element; count the current element with default 0.
    counter[i] = counter.get(i, 0) + 1
print(counter)
print("Using Counter")
# Expand the counts back into a sorted flat list of repeated elements.
# BUG FIX: the original iterated `for v in sorted(counter.items())` and then
# called range() on a (key, value) tuple while reusing a stale `k`.
f = []
for k, v in sorted(counter.items()):
    f.extend([k] * v)
print(f)
|
988,977 | d8ce4ae8a1c6b2b98253d24f081c50dd02a0ada7 | # https://docs.python.org/3/library/urllib.parse.html
# <scheme>://<netloc>/<path>;<parameters>?<query>#<fragment>
from urllib.parse import urlparse
# Split the stock-detail URL into its components.
a = urlparse("http://goodinfo.tw/StockInfo/StockDetail.asp?STOCK_ID=3008")
print(a)         # the full ParseResult namedtuple
print(a.netloc)  # host part: goodinfo.tw
print(a.path)    # /StockInfo/StockDetail.asp
print(a.query)   # STOCK_ID=3008
|
988,978 | 13e336e70fdad9766eeca485cef231cc1bdd7522 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Blood-type -> personality lookup table.
dict1 = {'A':'內向穩重','B':'外向樂觀','O':'堅強自信','AB':'聰明自然'}
blood = input("輸入血型")
# dict.get returns None for unknown blood types.
name = dict1.get(blood)
# Idiom fix: test for None with `is`, not `==`.
if name is None:
    print("沒有這個血型喔")
else:
    print(name)
|
988,979 | b881c7b6bcb955bd61ed7da3dc4ab9ccc424a53e | # coding:utf-8
# coding:utf-8
import visa
import pyvisa
import threading
import time
import csv
import os
from aw.core.Input import SUC
from aw.core.Input import getLbsCaseLogPath, getCurCaseName
class Battery(object):
current_value_list = []
def __init__(self, ip):
self.ip = "TCPIP::{}::INSTR".format(ip)
self.visa_data = visa.ResourceManager()
self.inst = self.visa_data.open_resource("TCPIP::10.100.5.112::INSTR")
self.status = False
def start_record_current(self):
csvpath = os.path.join(getLbsCaseLogPath, getCurCaseName + '.csv')
self.f = open(csvpath, 'w', 'a')
self.csvWrite = csv.writer(self.f)
self.status = True
self.setSampCount()
thd = threading.Thread(target=self.readCurrentFtch, args=(self.csvWrite))
thd.setDaemon(True)
thd.start()
def getCurrentValue(self):
i = 1
end = time.time() + 10
while time.time() < end:
self.inst.write("READ?")
VALUE = self.inst.read()
i += 1
print(VALUE)
def write_command(self, command=None):
self.status = False
if command:
ret = self.inst.write(command)
return SUC, ret
def setNplcDC(self, nplc = 0.2):
'''
@summary: 设置nplc
@param nplc:设置的nplc
'''
cmd = 'CONF:CURR:DC'
self.write_command(cmd)
cmd = 'CURR:DC:NPLC %s' % nplc
self.write_command(cmd)
def setDCAccuracy(self, acc=0.1):
'''
@summary: 设置精度
@param nplc:设置的精度值
'''
cmd = 'CONF:CURR:DC %s 0.001' % acc
self.write_command(cmd)
def uploadFileToPC(self, fileName):
'''
@summary: 下载文件到电脑
@param fileName: 要下载的文件名
'''
cmd = r'MMEM:UPL? "USB:\%s.csv"' % fileName
now = time.localtime(time.time())
ret = self.write_command(cmd)
VALUE = self.inst.read()
self.current_value_list.append(VALUE.split(',')[0:7])
f = open(fileName + '.txt', 'w')
f.write(VALUE)
f.close
print(len(VALUE.split(',')))
def saveFileToUSB(self, fileName):
'''
@summary: 保存文件到USB
@param fileName: 要下载的文件名
'''
import datetime
now = time.localtime(time.time())
cmd = r'MMEM:STOR:DATA RDG_STORE,"USB:\%s.csv"' % fileName
ret = self.write_command(cmd)
print(ret)
def setSampCount(self, COUNT=5000):
'''
@summary: 设置采集次数
@param count:要采集的次数
'''
ret = self.inst.write("SAMP:COUNT 5000")
ret = self.inst.write("TRIG:COUNT 1")
ret = self.inst.write("INIT")
def readCurrentFtch(self, csvWrite):
i = 0
while self.status:
ret = self.inst.write("R? 300")
VALUE = self.inst.read()
self.csvWrite.rows(VALUE.split(','))
i += len(VALUE.split(','))
print(len(VALUE.split(',')))
print(i)
def stopReadCurrent(self):
'''
@summary: 停止连接
'''
self.status = False
self.inst.close()
self.visa_data.close()
self.f.close()
if __name__=="__main__":
rm = visa.ResourceManager()
inst = rm.open_resource("TCPIP::192.168.1.103::INSTR")
inst.set_visa_attribute(pyvisa.constants.VI_ATTR_TMO_VALUE, 2000000000)
# inst.set_visa_attribute( pyvisa.constants.VI_ATTR_TMO_VALUE, pyvisa.constants.VI_TMO_INFINITE )
# print( inst.get_visa_attribute( pyvisa.constants.VI_ATTR_TMO_VALUE) )
for i in range(0,100000):
inst.write("*idn?")
str = inst.read()
if ( len(str) < 16 ):
raise Exception("error on %d" % (i) )
# print( inst.read() )
inst.close()
rm.close() |
988,980 | 9e07b96082cc0dfaf6e8918e50989161823dcd58 | # Generated by Django 3.1 on 2020-11-10 15:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: declare the Indexkeyword model over the existing,
    unmanaged `indexKeyword` database table."""

    dependencies = [
        ('learningLevel', '0003_mdlcourse_mdlenrol_mdluserenrolments'),
    ]

    operations = [
        migrations.CreateModel(
            name='Indexkeyword',
            fields=[
                ('keyword', models.CharField(blank=True, max_length=50, null=True)),
                ('indexnum', models.CharField(blank=True, db_column='indexNum', max_length=50, null=True)),
                ('k_id', models.IntegerField(primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'indexKeyword',
                # managed=False: Django will not create or drop this table.
                'managed': False,
            },
        ),
    ]
|
988,981 | eda85e3a7cb170917e5f08d6f24d35a888874dc5 | def is_two_palindrome(word) :
'''The function tests if the given string is a “two palindrome” or not.
in this function i used:
slicing for string in order to get the two half and the reverse for each
one and compare them.
i used special condition for 1 length word so it match the expected result.
'''
return True if len(word) == 1 else word[:len(word)//2] == \
word[len(word)//2-1::-1] and word[len(word)//2+len(word)%2:] == \
word[:len(word)//2-(len(word)+1)%2:-1]
def uni_sort(firstList, secondList):
    '''Combine two unsorted lists of integers into one sorted list of
    unique values.

    The original de-duplicated with an O(n**2) scan over the combined
    list; a set yields the same result (first-occurrence order is
    irrelevant because the output is sorted) in O(n log n) overall.
    '''
    return sorted(set(firstList + secondList))
def dot_product(firstVector, secondVector):
    '''Return the dot product of two equal-length numeric vectors.

    Pairs corresponding components with zip and accumulates the
    products; empty vectors yield 0.
    '''
    total = 0
    for a, b in zip(firstVector, secondVector):
        total += a * b
    return total
def list_intersection(firstList, secondList):
    '''Return a sorted (ascending) list of the integers that appear in
    both input lists, without duplicates.

    Set intersection replaces the original O(n*m) per-element membership
    scans with a single O(n + m) pass.
    '''
    return sorted(set(firstList) & set(secondList))
def list_difference(firstList, secondList):
    '''Return a sorted (ascending) list of the integers that appear in
    exactly one of the two input lists (the symmetric difference).

    Set operations replace the original O(n*m) per-element membership
    scans with a single O(n + m) pass.
    '''
    return sorted(set(firstList) ^ set(secondList))
import random , string
def random_string(numberOfChars):
    '''Generate a random string of `numberOfChars` lowercase ASCII letters.

    Draws one letter at a time with random.choice from
    string.ascii_lowercase and joins the results.
    '''
    letters = string.ascii_lowercase
    chars = [random.choice(letters) for _ in range(numberOfChars)]
    return ''.join(chars)
import re
def word_mapper(string):
    '''Return a dict mapping each word of the input text to its number of
    appearances.

    Non-alphanumeric characters and underscores are treated as word
    separators, and matching is case-insensitive.

    Improvements over the original: a single-pass collections.Counter
    replaces the O(n**2) `list.count` comprehension, and the regex is a
    raw string. (The parameter name `string` shadows the module of the
    same name; kept for interface compatibility.)
    '''
    from collections import Counter
    wordsList = re.sub(r"[\W_]", " ", string).lower().split()
    return dict(Counter(wordsList))
def gimme_a_value(func, start):
    '''Generator: yields `start` first, then repeatedly applies `func`
    to the last yielded value and yields the result, forever.'''
    current = start
    while True:
        yield current
        current = func(current)
|
988,982 | 94576df7e69ca8390e2c2f0f20fc670a9d0fe2a1 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import pymysql
class MysqlUtil(object):
    """Thin convenience wrapper around a pymysql connection/cursor pair."""

    def __init__(self, host: str, port: int, user: str, password: str, db: str, charset: str):
        # Fall back to utf8 when no charset is supplied.
        if charset.strip() == '':
            charset = 'utf8'
        db = pymysql.connect(host=host, port=port, user=user, password=password, db=db, charset=charset)
        self.db = db
        self.cursor = db.cursor()

    def executeQuery(self, query_sql):
        """Run a SELECT-style statement and return all fetched rows.

        Returns None on error or when `query_sql` is not a string.
        """
        if isinstance(query_sql, str):
            try:
                # Execute the SQL statement and fetch every row.
                self.cursor.execute(query_sql)
                result = self.cursor.fetchall()
                self.db.commit()
                return result
            except Exception as e:
                # Swallow and report: callers receive None on failure.
                print('Query Exception: ', e)
        else:
            print('error ,plesase input string')
        return None

    def execute(self, execute_sql):
        """Run a single DML statement, committing on success and rolling
        back on error."""
        if isinstance(execute_sql, str):
            try:
                self.cursor.execute(execute_sql)
                self.db.commit()
            except Exception as e:
                # Rollback in case there is any error.
                print('Exception: ', e)
                self.db.rollback()
        else:
            print('error ,plesase input string')

    def execute_batch(self, batch_sql):
        """Run every statement in a list, committing per statement and
        rolling back the one that fails."""
        if isinstance(batch_sql, list):
            for each in batch_sql:
                try:
                    self.cursor.execute(each)
                    self.db.commit()
                except Exception as e:
                    print(batch_sql)
                    print('Exception: ', e)
                    self.db.rollback()
        else:
            print('error ,plesase input list')

    def closes(self):
        """Close the underlying database connection."""
        self.db.close()

    def get_cursor(self):
        """Return the shared cursor.

        BUG FIX: the original evaluated `self.cursor` without returning it,
        so callers always received None.
        """
        return self.cursor
if __name__ == '__main__':
host = "47.101.146.57"
port = 2018
user = "root"
password = "Liuku!!!111"
db = "dm_report"
charset = 'utf8'
mysqlUtil = MysqlUtil(host, port, user, password, db, charset)
sql = "INSERT INTO `toutiao_video`" \
"(`source_site`,`source_site_tag`,`video_id`,`media_name`,`title`,`abstract`,`keywords`,`tag`," \
"`video_duration`,`source_url`,`article_type`,`large_mode`,`large_image_url`,`publish_time`," \
"`create_time`,`check_status`,`check_user_id`,`check_time`)" \
"VALUES('source_site','source_site_tag','video_id','media_name','title','abstract','keywords','tag'," \
"'video_duration','source_url','article_type','large_mode','large_image_url','publish_time'," \
"'2018-11-24 21:24:08','0','',NULL);";
mysqlUtil.execute(sql)
|
988,983 | 5c097752b933fca23f0bfc90d9460bcb940f75dc | # Generated by Django 3.1.7 on 2021-03-18 03:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: give user.Cart.count a default value of 1."""

    dependencies = [
        ('user', '0004_order'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cart',
            name='count',
            field=models.IntegerField(default=1),
        ),
    ]
|
988,984 | 3f92c803d845a4a9c52f1c450d551facd31c5947 | import typing as t
from types import TracebackType
import httpx
import requests
from . import managers as do_managers
class BaseClient:
    """Shared state for the sync (:class:`Client`) and async (:class:`AsyncClient`)
    DigitalOcean API clients.

    Subclasses override :meth:`_load_managers` to attach one manager object per
    API area; each manager is bound as an attribute named after its endpoint.
    """
    API_DOMAIN = "api.digitalocean.com"
    API_VERSION = "v2"
    # One attribute per API area; populated dynamically by _load_managers().
    account: t.Optional[do_managers.AccountManager] = None
    actions: t.Optional[do_managers.ActionsManager] = None
    cdn_endpoints: t.Optional[do_managers.CDNEndpointsManager] = None
    certificates: t.Optional[do_managers.CertificatesManager] = None
    databases: t.Optional[do_managers.DatabasesManager] = None
    domains: t.Optional[do_managers.DomainsManager] = None
    droplets: t.Optional[do_managers.DropletsManager] = None
    firewalls: t.Optional[do_managers.FirewallsManager] = None
    floating_ips: t.Optional[do_managers.FloatingIPsManager] = None
    images: t.Optional[do_managers.ImagesManager] = None
    invoices: t.Optional[do_managers.InvoicesManager] = None
    kubernetes: t.Optional[do_managers.KubernetesManager] = None
    load_balancers: t.Optional[do_managers.LoadBalancersManager] = None
    projects: t.Optional[do_managers.ProjectsManager] = None
    regions: t.Optional[do_managers.RegionsManager] = None
    registry: t.Optional[do_managers.RegistryManager] = None
    snapshots: t.Optional[do_managers.SnapshotsManager] = None
    ssh_keys: t.Optional[do_managers.SSHKeysManager] = None
    tags: t.Optional[do_managers.TagsManager] = None
    volumes: t.Optional[do_managers.VolumesManager] = None
    vpcs: t.Optional[do_managers.VPCsManager] = None
    def __init__(self, token: str = None):
        """Store the API token, reset rate-limit bookkeeping and build headers.

        Raises:
            NotImplementedError: if no token is supplied.
        """
        if token is None:
            raise NotImplementedError("Need you api token.")
        self._token = token
        # Rate-limit counters; refreshed from response headers by _process_response().
        self._ratelimit_limit: t.Optional[int] = None
        self._ratelimit_remaining: t.Optional[int] = None
        self._ratelimit_reset: t.Optional[int] = None
        self._load_managers()
        self.headers = {
            "Authorization": "Bearer {token}".format(token=self._token),
            "Content-Type": "application/json",
        }
    def _load_managers(self) -> None:
        """Attach an instance of every manager listed in do_managers.__all__."""
        for manager in do_managers.__all__:
            klass = getattr(do_managers, manager)
            if issubclass(klass, do_managers.base.BaseManager):
                obj = klass(client=self)
                # Managers become attributes named after their endpoint,
                # e.g. client.droplets, client.domains, ...
                setattr(self, klass.endpoint, obj)
    def _process_response(
        self, response: t.Union[httpx._models.Response, requests.models.Response]
    ) -> None:
        """Record the rate-limit counters exposed in the response headers."""
        if "Ratelimit-Limit" in response.headers:
            self._ratelimit_limit = int(response.headers.get("Ratelimit-Limit"))
        if "Ratelimit-Remaining" in response.headers:
            self._ratelimit_remaining = int(response.headers.get("Ratelimit-Remaining"))
        if "Ratelimit-Reset" in response.headers:
            self._ratelimit_reset = int(response.headers.get("Ratelimit-Reset"))
class Client(BaseClient):
    """Synchronous DigitalOcean API client built on ``requests``."""
    def _load_managers(self) -> None:
        """Attach an instance of every synchronous manager as an attribute."""
        for manager in do_managers.__sync_managers__:
            klass = getattr(do_managers, manager)
            if issubclass(klass, do_managers.base.BaseManager):
                obj = klass(client=self)
                setattr(self, klass.endpoint, obj)
    def request_raw(
        self,
        endpoint: str = "account",
        method: str = "get",
        params: dict = None,
        json: dict = None,
        data: str = None,
    ) -> requests.models.Response:
        """Perform one HTTP request and return the raw response.

        Raises:
            requests.HTTPError: if the API answers with an error status.
        """
        # Bugfix: a shared mutable default ({}) was used here before; use
        # None and create a fresh dict per call instead.
        if params is None:
            params = {}
        assert method in [
            "get",
            "post",
            "put",
            "delete",
            "head",
        ], "Invalid method {method}".format(method=method)
        url = "https://{domain}/{version}/{endpoint}".format(
            domain=self.API_DOMAIN,
            version=self.API_VERSION,
            endpoint=endpoint,
        )
        response = requests.request(
            method=method,
            url=url,
            headers=self.headers,
            params=params,
            json=json,
            data=data,
        )
        # raise exceptions in case of errors
        if not response.ok:
            print(response.content)
            response.raise_for_status()
        # save rate-limit data from the response headers
        self._process_response(response)
        return response
    def request(
        self,
        endpoint: str = "account",
        method: str = "get",
        params: dict = None,
        json: dict = None,
        data: str = None,
    ) -> t.Dict[str, t.Any]:
        """Perform one request and decode the JSON body.

        Returns an empty dict for 202 Accepted / 204 No Content responses.
        """
        response = self.request_raw(endpoint, method, params, json, data)
        if response.status_code in [
            requests.codes["no_content"],
            requests.codes["accepted"],
        ]:
            return {}
        return response.json()
    def fetch_all(
        self,
        endpoint: str,
        key: str,
        params: dict = None,
    ) -> t.List[t.Dict[str, t.Any]]:
        """Collect every page of a paginated listing and return the items.

        Args:
            endpoint: listing endpoint, e.g. ``"droplets"``.
            key: key under which the API returns the items.
            params: extra query parameters; never mutated.
        """
        def get_next_page(result: t.Dict[str, t.Any] = None) -> t.Optional[str]:
            # The API exposes the next page URL (if any) at links.pages.next.
            if (
                result is None
                or "links" not in result
                or "pages" not in result["links"]
                or "next" not in result["links"]["pages"]
            ):
                return None
            return result["links"]["pages"]["next"]
        # Bugfix: copy before adding per_page so neither the caller's dict nor
        # a shared default argument is mutated.
        params = dict(params) if params else {}
        params["per_page"] = 200
        response = self.request(endpoint=endpoint, params=params)
        # in case of strange result like " "firewalls": null "
        if response[key] is None:
            result = []
        elif isinstance(response[key], list):
            result = response[key]
        else:
            result = list(response[key])
        while True:
            next_url = get_next_page(response)
            if next_url is None:
                break
            res = requests.get(next_url, headers=self.headers)
            if not res.ok:
                res.raise_for_status()
            response = res.json()
            result += response[key]
        return result
class AsyncClient(BaseClient):
    """Asynchronous DigitalOcean API client built on ``httpx``.

    Prefer using it as an async context manager so the underlying connection
    pool is closed on exit::

        async with AsyncClient(token=...) as client:
            ...
    """
    def __init__(self, token: str = None):
        super().__init__(token)
        # One shared connection pool for every request made by this client.
        self._rclient = httpx.AsyncClient()
    def _load_managers(self) -> None:
        """Attach an instance of every asynchronous manager as an attribute."""
        for manager in do_managers.__async_managers__:
            klass = getattr(do_managers, manager)
            if issubclass(klass, do_managers.base.AsyncBaseManager):
                obj = klass(client=self)
                setattr(self, klass.endpoint, obj)
    async def request_raw(
        self,
        endpoint: str = "account",
        method: str = "get",
        params: dict = None,
        json: dict = None,
        data: str = None,
    ) -> httpx.Response:
        """Perform one HTTP request and return the raw httpx response.

        Raises:
            httpx.HTTPStatusError: if the API answers with an error status.
        """
        # Bugfix: a shared mutable default ({}) was used here before.
        if params is None:
            params = {}
        assert method in [
            "get",
            "post",
            "put",
            "delete",
            "head",
        ], "Invalid method {method}".format(method=method)
        url = "https://{domain}/{version}/{endpoint}".format(
            domain=self.API_DOMAIN,
            version=self.API_VERSION,
            endpoint=endpoint,
        )
        response = await self._rclient.request(
            method=method,
            url=url,
            headers=self.headers,
            params=params,
            json=json,
            data=data,
        )
        # raise exceptions in case of errors
        response.raise_for_status()
        # save rate-limit data from the response headers
        self._process_response(response)
        return response
    async def request(
        self,
        endpoint: str = "account",
        method: str = "get",
        params: dict = None,
        json: dict = None,
        data: str = None,
    ) -> t.Dict[str, t.Any]:
        """Perform one request and decode the JSON body.

        Returns an empty dict for 202 Accepted / 204 No Content responses.
        """
        response = await self.request_raw(endpoint, method, params, json, data)
        if response.status_code in [
            requests.codes["no_content"],
            requests.codes["accepted"],
        ]:
            return {}
        return response.json()
    async def fetch_all(
        self,
        endpoint: str,
        key: str,
        params: dict = None,
    ) -> t.List[t.Dict[str, t.Any]]:
        """Collect every page of a paginated listing and return the items."""
        def get_next_page(result: t.Dict[str, t.Any] = None) -> t.Optional[str]:
            if (
                result is None
                or "links" not in result
                or "pages" not in result["links"]
                or "next" not in result["links"]["pages"]
            ):
                return None
            return result["links"]["pages"]["next"]
        # Bugfix: copy before adding per_page so neither the caller's dict nor
        # a shared default argument is mutated.
        params = dict(params) if params else {}
        params["per_page"] = 200
        response = await self.request(endpoint=endpoint, params=params)
        # in case of strange result like " "firewalls": null "
        if response[key] is None:
            result = []
        elif isinstance(response[key], list):
            result = response[key]
        else:
            result = list(response[key])
        while True:
            next_url = get_next_page(response)
            if next_url is None:
                break
            # Bugfix: follow pagination with the async httpx client instead of
            # blocking the event loop with a synchronous requests.get() call.
            res = await self._rclient.get(next_url, headers=self.headers)
            res.raise_for_status()
            response = res.json()
            result += response[key]
        return result
    async def __aenter__(self) -> "AsyncClient":
        return self
    async def __aexit__(
        self,
        exc_type: t.Type[BaseException] = None,
        exc_value: BaseException = None,
        traceback: TracebackType = None,
    ) -> None:
        # Bugfix: close the httpx connection pool instead of leaking it.
        await self._rclient.aclose()
|
988,985 | 8109b7777bbc8a6225560056f2f9a316d3285a8b | import sys
sys.path.append('src')
from functions import *
import numpy as np
from numpy.testing import assert_allclose
def test_half_disp():
    """The cython half-dispersion step must match the plain-numpy reference."""
    rows, cols = 7, 2 ** 12
    step = np.random.randn()
    def _rand_field():
        return np.random.randn(rows, cols) + 1j * np.random.randn(rows, cols)
    field = 10 * _rand_field()
    disp = _rand_field()
    expected = np.fft.ifft(np.exp(disp * step / 2) * np.fft.fft(field))
    actual = half_disp_step(field, disp / 2, step, rows, cols)
    assert_allclose(np.asarray(actual), expected)
def test_cython_norm():
    """The cython norm must equal the maximum row 2-norm from numpy."""
    rows, cols = 7, 2 ** 12
    real_part = np.random.randint(0, 100) * np.random.randn(rows, cols)
    imag_part = np.random.randint(0, 100) * 1j * np.random.randn(rows, cols)
    mat = real_part + imag_part
    got = np.asarray(norm(mat, rows, cols))
    want = np.linalg.norm(mat, 2, axis=-1).max()
    assert_allclose(got, want)
def test_fftishit():
    """cyfftshift must agree with numpy's fftshift along the last axis."""
    rows, cols = 7, 2 ** 12
    mat = np.random.randn(rows, cols) + 1j * np.random.randn(rows, cols)
    got = np.asarray(cyfftshift(mat))
    want = np.fft.fftshift(mat, axes=-1)
    assert_allclose(got, want)
def test_fft():
    """cyfft must agree with numpy's forward FFT."""
    rows, cols = 7, 2 ** 12
    mat = np.random.randn(rows, cols) + 1j * np.random.randn(rows, cols)
    assert_allclose(cyfft(mat), np.fft.fft(mat))
def test_ifft():
    """cyifft must agree with numpy's inverse FFT."""
    rows, cols = 7, 2 ** 12
    mat = np.random.randn(rows, cols) + 1j * np.random.randn(rows, cols)
    assert_allclose(cyifft(mat), np.fft.ifft(mat))
class Test_CK_operators:
    """Check the cython Cash-Karp stage helpers against numpy arithmetic.

    The stage arrays are computed once, at class-body (import) time; each
    stage feeds the next, so the order of the assignments below matters.
    The individual tests then recompute every stage with the Cash-Karp
    Runge-Kutta tableau coefficients in plain numpy and compare.
    """
    shape1 = 7
    shape2 = 2**12
    # Random complex field and first stage used as common inputs.
    u1 = np.random.randn(shape1, shape2) + 1j * np.random.randn(shape1, shape2)
    A1 = np.random.randn(shape1, shape2) + 1j * np.random.randn(shape1, shape2)
    A2 = np.asarray(A2_temp(u1, A1, shape1, shape2))
    A3 = np.asarray(A3_temp(u1, A1, A2, shape1,shape2))
    A4 = np.asarray(A4_temp(u1, A1, A2, A3, shape1,shape2))
    A5 = np.asarray(A5_temp(u1, A1, A2, A3, A4, shape1,shape2))
    A6 = np.asarray(A6_temp(u1, A1, A2, A3, A4, A5, shape1,shape2))
    A = np.asarray(A_temp(u1, A1, A3, A4, A6, shape1,shape2))
    Afourth = np.asarray(Afourth_temp(u1, A1, A3, A4, A5, A6, A, shape1,shape2))
    def test_A2(self):
        A2_python = self.u1 + (1./5)*self.A1
        assert_allclose(self.A2, A2_python)
    def test_A3(self):
        A3_python = self.u1 + (3./40)*self.A1 + (9./40)*self.A2
        assert_allclose(self.A3, A3_python)
    def test_A4(self):
        A4_python = self.u1 + (3./10)*self.A1 - (9./10)*self.A2 + (6./5)*self.A3
        assert_allclose(self.A4, A4_python)
    def test_A5(self):
        A5_python = self.u1 - (11./54)*self.A1 + (5./2)*self.A2 - (70./27)*self.A3 + (35./27)*self.A4
        assert_allclose(self.A5, A5_python)
    def test_A6(self):
        A6_python = self.u1 + (1631./55296)*self.A1 + (175./512)*self.A2 + (575./13824)*self.A3 +\
            (44275./110592)*self.A4 + (253./4096)*self.A5
        assert_allclose(self.A6, A6_python)
    def test_A(self):
        # 5th-order solution.
        A_python = self.u1 + (37./378)*self.A1 + (250./621)*self.A3 + (125./594) * \
            self.A4 + (512./1771)*self.A6
        assert_allclose(self.A, A_python)
    def test_Afourth(self):
        # Afourth is the 5th-minus-4th-order difference (embedded error estimate).
        Afourth_python = self.u1 + (2825./27648)*self.A1 + (18575./48384)*self.A3 + (13525./55296) * \
            self.A4 + (277./14336)*self.A5 + (1./4)*self.A6
        Afourth_python = self.A - Afourth_python
        assert_allclose(self.Afourth, Afourth_python)
def pulse_prop(P_p, betas, ss, lamda_c, lamp, lams, N, z, type='CW'):
    """Build a simulation with wave_setup() and propagate the field once.

    Args:
        P_p: input power added to one channel of the field (see wave_setup).
        betas: dispersion Taylor coefficients passed through to wave_setup.
        ss: self-steepening flag (0 or 1) — presumably; TODO confirm.
        lamda_c, lamp, lams: centre/pump/signal wavelengths used by wave_setup.
        N: grid-size parameter forwarded to wave_setup.
        z: propagation distance forwarded to wave_setup.
        type: field type; only 'CW' is handled by wave_setup.

    Returns:
        (u_or, U_or, u, U): copies of the input field in time/frequency
        domain, and the propagated field in time/frequency domain.
    """
    u, U, int_fwm, sim_wind, Dop, non_integrand = \
        wave_setup(P_p, betas, ss, lamda_c, lamp, lams, N, z, type='CW')
    factors_xpm, factors_fwm,gama,tsh, w_tiled = \
        non_integrand.factors_xpm, non_integrand.factors_fwm,\
        non_integrand.gama, non_integrand.tsh, non_integrand.w_tiled
    dz,dzstep,maxerr = int_fwm.dz,int_fwm.dzstep,int_fwm.maxerr
    # The cython propagation routine requires C-contiguous arrays.
    Dop = np.ascontiguousarray(Dop)
    factors_xpm = np.ascontiguousarray(factors_xpm)
    factors_fwm = np.ascontiguousarray(factors_fwm)
    gama = np.ascontiguousarray(gama)
    tsh = np.ascontiguousarray(tsh)
    w_tiled = np.ascontiguousarray(w_tiled)
    # Keep copies of the input so callers can compare before/after.
    u_or, U_or = np.copy(u), np.copy(U)
    U, dz = pulse_propagation(u,dz,dzstep,maxerr, Dop,factors_xpm, factors_fwm, gama,tsh,w_tiled)
    # Back to the time domain (U is fft-shifted along the last axis).
    u = np.fft.ifft(np.fft.ifftshift(U, axes = -1))
    return u_or, U_or, u, U
def wave_setup(P_p, betas, ss, lamda_c, lamp, lams, N, z, type='CW'):
    """Assemble the full simulation state for a propagation run.

    Builds the frequency grid, loss, dispersion operator, Raman factors,
    noise seed and the nonlinear integrand object. Note the fv/sim_wind
    pair is built twice below; the second call (with Df_band=25) overwrites
    the first — presumably intentional, TODO confirm.

    Returns:
        (u, U, int_fwm, sim_wind, Dop, non_integrand)
    """
    # Fibre and integration constants.
    n2 = 2.5e-20
    alphadB = 0
    maxerr = 1e-13
    dz_less = 1e10
    gama = 10e-3
    fr = 0.18
    int_fwm = sim_parameters(n2, 1, alphadB)
    int_fwm.general_options(maxerr, ss)
    int_fwm.propagation_parameters(N, z, 2, dz_less)
    lamda = lamp * 1e-9  # central wavelength of the grid[m]
    M = Q_matrixes(int_fwm.nm, int_fwm.n2, lamda, gama)
    fv, where, f_centrals = fv_creator(
        lamda * 1e9, lams, lamda_c, int_fwm, betas, M, 5,0)
    sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
    # Second grid build with the pump power and explicit band width.
    fv, where, f_centrals = fv_creator(
        lamp, lams, lamda_c, int_fwm, betas, M, P_p,0, Df_band=25)
    p_pos, s_pos, i_pos = where
    sim_wind = sim_window(fv, lamda, f_centrals, lamda_c, int_fwm)
    "----------------------------------------------------------"
    "---------------------Loss-in-fibres-----------------------"
    slice_from_edge = (sim_wind.fv[-1] - sim_wind.fv[0]) / 100
    loss = Loss(int_fwm, sim_wind, amax=0)
    int_fwm.alpha = loss.atten_func_full(fv)
    # Nonlinear coefficient per central frequency.
    int_fwm.gama = np.array(
        [-1j * n2 * 2 * M * pi * (1e12 * f_c) / (c) for f_c in f_centrals])
    "----------------------------------------------------------"
    "--------------------Dispersion----------------------------"
    Dop = dispersion_operator(betas, lamda_c, int_fwm, sim_wind)
    "----------------------------------------------------------"
    "---------------------Raman Factors------------------------"
    ram = Raman_factors(fr)
    ram.set_raman_band(sim_wind)
    "----------------------------------------------------------"
    "--------------------Noise---------------------------------"
    noise_obj = Noise(int_fwm, sim_wind)
    keys = ['loading_data/green_dot_fopo/pngs/' +
            str(i) + str('.png') for i in range(7)]
    D_pic = [plt.imread(i) for i in keys]
    ex = Plotter_saver(True, False, sim_wind.fv, sim_wind.t)
    non_integrand = Integrand(int_fwm.gama, sim_wind.tsh,
                              sim_wind.w_tiled, ss,ram, cython_tick=True,
                              timer=False)
    # Seed the field with noise, then add the CW components.
    noise_new = noise_obj.noise_func(int_fwm)
    u = np.copy(noise_new)
    if type == 'CW':
        u[3, :] += (P_p)**0.5
        # print(np.max(u))
        u[2, :] += (0.000001)**0.5
    U = fftshift(fft(u), axes=-1)
    return u, U, int_fwm, sim_wind, Dop, non_integrand
class Test_energy_conserve():
    """Propagation must conserve the total field energy for ss in {0, 1}."""
    lamda_c = 1051.85e-9
    lamp = 1048
    lams = 1245.98
    betas = np.array([0, 0, 0, 6.756e-2,
                      -1.002e-4, 3.671e-7]) * 1e-3
    N = 10
    P_p = 10
    z = 20
    def _check_energy(self, ss):
        # Propagate once, then compare the total energy in and out.
        u_or, U_or, u, U = pulse_prop(
            self.P_p, self.betas, ss,
            self.lamda_c, self.lamp, self.lams, self.N, self.z, type='CW')
        energy_in = np.sum(np.linalg.norm(u_or, 2, axis=-1) ** 2)
        energy_out = np.sum(np.linalg.norm(u, 2, axis=-1) ** 2)
        assert_allclose(energy_in, energy_out)
    def test_energy_conserve_s0(self):
        self._check_energy(0)
    def test_energy_conserve_s1(self):
        self._check_energy(1)
class Test_cython():
    """The cython integrands must reproduce their pure-python counterparts."""
    lamda_c = 1051.85e-9
    lamp = 1048
    lams = 1245.98
    betas = np.array([0, 0, 0, 6.756e-2,
                      -1.002e-4, 3.671e-7]) * 1e-3
    N = 10
    P_p = 10
    z = 20
    dz = 0.01
    def _compare(self, ss, suffix):
        # Build a fresh simulation, then run both implementations on it.
        u, U, int_fwm, sim_wind, Dop, non_integrand = wave_setup(
            self.P_p, self.betas, ss, self.lamda_c, self.lamp,
            self.lams, self.N, self.z, type='CW')
        fast = getattr(non_integrand, 'cython_' + suffix)(u, self.dz)
        slow = getattr(non_integrand, 'python_' + suffix)(u, self.dz)
        assert_allclose(fast, slow)
    def test_s1(self):
        self._compare(1, 's1')
    def test_s0(self):
        self._compare(0, 's0')
|
988,986 | 06a1c5e72db2cfb13d6b6c3df0d46c849b74a845 | # @Author : lightXu
# @File : sobel_filter.py
# @Time : 2019/7/8 0008 下午 16:26
import numpy as np
from cv2 import imread, cvtColor, COLOR_BGR2GRAY, imshow, waitKey
from digital_image_processing.filters.convolve import img_convolve
def sobel_filter(image):
    """Apply the 3x3 Sobel operator to *image*.

    Returns:
        (magnitude, direction): gradient magnitude cast to uint8 and the
        gradient direction in radians from arctan2.
    """
    horizontal = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    vertical = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
    grad_x = img_convolve(image, horizontal)
    grad_y = img_convolve(image, vertical)
    # NOTE(review): the uint8 cast wraps magnitudes above 255 instead of
    # clipping — confirm this is intended before changing it.
    magnitude = np.sqrt(np.square(grad_x) + np.square(grad_y)).astype(np.uint8)
    direction = np.arctan2(grad_y, grad_x)
    return magnitude, direction
if __name__ == '__main__':
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    sobel, d = sobel_filter(gray)
    # show result images; waitKey(0) blocks until a key is pressed
    imshow('sobel filter', sobel)
    imshow('sobel degree', d)
    waitKey(0)
|
988,987 | eac344f05f90989dc775814c78e82cf0aa77507f | import pygame
pygame.init()
# Box background colours: light grey when idle, white while focused.
COLOR_INACTIVE = (211,211,211)
COLOR_ACTIVE = (255,255,255)
# Default system font, size 15, shared by every InputText instance.
FONT = pygame.font.SysFont(None, 15)
## Class used to allow typing text inside the blocks.
class InputText:
    """A simple single-line text-input box rendered with pygame."""
    def __init__(self, x, y, w, h, text='', only = False, qtdMax = 3):
        # x, y, w, h: rectangle of the box in pixels.
        ## Set the main attributes.
        self.rect = pygame.Rect(x, y, w, h)
        self.color = COLOR_INACTIVE
        self.text = text
        self.txt_surface = FONT.render(text, True, (0,0,0))
        self.active = False
        ## `only` is a collection of the characters allowed to be typed.
        ## False means every character is allowed.
        self.only = only
        ## Maximum number of characters.
        self.maxCar = qtdMax
    ## Handle pygame events targeted at this text box. The parameter is a
    ## pygame event object. Returns True when the event affected this box.
    def handle_event(self, event):
        result = False
        ## Was it a mouse click?
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Check whether the click landed on the box, and switch the
            # background colour accordingly.
            if self.rect.collidepoint(event.pos):
                # Enable typing into the box.
                self.active = True
                self.color = COLOR_ACTIVE
                result = True
            ## Otherwise, disable it.
            else:
                self.active = False
                self.color = COLOR_INACTIVE
        ## Was a key pressed?
        if event.type == pygame.KEYDOWN:
            ## Only react while typing is enabled.
            if self.active:
                result = True
                ## Enter finishes editing and disables typing.
                if event.key == pygame.K_RETURN:
                    self.active = False
                    self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
                ## Backspace deletes the last character.
                elif event.key == pygame.K_BACKSPACE:
                    self.text = self.text[:-1]
                ## Any other key: accept the character if it is allowed and
                ## the maximum length has not been reached yet.
                else:
                    if self.only != False:
                        if event.unicode in self.only and len(self.text) < self.maxCar:
                            self.text += event.unicode
                    else:
                        if len(self.text) < self.maxCar:
                            self.text += event.unicode
                # Re-render the text surface.
                self.txt_surface = FONT.render(self.text, True, (0, 0, 0))
        self.update()
        return result
    # Resize the rectangle holding the text to fit the current contents.
    def update(self):
        width = max(18, self.txt_surface.get_width()+5)
        self.rect.w = width
    ## Draw the rectangle and the text onto *screen*.
    def show(self, screen):
        pygame.draw.rect(screen, self.color, self.rect)
        screen.blit(self.txt_surface, (self.rect.x+5, self.rect.y+5))
    ## Return the text typed so far.
    def getText(self):
        return self.text
    ## Set the on-screen position of the box.
    def setPos(self, pos):
        self.rect.x = int(pos[0])
        self.rect.y = int(pos[1])
|
988,988 | a11301ebf9c9040dae76311322696f7e6d01005b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2016 Taylor C. Richberger <taywee@gmx.com>
# This code is released under the license described in the LICENSE file
from __future__ import division, absolute_import, print_function, unicode_literals
from datetime import timedelta
import six
from ssllabs.object import Object
class Info(Object):
    '''The info object, accessed through :meth:`ssllabs.client.Client.info`'''
    def __init__(self, data):
        # Scalar fields are copied straight out of the JSON payload.
        self.__version = data.get('version')
        self.__criteriaVersion = data.get('criteriaVersion')
        self.__maxAssessments = data.get('maxAssessments')
        self.__currentAssessments = data.get('currentAssessments')
        # The cool-off is converted to a timedelta only when present.
        if 'newAssessmentCoolOff' in data:
            self.__newAssessmentCoolOff = timedelta(milliseconds=data['newAssessmentCoolOff'])
        else:
            self.__newAssessmentCoolOff = None
        self.__messages = data.get('messages', list())
    @property
    def version(self):
        '''The SSL Labs software version, as a string (e.g., "1.11.14")'''
        return self.__version
    @property
    def criteriaVersion(self):
        '''The rating criteria version, as a string (e.g., "2009f")'''
        return self.__criteriaVersion
    @property
    def maxAssessments(self):
        '''Maximum number of concurrent assessments this client may
        initiate.'''
        return self.__maxAssessments
    @property
    def currentAssessments(self):
        '''Number of assessments currently in flight for this client.'''
        return self.__currentAssessments
    @property
    def newAssessmentCoolOff(self):
        '''Cool-off period after each new assessment, as a timedelta; a new
        assessment submitted before it expires is answered with a 429.'''
        return self.__newAssessmentCoolOff
    @property
    def messages(self):
        '''List of message strings. Messages can be public (sent to
        everyone) or private (sent only to the invoking client); private
        ones are prefixed with "[Private]".'''
        return self.__messages
|
988,989 | 91a9fec824707e8707f6e3226f08b7081740e1f1 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Time:
2021-01-27 11:53
Author:
huayang
Subject:
Bert 原生分词器,移除了兼容 python2 的内容
References:
https://github.com/google-research/bert/blob/master/tokenization.py
"""
import os
import doctest
from collections import OrderedDict
from huaytools.nlp.normalization import (
is_cjk,
is_whitespace,
is_control,
is_punctuation,
remove_accents,
convert_to_unicode
)
__all__ = [
'BertTokenizer',
'tokenizer'
]
def load_vocab(vocab_file, encoding='utf8'):
    """Load a vocabulary file into an ordered {token: index} dictionary."""
    vocab = OrderedDict()
    with open(vocab_file, encoding=encoding) as reader:
        # One token per line; the line number (0-based) is the token id.
        for index, line in enumerate(reader):
            token = convert_to_unicode(line).strip()
            vocab[token] = index
    return vocab
def split_by_whitespace(text):
    """Split *text* on runs of whitespace, ignoring leading/trailing blanks.

    Examples:
        >>> _text = '我爱python,我爱编程;I love python, I like programming.'
        >>> split_by_whitespace(_text)
        ['我爱python,我爱编程;I', 'love', 'python,', 'I', 'like', 'programming.']
    """
    # str.split() with no separator already trims the ends, collapses runs
    # of whitespace and returns [] for empty or all-whitespace input.
    return text.split()
def split_by_punctuation(text):
    """Split *text* so that every punctuation character is its own token.

    Examples:
        >>> _text = '我爱python,我爱编程;I love python, I like programming.'
        >>> split_by_punctuation(_text)
        ['我爱python', ',', '我爱编程', ';', 'I love python', ',', ' I like programming', '.']
    """
    segments = []
    in_segment = False
    for ch in text:
        if is_punctuation(ch):
            # A punctuation character always stands alone and terminates
            # whatever segment was being accumulated.
            segments.append([ch])
            in_segment = False
        else:
            if not in_segment:
                segments.append([])
                in_segment = True
            segments[-1].append(ch)
    return ["".join(seg) for seg in segments]
class WordPieceTokenizer(object):
    """Runs WordPiece Tokenizer."""
    def __init__(self, vocab, unk_token='[UNK]', max_input_chars_per_word=100):
        # vocab: mapping of token -> id; only membership tests are used here.
        # unk_token: emitted when a word cannot be segmented.
        # max_input_chars_per_word: longer words map directly to unk_token.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, text):
        """Tokenizes a piece of text into its word pieces.
        This uses a greedy longest-match-first algorithm to perform tokenization
        using the given vocabulary.
        Examples:
            >>> _vocab = load_vocab(_default_vocab_path)
            >>> _tokenizer = WordPieceTokenizer(_vocab)
            >>> _tokenizer.tokenize('unaffable')
            ['u', '##na', '##ff', '##able']
        Args:
            text: A single token or whitespace separated tokens. This should have
                already been passed through `BasicTokenizer.
        Returns:
            A list of wordpiece tokens.
        """
        # text = convert_to_unicode(text)
        output_tokens = []
        for token in split_by_whitespace(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                # Word too long: do not even attempt segmentation.
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedily find the longest vocab entry starting at `start`;
                # non-initial pieces carry the '##' continuation prefix.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No prefix matched at all: the whole word becomes unk_token.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
class BasicTokenizer(object):
    """Whitespace/punctuation tokenizer with optional lower-casing."""
    def __init__(self, do_lower_case=True):
        """do_lower_case: lower-case each token and strip its accents."""
        self.do_lower_case = do_lower_case
    def tokenize(self, text):
        """Tokenizes a piece of text."""
        text = convert_to_unicode(text)
        text = self.clean_text(text)
        # This was added on November 1st, 2018 for the multilingual and Chinese
        # models. This is also applied to the English models now, but it doesn't
        # matter since the English models were not trained on any Chinese data
        # and generally don't have any Chinese data in them (there are Chinese
        # characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia.).
        text = self._add_space_around_cjk_chars(text)
        orig_tokens = split_by_whitespace(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = remove_accents(token)
            split_tokens.extend(split_by_punctuation(token))
        # Join and re-split once more to normalise any whitespace introduced
        # by the punctuation splitting.
        output_tokens = split_by_whitespace(" ".join(split_tokens))
        return output_tokens
    @staticmethod
    def clean_text(text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
        for char in text:
            cp = ord(char)
            # Drop NUL, the Unicode replacement character and control chars.
            if cp == 0 or cp == 0xfffd or is_control(char):
                continue
            if is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
    @staticmethod
    def _add_space_around_cjk_chars(text):
        """Surround every CJK character with spaces so each becomes a token.
        Examples:
            >>> _text = '我爱python,我爱编程;I love python, I like programming.'
            >>> BasicTokenizer._add_space_around_cjk_chars(_text)
            ' 我 爱 python, 我 爱 编 程 ;I love python, I like programming.'
        """
        output = []
        for char in text:
            cp = ord(char)
            if is_cjk(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class BertTokenizer(object):
    """@NLP Utils
    Bert tokenizer (basic tokenization followed by WordPiece).
    Examples:
        >>> text = '我爱python,我爱编程;I love python, I like programming. Some unkword'

        # WordPiece tokenization
        >>> tokens = tokenizer.tokenize(text)
        >>> assert [tokens[2], tokens[-2], tokens[-7]] == ['python', '##nk', 'program']

        # model inputs
        >>> token_ids, token_type_ids = tokenizer.encode(text, return_token_type_ids=True)
        >>> assert token_ids[:6] == [101, 2769, 4263, 9030, 8024, 2769]
        >>> assert token_type_ids == [0] * len(token_ids)

        # sentence-pair mode
        >>> txt1 = '我爱python'
        >>> txt2 = '我爱编程'
        >>> token_ids, masks = tokenizer.encode(txt1, txt2, return_masks=True)
        >>> assert token_ids == [101, 2769, 4263, 9030, 102, 2769, 4263, 5356, 4923, 102]
        >>> assert masks == [1] * 10
        >>> # batch mode
        >>> ss = ['我爱python', '深度学习', '机器学习']
    """
    token2id_map: dict  # {token: id}
    id2token_map: dict  # {id: token}
    def __init__(self, vocab_file,
                 do_lower_case=True,
                 token_cls='[CLS]',
                 token_sep='[SEP]',
                 token_unk='[UNK]',
                 token_mask='[MASK]',
                 token_pad='[PAD]',
                 verbose=0):
        """vocab_file: path to a one-token-per-line vocabulary file."""
        self.token2id_map = load_vocab(vocab_file)
        self.id2token_map = {v: k for k, v in self.token2id_map.items()}
        if verbose > 0:
            print(f'Vocab size={len(self.token2id_map)}')
        # self.do_lower_case = do_lower_case
        self.basic_tokenizer = BasicTokenizer(do_lower_case)
        self.word_piece_tokenizer = WordPieceTokenizer(vocab=self.token2id_map)
        # self.basic_tokenize = lambda text: tokenize(text, do_lower_case)
        # self.word_piece_tokenize = WordPieceTokenizer(vocab=self.token2id_map).tokenize
        self.token_cls = token_cls
        self.token_sep = token_sep
        self.token_unk = token_unk
        self.token_mask = token_mask
        self.token_pad = token_pad
        self._padding_token_id = self.token2id_map[token_pad]
    def basic_tokenize(self, text):
        """Run only the basic (whitespace/punctuation/CJK) tokenization."""
        return self.basic_tokenizer.tokenize(text)
    def word_piece_tokenize(self, text):
        """Run only the WordPiece segmentation step."""
        return self.word_piece_tokenizer.tokenize(text)
    def encode(self, txt1, txt2=None, max_len=None,
               return_token_type_ids=False,
               return_masks=False):
        """Encode one sentence (or a sentence pair) to model input ids.

        Returns token_ids, optionally followed by token_type_ids and masks
        depending on the return_* flags; a single list is returned unwrapped.
        """
        tokens_txt1 = self.tokenize(txt1)
        tokens_txt2 = self.tokenize(txt2) if txt2 is not None else None
        self._truncate(tokens_txt1, tokens_txt2, max_len)
        tokens, len_txt1, len_txt2 = self._concat(tokens_txt1, tokens_txt2)
        # token_type_ids and masks cost about the same to compute, so both are
        # always built; the flags only decide which ones are returned.
        token_ids = self.convert_tokens_to_ids(tokens)
        token_type_ids = [0] * len_txt1 + [1] * len_txt2
        masks = [1] * (len_txt1 + len_txt2)
        if max_len is not None:
            # Right-pad everything up to max_len.
            padding_len = max_len - len_txt1 - len_txt2
            token_ids += [self._padding_token_id] * padding_len
            token_type_ids += [0] * padding_len
            masks += [0] * padding_len
        inputs = [token_ids]
        if return_token_type_ids:
            inputs.append(token_type_ids)
        if return_masks:
            inputs.append(masks)
        return inputs if len(inputs) > 1 else inputs[0]
    def batch_encode(self,
                     texts,
                     max_len=None,
                     convert_fn=None,
                     return_token_type_ids=False,
                     return_masks=False):
        """Encode a batch of sentences or (txt1, txt2) pairs.

        Args:
            texts: list of strings or of (txt1, txt2) tuples/lists.
            max_len: target padded length; derived from the batch if None.
            convert_fn: commonly `np.asarray`, `torch.as_tensor`, `tf.convert_to_tensor`
            return_token_type_ids: also return segment ids.
            return_masks: also return attention masks.
        """
        assert len(texts) > 0
        if max_len is None:
            # Note: the maximum length here is computed over the raw characters
            # of each sentence, not over tokens (e.g. English words).
            extra_len = 3 if len(texts[0]) > 1 else 2  # special tokens
            max_len = min(512, max(len(txt) for txt in texts) + extra_len)
        batch_token_ids = []
        batch_token_type_ids = []
        batch_masks = []
        for seq in texts:
            if isinstance(seq, str):
                tid, sid, mask = self.encode(txt1=seq, max_len=max_len,
                                             return_token_type_ids=True,
                                             return_masks=True)
            elif isinstance(seq, (tuple, list)):
                txt1, txt2 = seq[0], seq[1]
                tid, sid, mask = self.encode(txt1=txt1, txt2=txt2, max_len=max_len,
                                             return_token_type_ids=True,
                                             return_masks=True)
            else:
                raise ValueError('Assert seqs are list of txt or (txt1, txt2).')
            batch_token_ids.append(tid)
            batch_token_type_ids.append(sid)
            batch_masks.append(mask)
        if convert_fn is not None:
            batch_token_ids = convert_fn(batch_token_ids)
            batch_token_type_ids = convert_fn(batch_token_type_ids)
            batch_masks = convert_fn(batch_masks)
        inputs = [batch_token_ids]
        if return_token_type_ids:
            inputs.append(batch_token_type_ids)
        if return_masks:
            inputs.append(batch_masks)
        return inputs if len(inputs) > 1 else inputs[0]
    def tokenize(self, text):
        """Full tokenization: basic tokenize, then WordPiece each token."""
        split_tokens = []
        for token in self.basic_tokenize(text):
            for sub_token in self.word_piece_tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
    def convert_tokens_to_ids(self, tokens):
        return self._convert_by_vocab(self.token2id_map, tokens)
    def convert_ids_to_tokens(self, ids):
        return self._convert_by_vocab(self.id2token_map, ids)
    @staticmethod
    def _convert_by_vocab(vocab, items):
        """Converts a sequence of [tokens|ids] using the vocab."""
        output = []
        for item in items:
            output.append(vocab[item])
        return output
    def _concat(self, tokens_1st, tokens_2nd=None):
        """Add [CLS]/[SEP] markers and return (tokens, len_1st, len_2nd)."""
        packed_tokens_1st = [self.token_cls] + tokens_1st + [self.token_sep]
        if tokens_2nd is not None:
            packed_tokens_2nd = tokens_2nd + [self.token_sep]
            return packed_tokens_1st + packed_tokens_2nd, len(packed_tokens_1st), len(packed_tokens_2nd)
        else:
            return packed_tokens_1st, len(packed_tokens_1st), 0
    @staticmethod
    def _truncate(tokens_1st, tokens_2nd, max_len):
        """Trim the token lists in place so they fit max_len with markers."""
        if max_len is None:
            return
        if tokens_2nd is not None:
            # Pop from the longer list until the pair fits.
            while True:
                total_len = len(tokens_1st) + len(tokens_2nd)
                if total_len <= max_len - 3:  # 3 for [CLS] .. tokens_a .. [SEP] .. tokens_b [SEP]
                    break
                if len(tokens_1st) > len(tokens_2nd):
                    tokens_1st.pop()
                else:
                    tokens_2nd.pop()
        else:
            del tokens_1st[max_len - 2:]  # 2 for [CLS] .. tokens .. [SEP]
# 不是单例
# def get_tokenizer(vocab_file=None, **kwargs):
# """
#
# Args:
# vocab_file:
#
# Returns:
#
# """
# if vocab_file is None:
# pwd = os.path.dirname(__file__)
# vocab_file = os.path.join(pwd, '../data/vocab/vocab_21128.txt')
#
# tokenizer = Tokenizer(vocab_file, **kwargs)
# return tokenizer
# Module-level variables act as de-facto singletons: this default tokenizer
# instance is shared by every importer of the module.
_default_vocab_path = os.path.join(os.path.dirname(__file__), '../data_file/vocab_cn.txt')
tokenizer = BertTokenizer(_default_vocab_path)
def _test():
    """Run the doctests embedded in this module's docstrings."""
    doctest.testmod()
if __name__ == '__main__':
    """"""
    _test()
|
988,990 | 360ce4ff790eefdaa8230a0fc0b43a54979f0977 | def karatsuba(x, y):
if (len(str(x)) == 1 or len(str(y)) == 1):
return x * y
n = max(len(str(x)), len(str(y)))
power = int(n // 2)
x1 = int(x // 10 ** power)
x0 = int(x % 10 ** power)
y1 = int(y // 10 ** power)
y0 = int(y % 10 ** power)
z0 = karatsuba(x0, y1)
z2 = karatsuba(x1, y1)
z = karatsuba((x0+x1), (y0 + y1)) - z0-z2
res = z2 * 10 ** n + z * 10 ** power + z0
return res
# Read two integers from stdin and print their Karatsuba product.
print(karatsuba(int(input()), int(input())))
|
988,991 | 0c5f0d8af348b1b3dea57eead25a9b9c69fd0d95 | score1 = int(input('필기성적을 입력하세요 : '))
score2 = int(input('실기성적을 입력하세요 : '))
if score1 >= 80 and score2 >= 80 :
print('합격!')
else :
print('불합격!') |
988,992 | 7a9b94a9673237d1869259d5d604c32a4816902b | #*****************************************************************************#
#**
#** WASP Worker Launcher
#**
#** Brian L Thomas, 2011
#**
#** Tools by the Center for Advanced Studies in Adaptive Systems at
#** the School of Electrical Engineering and Computer Science at
#** Washington State University
#**
#** Copyright Washington State University, 2017
#** Copyright Brian L. Thomas, 2017
#**
#** All rights reserved
#** Modification, distribution, and sale of this work is prohibited without
#** permission from Washington State University
#**
#** If this code is used for public research, any resulting publications need
#** to cite work done by Brian L. Thomas at the Center for Advanced Study of
#** Adaptive Systems (CASAS) at Washington State University.
#**
#** Contact: Brian L. Thomas (bthomas1@wsu.edu)
#** Contact: Diane J. Cook (cook@eecs.wsu.edu)
#*****************************************************************************#
import optparse
import os
import shutil
import subprocess
import sys
import time
# Working-directory name created under the shared mount (Python 2 script).
tmp_dir = "work"
if __name__ == "__main__":
    print "WASP Launching Workers"
    # --number is required; --startnum offsets the worker numbering; --boss
    # is the JID the launched workers connect back to.
    parser = optparse.OptionParser(usage="usage: %prog [options]")
    parser.add_option("-n",
                      "--number",
                      dest="number",
                      help="Number of workers to launch.")
    parser.add_option("-s",
                      "--startnum",
                      dest="startnum",
                      help="Start number for workers.",
                      default="0")
    parser.add_option("-b",
                      "--boss",
                      dest="boss",
                      help="JID of Boss to connect to.",
                      default="boss@node01")
    (options, args) = parser.parse_args()
    if options.number == None:
        print "ERROR: Missing -n / --number"
        parser.print_help()
        sys.exit()
    workers = int(float(options.number))
    start = int(float(options.startnum))
    # Ensure the shared working directories exist.
    mdir = "/mnt/pvfs2/bthomas"
    if not os.path.isdir(mdir):
        os.mkdir(mdir)
    mydir = os.path.join(mdir, "%s" % tmp_dir)
    if not os.path.isdir(mydir):
        os.mkdir(mydir)
    for x in range(workers):
        # Zero-pad the worker number to three digits (e.g. 007, 042, 123).
        num = str(x + start)
        if (x + start) < 10:
            num = "00%s" % str(x + start)
        elif (x + start) < 100:
            num = "0%s" % str(x + start)
        # Per-worker directory with a copy of the `ar` binary.
        wkrDir = os.path.join(mydir, "worker%s" % num)
        if not os.path.isdir(wkrDir):
            os.mkdir(wkrDir)
        shutil.copy(os.path.join(os.getcwd(), "ar"), wkrDir)
        # Write a PBS job script that starts this worker, then submit it.
        fname = os.path.join(wkrDir, "run.pbs")
        out = open(fname, 'w')
        out.write("#PBS -l nodes=1:ppn=1,mem=150M,walltime=7:00:00\n")
        out.write("#PBS -N wkr%s\n" % str(num))
        out.write("cd ~/wasp\n")
        out.write("sleep 20\n")
        out.write("~/python/bin/python WASP_Worker.py ")
        out.write("--jid=aeolus-worker%s@node01 " % str(num))
        out.write("--password=WASPaeolus-worker%s " % str(num))
        out.write("--dir=%s " % str(wkrDir))
        out.write("--boss=%s " % str(options.boss))
        out.write("--pypath=/home/bthomas/python/bin/python ")
        out.write("\n")
        out.close()
        subprocess.call(str("qsub %s" % fname).split())
        # Throttle submissions so the scheduler is not flooded.
        time.sleep(5)
|
988,993 | fe5df5871871183e47630da6f8e5b2a312663c71 | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import pandas as pd
app = Flask(__name__)
# Pre-trained classifier loaded once at import time.
# NOTE(review): pickle.load of a local file is unsafe if the file can be
# replaced by an untrusted party — confirm the model file's provenance.
model = pickle.load(open('randomforest_model.pkl', 'rb'))
# NOTE(review): `flights` and `ip_feat` are not referenced by the routes
# visible here; they may be used elsewhere or be leftovers — verify before
# removing.
flights = pd.read_csv("data.csv", low_memory = False)
# Expected order of the 30 breast-cancer input features (mean / se / worst).
ip_feat = ['radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean', 'concave points_mean', 'symmetry_mean','fractal_dimension_mean',
'radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se', 'compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se', 'fractal_dimension_se',
'radius_worst', 'texture_worst', 'perimeter_worst', 'area_worst', 'smoothness_worst', 'compactness_worst', 'concavity_worst', 'concave points_worst', 'symmetry_worst','fractal_dimension_worst']
@app.route('/')
def home():
    """Serve the landing page containing the prediction input form."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    '''
    For rendering results on HTML GUI.

    Reads the 30 numeric tumour measurements from the POSTed form,
    feeds them to the pickled model, and renders the diagnosis.
    '''
    # Parse every submitted form field as a float measurement.
    cancer_ip_data = [float(x) for x in request.form.values()]
    # BUG FIX: previously dtype=int, which truncated the continuous float
    # measurements (e.g. radius_mean) before prediction and could change
    # the model's output. Keep the values as floats.
    final_features = [np.array(cancer_ip_data, dtype=float)]
    prediction = model.predict(final_features)
    print(" prediction: ", prediction)
    # Threshold the model output: <= 0.5 means benign, otherwise malignant.
    # (Typo "diagnosied" in the rendered message fixed to "diagnosed".)
    diagnosis = 'Benign' if prediction <= 0.5 else 'Malignant'
    return render_template('index11.html',
                           prediction_text='The patient is diagnosed with {}.'.format(diagnosis))
if __name__ == "__main__":
    # Run the Flask development server (debug mode; not for production use).
    app.run(debug=True)
988,994 | 3d8a25c103584bdb8789fd1344eed9af1b49f0a3 | # -*- coding: utf-8 -*-
import re,urllib
from resources.lib.libraries import client
def resolve(url):
    """Resolve a Kodi-style '$doregex[name]' templated URL into a final URL.

    The input string embeds <regex> blocks (with <name>, <expres>, <page>
    and optional <referer> tags). Each pass fetches the referenced page and
    substitutes the first match of <expres> for the matching placeholder.
    Returns the resolved URL, or None on any failure.

    NOTE(review): Python 2 code (urllib.unquote_plus, encoding str objects).
    """
    try:
        # Strip control characters so the regexes can match across lines.
        data = str(url).replace('\r','').replace('\n','').replace('\t','')
        # Names of all $doregex[name] placeholders to substitute.
        doregex = re.compile('\$doregex\[(.+?)\]').findall(data)
        # Up to 5 passes, since one substitution may unblock another.
        for i in range(0, 5):
            for x in doregex:
                try:
                    # Skip placeholders already substituted in a prior pass.
                    if not '$doregex[%s]' % x in data: raise Exception()
                    # Find the <regex> block whose <name> matches this placeholder.
                    regex = re.compile('<regex>(.+?)</regex>').findall(data)
                    regex = [r for r in regex if '<name>%s</name>' % x in r][0]
                    # Defer blocks that still contain unresolved placeholders.
                    if '$doregex' in regex: raise Exception()
                    expres = re.compile('<expres>(.+?)</expres>').findall(regex)[0]
                    # Optional referer header for the page request.
                    try: referer = re.compile('<referer>(.+?)</referer>').findall(regex)[0]
                    except: referer = ''
                    referer = urllib.unquote_plus(referer)
                    referer = client.replaceHTMLCodes(referer)
                    referer = referer.encode('utf-8')
                    # Fetch the page whose content <expres> is matched against.
                    page = re.compile('<page>(.+?)</page>').findall(regex)[0]
                    page = urllib.unquote_plus(page)
                    page = client.replaceHTMLCodes(page)
                    page = page.encode('utf-8')
                    result = client.request(page, referer=referer)
                    result = str(result).replace('\r','').replace('\n','').replace('\t','')
                    result = str(result).replace('\/','/')
                    # First capture of <expres> replaces the placeholder.
                    r = re.compile(expres).findall(result)[0]
                    data = data.replace('$doregex[%s]' % x, r)
                except:
                    # Best-effort: leave this placeholder for a later pass.
                    pass
            # Candidate URL is everything before the first <regex> block.
            url = re.compile('(.+?)<regex>').findall(data)[0]
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')
            # Done once no placeholders remain in the candidate.
            if not '$doregex' in url: return url
    except:
        # Any unexpected failure resolves to None.
        return
|
988,995 | d092c3dff8fe3b0b5deabf1513828249a44978ce | # Are there any duplicate ids in the records?
# No, there are not: each id uniquely identifies a single document.
988,996 | 4a2aeb05dd5e3c2c296a001253d830d9557923ab |
class Node:
    """Graph node stub carrying a caller-supplied index and a unique id."""

    def __init__(self, idx):
        # The caller-provided positional index of this node.
        self.idx = idx
        # Use the object's own identity as its unique id.
        self.id = id(self)
class Edge:
    # Placeholder: will represent a connection between two Nodes.
    pass
class Graph:
    # Placeholder: will hold the Node/Edge structure.
    pass
class Task:
    # Placeholder: will represent a unit of work.
    pass
class Table:
    '''
    store Nodes, Edges, and Tasks
    '''
    # Placeholder container; no storage is implemented yet.
    pass
988,997 | 31da3ed694612fe8c7924525b96c922268d6755b | #ax + b = c
# Solve the linear equation a*x + b = c for x.
a = 4
b = 9
c = 23
# BUG FIX: the original `for x = (c -b): a` / `print x` was not valid
# Python; the intended computation is x = (c - b) / a.
x = (c - b) / a
print(x)
988,998 | fe22949c0cafdd66e9bed3876c942035cd64f685 | from __future__ import annotations
import unittest
from monty.tempfile import ScratchDir
from maml.base import KerasModel, SKLModel, is_keras_model, is_sklearn_model
class TestBaseModel(unittest.TestCase):
    """Exercise the SKLModel and KerasModel wrappers from maml.base."""

    def test_sklmodel(self):
        # Wrap a scikit-learn estimator and verify fit/train/predict,
        # save/load round-trips, evaluation, and the type predicates.
        from sklearn.linear_model import LinearRegression
        model = SKLModel(model=LinearRegression())
        # y = x0 + x1 fits these points exactly, so [4, 5] -> 9.
        x = [[1, 2], [3, 4]]
        y = [3, 7]
        model.fit(x, y)
        model.train(x, y)
        self.assertAlmostEqual(model.predict_objs([[4, 5]])[0], 9)
        with ScratchDir("."):
            model.save("test_model.sav")
            # Refit with doubled targets, then reload and confirm the
            # saved coefficients are restored.
            model.fit([[1, 2], [3, 4]], [6, 14])
            self.assertAlmostEqual(model.predict_objs([[4, 5]])[0], 18)
            model.load("test_model.sav")
            self.assertAlmostEqual(model.predict_objs([[4, 5]])[0], 9)
            model2 = SKLModel.from_file("test_model.sav")
            self.assertAlmostEqual(model2.predict_objs([[4, 5]])[0], 9)
            self.assertAlmostEqual(model2.evaluate([[4, 8], [8, 5]], [12, 13]), 1.0)
        assert is_sklearn_model(model)
        assert not is_keras_model(model)

    def test_keras_model(self):
        # Same round-trip checks for the Keras wrapper.
        import numpy as np
        import tensorflow as tf
        model = KerasModel(model=tf.keras.Sequential([tf.keras.layers.Dense(1, input_dim=2)]))
        model.model.compile("adam", "mse")
        x = np.array([[1, 2], [3, 4]])
        y = np.array([3, 7]).reshape((-1, 1))
        model.fit(x, y)
        model.train(x, y)
        # Pin known weights (w = [1, 1], b = 0) so predictions below are
        # deterministic: [4, 5] -> 9.
        model.model.set_weights([np.array([[1.0], [1.0]]), np.array([0])])
        self.assertAlmostEqual(model.predict_objs([[4, 5]])[0], 9)
        with ScratchDir("."):
            model.save("test_model.sav")
            # Refit with different targets, then reload and confirm the
            # pinned weights are restored.
            model.fit(np.array([[1, 2], [3, 4]]), np.array([6, 14])[:, None])
            model.load("test_model.sav")
            self.assertAlmostEqual(model.predict_objs([[4, 5]])[0], 9)
            model2 = KerasModel.from_file("test_model.sav")
            self.assertAlmostEqual(model2.predict_objs([[4, 5]])[0], 9)
            self.assertAlmostEqual(model2.evaluate([[4, 8], [8, 5]], [12, 13]), 0.0)
        assert not is_sklearn_model(model)
        assert is_keras_model(model)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
988,999 | e8eb77ff2cd14426d44ed4fbe0c4c23b342af984 | """ DigitalICS: mobile data collection tool to complete surveys with integrated multimedia
Copyright (C) 2009. Yael Schwartzman
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
Contact information: Yael Schwartzman - yaelsf@gmail.com
"""
class Constants:
    """Defines possible types of Input and Output.

    Maps XForm-style labels (u'type' / u'input_type') between their string
    spellings and the integer constants used internally by the application.
    """

    def __init__(self):
        # return types
        self.RETURN_INTEGER = 1
        self.RETURN_STRING = 2
        self.RETURN_DATE = 3
        self.RETURN_FLOAT = 4
        self.RETURN_BOOLEAN = 5
        # BUG FIX: the four constants below were referenced by
        # get_constant()/get_name() but were never defined, so resolving
        # select1/select/audio/photo return types raised AttributeError.
        self.RETURN_SELECT1 = 6
        self.RETURN_SELECT_MULTI = 7
        self.RETURN_AUDIO = 8
        self.RETURN_PHOTO = 9
        # form control types
        self.INPUT = 1
        self.SELECT1 = 2
        self.SELECT = 3
        self.TEXTAREA = 4
        self.AUDIO = 5
        self.PHOTO = 6
        self.FORM = 7
        self.FEEDBACK = 9
        self.DB_INPUT = 10
        # if page is on front_page view
        self.FRONT = 8
        # Forward lookup tables: label -> {string spelling -> constant}.
        self._constants = {
            u'type': {
                u"number": self.RETURN_INTEGER,
                u"string": self.RETURN_STRING,
                u"date": self.RETURN_DATE,
                u"select1": self.RETURN_SELECT1,
                u"select": self.RETURN_SELECT_MULTI,
                u"boolean": self.RETURN_BOOLEAN,
                u"audio": self.RETURN_AUDIO,
                u"photo": self.RETURN_PHOTO,
            },
            u'input_type': {
                u"input": self.INPUT,
                u"select1": self.SELECT1,
                u"select": self.SELECT,
                u"textarea": self.TEXTAREA,
                u"audio": self.AUDIO,
                u"photo": self.PHOTO,
                u"form": self.FORM,
            },
        }
        # Reverse lookup tables: label -> {constant -> string spelling}.
        self._names = dict(
            (label, dict((v, k) for (k, v) in table.items()))
            for (label, table) in self._constants.items()
        )

    def get_constant(self, label, value):
        """Return the integer constant for string `value` under `label`.

        Raises Exception for an unknown value; returns None for an unknown
        label (preserving the original if/elif fall-through behavior).
        """
        table = self._constants.get(label)
        if table is None:
            return None
        if value in table:
            return table[value]
        raise Exception(" constant not found %s: %s " % (label, value))

    def get_name(self, label, value):
        """Return the string spelling for integer constant `value` under `label`.

        Raises Exception for an unknown constant (including RETURN_FLOAT,
        FEEDBACK and DB_INPUT, which had no name branch in the original);
        returns None for an unknown label.
        """
        table = self._names.get(label)
        if table is None:
            return None
        if value in table:
            return table[value]
        # Typo "namne" in the original u'type' error message fixed.
        raise Exception(" constant name not found %s: %s " % (label, value))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.