# Repository: him4318/Transformer-ocr
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 16:15:54 2020
@author: himanshu.chaudhary
"""
from torch import nn
import torch
from torch.autograd import Variable
import numpy as np
import time
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size, padding_idx=0, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(reduction='sum')  # equivalent to the deprecated size_average=False
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
assert x.size(1) == self.size
true_dist = x.data.clone()
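# spread the smoothing mass over all classes except the true label and the padding index (hence size - 2)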
true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
true_dist[:, self.padding_idx] = 0
mask = torch.nonzero(target.data == self.padding_idx)
if mask.dim() > 0:
true_dist.index_fill_(0, mask.squeeze(), 0.0)
self.true_dist = true_dist
return self.criterion(x, Variable(true_dist, requires_grad=False))
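# Minimal usage sketch of LabelSmoothing (illustrative only; `vocab_size`, `output` and `targets`
# are assumed placeholders -- the real call sites are train()/evaluate() below):
#   criterion = LabelSmoothing(size=vocab_size, padding_idx=0, smoothing=0.1)
#   log_probs = output.log_softmax(-1).contiguous().view(-1, vocab_size)  # (batch*seq_len, vocab_size)
#   loss = criterion(log_probs, targets.contiguous().view(-1).long())     # targets: (batch*seq_len,) token ids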
def train(model, criterion, optimizer, scheduler, dataloader, vocab_length, device):
model.train()
total_loss = 0
for batch, (imgs, labels_y,) in enumerate(dataloader):
imgs = imgs.to(device)
labels_y = labels_y.to(device)
optimizer.zero_grad()
output = model(imgs.float(),labels_y.long()[:,:-1])
norm = (labels_y != 0).sum()
loss = criterion(output.log_softmax(-1).contiguous().view(-1, vocab_length), labels_y[:,1:].contiguous().view(-1).long()) / norm
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.2)
optimizer.step()
total_loss += (loss.item()*norm)
return total_loss / len(dataloader),output
def evaluate(model, criterion, dataloader, vocab_length, device):
model.eval()
epoch_loss = 0
with torch.no_grad():
for batch, (imgs, labels_y,) in enumerate(dataloader):
imgs = imgs.to(device)
labels_y = labels_y.to(device)
output = model(imgs.float(),labels_y.long()[:,:-1])
norm = (labels_y != 0).sum()
loss = criterion(output.log_softmax(-1).contiguous().view(-1, vocab_length), labels_y[:,1:].contiguous().view(-1).long()) / norm
epoch_loss += (loss.item()*norm)
return epoch_loss / len(dataloader)
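# Encode an image batch into transformer "memory": CNN backbone features are flattened,
# combined with learned row/column positional embeddings, and passed through the encoder.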
def get_memory(model, imgs):
x = model.conv(model.get_feature(imgs))
bs,_,H, W = x.shape
pos = torch.cat([
model.col_embed[:W].unsqueeze(0).repeat(H, 1, 1),
model.row_embed[:H].unsqueeze(1).repeat(1, W, 1),
], dim=-1).flatten(0, 1).unsqueeze(1)
return model.transformer.encoder(pos + 0.1 * x.flatten(2).permute(2, 0, 1))
def single_image_inference(model, img, tokenizer, transform, device):
'''
Run inference on a single image.
'''
img = transform(img)
imgs = img.unsqueeze(0).float().to(device)
with torch.no_grad():
memory = get_memory(model,imgs)
out_indexes = [tokenizer.chars.index('SOS'), ]
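# Greedy autoregressive decoding: repeatedly run the decoder on the tokens generated so far
# and append the most probable next token, stopping at EOS or after 128 steps.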
for i in range(128):
mask = model.generate_square_subsequent_mask(i+1).to(device)
trg_tensor = torch.LongTensor(out_indexes).unsqueeze(1).to(device)
output = model.vocab(model.transformer.decoder(model.query_pos(model.decoder(trg_tensor)), memory,tgt_mask=mask))
out_token = output.argmax(2)[-1].item()
if out_token == tokenizer.chars.index('EOS'):
break
out_indexes.append(out_token)
pre = tokenizer.decode(out_indexes[1:])
return pre
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def run_epochs(model, criterion, optimizer, scheduler, train_loader, val_loader, epochs, tokenizer, target_path, device):
'''
Run training and validation for the given number of epochs, saving the best checkpoint.
'''
best_valid_loss = np.inf
c = 0
for epoch in range(epochs):
print(f'Epoch: {epoch+1:02}', f'learning rate: {scheduler.get_last_lr()}')
start_time = time.time()
train_loss,outputs = train(model, criterion, optimizer, scheduler, train_loader, tokenizer.vocab_size, device)
valid_loss = evaluate(model, criterion, val_loader, tokenizer.vocab_size, device)
epoch_mins, epoch_secs = epoch_time(start_time, time.time())
c+=1
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), target_path)
c=0
if c>4:
scheduler.step()
c=0
print(f'Time: {epoch_mins}m {epoch_secs}s')
print(f'Train Loss: {train_loss:.3f}')
print(f'Val Loss: {valid_loss:.3f}')
print(best_valid_loss)
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_person.ipynb (unless otherwise specified).
__all__ = ['Person']
# Cell
class Person:
def __init__(self, name:str = 'Tyler'):
self.name = name
def __str__(self):
"""String representation of the person"""
return f"This person's name is: {self.name}"
def change_name(self, new_name):
""" Change the person's name"""
self.name = new_name
# File: src/sprite/sprite_cat.py
from enum import Enum
from PyQt6 import QtGui
from sprite.sprite import Sprite
from sprite.sprite import SpritePos as Pos
import os
import random
class Cat(Sprite):
ANIMATION_FRAME_PATH = 'assets\\cat\\animation_frames\\'
SPRITE_SIZE = Pos(100, 100)
class States(Enum):
idle = 0
idle_to_sleep = 1
sleep = 2
sleep_to_idle = 3
walk_left = 4
walk_right = 5
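# Simple state machine: each animation state maps to the states it may transition into next
# (one is picked uniformly at random in _next_state once the current animation cycle finishes).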
NEXT_POSSIBLE_STATES = {
States.idle: [States.idle, States.idle_to_sleep, States.walk_left, States.walk_right],
States.idle_to_sleep: [States.sleep],
States.sleep: [States.sleep, States.sleep_to_idle],
States.sleep_to_idle: [States.idle, States.walk_left, States.walk_right],
States.walk_left: [States.idle, States.idle_to_sleep, States.walk_left, States.walk_right],
States.walk_right: [States.idle, States.idle_to_sleep, States.walk_left, States.walk_right],
}
STATE_FRAME_DELAYS = {
States.idle: 400,
States.idle_to_sleep: 100,
States.sleep: 1000,
States.sleep_to_idle: 100,
States.walk_left: 100,
States.walk_right: 100,
}
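# Per-frame (x, y) displacement applied while in each state; only the walking states move the sprite.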
STATE_DISPLACEMENTS = {
States.idle: Pos(0, 0),
States.idle_to_sleep: Pos(0, 0),
States.sleep: Pos(0, 0),
States.sleep_to_idle: Pos(0, 0),
States.walk_left: Pos(-3, 0),
States.walk_right: Pos(3, 0),
}
# Animation frame cycle
cycle = 0
animationFramePaths: list
currentState: States
pos = Pos(0, 0)
@classmethod
def getCurrentFramePath(cls):
return cls.animationFramePaths[cls.cycle]
@classmethod
def getCurrentFrameDelay(cls):
return cls.STATE_FRAME_DELAYS[cls.currentState]
@classmethod
def getCurrentPos(cls):
return cls.pos
@classmethod
def setCurrentPos(cls, x, y):
cls.pos.x, cls.pos.y = x, y
@classmethod
def init(cls, xcoord: int, ycoord: int, KeepInFrame=True):
cls.pos.x = xcoord
cls.pos.y = ycoord
cls.SpriteProperties.init(cls, KeepInFrame)
cls.currentState = cls.States.idle
cls._update_frame_paths()
@classmethod
def update(cls):
cls._update_frame()
cls._update_pos()
cls.checkSpriteProperties()
@classmethod
def onLeftClick(cls, event: QtGui.QMouseEvent):
if(cls.currentState == cls.States.sleep or cls.currentState == cls.States.idle_to_sleep):
cls._change_state_to(cls.States.idle)
else:
cls._change_state_to(cls.States.sleep_to_idle)
@classmethod
def _update_pos(cls):
cls.pos = Pos(cls.pos.x + cls.STATE_DISPLACEMENTS[cls.currentState].x,
cls.pos.y + cls.STATE_DISPLACEMENTS[cls.currentState].y)
@classmethod
def _update_frame(cls):
if(cls.cycle < len(cls.animationFramePaths) - 1):
cls.cycle += 1
else:
cls.cycle = 0
cls._next_state()
@classmethod
def _next_state(cls):
cls._change_state_to(random.choice(
cls.NEXT_POSSIBLE_STATES[cls.currentState]))
@classmethod
def _change_state_to(cls, state: States):
cls.currentState = state
cls.cycle = 0
cls._update_frame_paths()
@classmethod
def _update_frame_paths(cls):
# print(os.system("dir"))
# print(cls.ANIMATION_FRAME_PATH +
# cls.currentState.name + "\\")
cls.animationFramePaths = [cls.ANIMATION_FRAME_PATH +
cls.currentState.name + "\\" + name for name in os.listdir(cls.ANIMATION_FRAME_PATH +
cls.currentState.name + "\\")]
## Class Properties
class SpriteProperties:
KeepInFrame: bool = True
sprite = None
max_x: int
max_y: int
@classmethod
def init(cls, sprite, KeepInFrame = True):
cls.KeepInFrame = KeepInFrame
cls.sprite = sprite
cls.max_x = QtGui.QGuiApplication.primaryScreen().availableGeometry().width() - sprite.SPRITE_SIZE.x
cls.max_y = QtGui.QGuiApplication.primaryScreen().availableGeometry().height() - sprite.SPRITE_SIZE.y
@classmethod
def checkSpriteProperties(cls) -> None:
if(cls.SpriteProperties.KeepInFrame):
cls.keepSpriteInFrame()
@classmethod
def keepSpriteInFrame(cls) -> None:
if(cls.pos.x <= 0):
cls._change_state_to(cls.States.walk_right)
elif (cls.pos.x >= cls.SpriteProperties.max_x):
cls._change_state_to(cls.States.walk_left)
#!/usr/bin/env python
# coding: utf-8
import os
import csv
import re
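# Extract the '理由' (reasoning) section of a Chinese court verdict: the text between the '理由'
# heading and the start of the appellant's/plaintiff's claim, and log its character count to a CSV.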
def get_reason_content_simple(verdict, date, file_num):
try:
start_index = verdict.index('理由') + 5
end_index = re.search("^\S、\S*(?:上訴|原告).{0,15}(?:主張|意旨)\S*(?:︰|:)", verdict, re.M).start() - 3
content = verdict[start_index: end_index].replace('\n', '')
content_num = len(content)
except:
content = '*'
content_num = '*'
# save csv file
filepath = 'analysis_' + date + '/reason_content_num_' + date + '.csv'
if not os.path.isfile(filepath):
with open(filepath, 'a', encoding = 'big5', newline='\n') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['案件編號', '事實字數'])
with open(filepath, 'a', encoding = 'big5', newline='\n') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([file_num,content_num])
return content, content_num
import pathlib
import panflute
import pandocacro
def test_pandocacro() -> None:
"""Check the prepare and finalize properly add and remove the acronyms
"""
root = pathlib.Path(__file__).parent
text = "\n".join(
[(root / f).open().read() for f in ("metadata.yaml", "example.md")]
)
doc = panflute.convert_text(text, standalone=True)
assert isinstance(doc, panflute.Doc)
assert not hasattr(doc, "acronyms")
pandocacro.prepare(doc)
assert hasattr(doc, "acronyms")
for acro in doc.acronyms:
for key, type_ in (("count", int),
("list", bool),
("total", int),
):
assert key in doc.acronyms[acro]
assert isinstance(doc.acronyms[acro][key], type_)
pandocacro.finalize(doc)
assert not hasattr(doc, "acronyms")
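# Note: 'frontend', 'testing' and 'instance' are not imported here; they appear to be globals
# injected by the surrounding test harness when this script is executed.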
def check_user(user, session_type=False):
frontend.page(
"dashboard",
expect={
"script_user": testing.expect.script_user(user)
})
if session_type is False:
if user.id is None:
# Anonymous user.
session_type = None
else:
session_type = "normal"
frontend.json(
"sessions/current",
params={ "fields": "user,type" },
expect={
"user": user.id,
"type": session_type
})
anonymous = testing.User.anonymous()
alice = instance.user("alice")
admin = instance.user("admin")
check_user(anonymous)
from collections import Counter
import logging
import numpy
import os
from multiprocessing import Process, Manager, Value, Semaphore
from random import random
from keras.models import Sequential, load_model
import pysam
from Bio import pairwise2
from Bio.Seq import Seq
from Bio import SeqIO
from advntr.coverage_bias import CoverageBiasDetector, CoverageCorrector
from advntr.hmm_utils import *
from advntr.pacbio_haplotyper import PacBioHaplotyper
from advntr.profiler import time_usage
from advntr.sam_utils import get_reference_genome_of_alignment_file, get_related_reads_and_read_count_in_samfile
from advntr import settings
from advntr.utils import is_low_quality_read
from pomegranate import HiddenMarkovModel as Model
from deep_recruitment import get_embedding_of_string, input_dim
class GenotypeResult:
def __init__(self, copy_numbers, recruited_reads_count, spanning_reads_count, flanking_reads_count, max_likelihood):
self.copy_numbers = copy_numbers
self.recruited_reads_count = recruited_reads_count
self.spanning_reads_count = spanning_reads_count
self.flanking_reads_count = flanking_reads_count
self.maximum_likelihood = max_likelihood
class SelectedRead:
def __init__(self, sequence, logp, vpath, mapq=None, reference_start=None):
self.sequence = sequence
self.logp = logp
self.vpath = vpath
self.mapq = mapq
self.is_mapped = reference_start is not None
def is_mapped(self):
return self.is_mapped
class VNTRFinder:
"""Find the VNTR structure of a reference VNTR in NGS data of the donor."""
def __init__(self, reference_vntr, is_haploid=False, reference_filename=None):
self.reference_vntr = reference_vntr
self.is_haploid = is_haploid
self.reference_filename = reference_filename
self.min_repeat_bp_to_add_read = 2
if len(self.reference_vntr.pattern) < 30:
self.min_repeat_bp_to_add_read = 2
self.min_repeat_bp_to_count_repeats = 2
self.minimum_left_flanking_size = {}
self.minimum_right_flanking_size = {69212: 19, 532789: 12, 400825: 10, 468671: 10}
self.vntr_start = self.reference_vntr.start_point
self.vntr_end = self.vntr_start + self.reference_vntr.get_length()
def get_copies_for_hmm(self, read_length):
return int(round(float(read_length) / len(self.reference_vntr.pattern) + 0.5))
@staticmethod
def get_alignment_file_read_mode(alignment_file):
read_mode = 'r' if alignment_file.endswith('sam') else 'rb'
if alignment_file.endswith('cram'):
read_mode = 'rc'
return read_mode
@time_usage
def build_vntr_matcher_hmm(self, copies, flanking_region_size=100):
patterns = self.reference_vntr.get_repeat_segments()
left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size]
vntr_matcher = get_read_matcher_model(left_flanking_region, right_flanking_region, patterns, copies)
return vntr_matcher
def get_vntr_matcher_hmm(self, read_length):
"""Try to load trained HMM for this VNTR
If there was no trained HMM, it will build one and store it for later usage
"""
logging.info('Using read length %s' % read_length)
copies = self.get_copies_for_hmm(read_length)
base_name = str(self.reference_vntr.id) + '_' + str(read_length) + '.json'
stored_hmm_file = settings.TRAINED_HMMS_DIR + base_name
if settings.USE_TRAINED_HMMS and os.path.isfile(stored_hmm_file):
model = Model()
model = model.from_json(stored_hmm_file)
return model
flanking_region_size = read_length
vntr_matcher = self.build_vntr_matcher_hmm(copies, flanking_region_size)
if settings.USE_TRAINED_HMMS:
json_str = vntr_matcher.to_json()
with open(stored_hmm_file, 'w') as outfile:
outfile.write(json_str)
return vntr_matcher
def get_keywords_for_filtering(self, short_reads=True, keyword_size=21):
vntr = ''.join(self.reference_vntr.get_repeat_segments())
if len(vntr) < keyword_size:
min_copies = int(keyword_size / len(vntr)) + 1
vntr = str(vntr) * min_copies
locus = self.reference_vntr.left_flanking_region[-15:] + vntr + self.reference_vntr.right_flanking_region[:15]
queries = []
step_size = 5 if len(self.reference_vntr.pattern) != 5 else 6
for i in range(0, len(locus) - keyword_size + 1, step_size):
queries.append(locus[i:i+keyword_size])
if not short_reads:
queries = [self.reference_vntr.left_flanking_region[-80:], self.reference_vntr.right_flanking_region[:80]]
queries = set(queries)
return queries
@staticmethod
def add_hmm_score_to_list(sema, hmm, read, result_scores):
logp, vpath = hmm.viterbi(str(read.seq))
rev_logp, rev_vpath = hmm.viterbi(str(Seq(str(read.seq)).reverse_complement()))
if logp < rev_logp:
logp = rev_logp
result_scores.append(logp)
sema.release()
def is_true_read(self, read):
read_start = read.reference_start
reference_name = read.reference_name
if not reference_name.startswith('chr'):
reference_name = 'chr' + reference_name
if reference_name == self.reference_vntr.chromosome and self.vntr_start - len(read.seq) < read_start < self.vntr_end:
return True
return False
def get_min_score_to_select_a_read(self, read_length):
if self.reference_vntr.scaled_score is None or self.reference_vntr.scaled_score == 0:
return None
return self.reference_vntr.scaled_score * read_length
@staticmethod
def recruit_read(logp, vpath, min_score_to_count_read, read_length):
if min_score_to_count_read is not None and logp > min_score_to_count_read:
return True
matches = get_number_of_matches_in_vpath(vpath)
if min_score_to_count_read is None and matches >= 0.9 * read_length and logp > -read_length:
return True
return False
def process_unmapped_read_with_dnn(self, read_segment, hmm, recruitment_score, vntr_bp_in_unmapped_reads, selected_reads, compute_reverse, dnn_model):
logging.info('process unmapped read with DNN')
if read_segment.count('N') <= 0:
sequence = read_segment.upper()
forward_dnn_read = False
reverse_dnn_read = False
logp = 0
vpath = []
rev_logp = 0
rev_vpath = []
embedding = get_embedding_of_string(sequence)
selected = dnn_model.predict(numpy.array([embedding]), batch_size=1)[0]
if selected[0] > selected[1]:
logging.info('%s and %s' % (selected[0], selected[1]))
forward_dnn_read = True
if compute_reverse:
reverse_sequence = str(Seq(sequence).reverse_complement())
embedding = get_embedding_of_string(reverse_sequence)
selected = dnn_model.predict(numpy.array([embedding]), batch_size=1)[0]
if selected[0] > selected[1]:
reverse_dnn_read = True
if forward_dnn_read or reverse_dnn_read:
logging.info('computing HMM viterbi')
if forward_dnn_read:
logp, vpath = hmm.viterbi(sequence)
if reverse_dnn_read:
rev_logp, rev_vpath = hmm.viterbi(reverse_sequence)
if logp < rev_logp:
logging.info('using reversed read')
sequence = reverse_sequence
logp = rev_logp
vpath = rev_vpath
logging.info('this is a VNTR read')
repeat_bps = get_number_of_repeat_bp_matches_in_vpath(vpath)
if self.recruit_read(logp, vpath, recruitment_score, len(sequence)):
if repeat_bps > self.min_repeat_bp_to_count_repeats:
vntr_bp_in_unmapped_reads.value += repeat_bps
if repeat_bps > self.min_repeat_bp_to_add_read:
selected_reads.append(SelectedRead(sequence, logp, vpath))
def process_unmapped_read(self, sema, read_segment, hmm, recruitment_score, vntr_bp_in_unmapped_reads,
selected_reads, compute_reverse=True):
if read_segment.count('N') <= 0:
sequence = read_segment.upper()
logp, vpath = hmm.viterbi(sequence)
if compute_reverse:
reverse_sequence = str(Seq(sequence).reverse_complement())
rev_logp, rev_vpath = hmm.viterbi(reverse_sequence)
if logp < rev_logp:
sequence = reverse_sequence
logp = rev_logp
vpath = rev_vpath
repeat_bps = get_number_of_repeat_bp_matches_in_vpath(vpath)
if self.recruit_read(logp, vpath, recruitment_score, len(sequence)):
if repeat_bps > self.min_repeat_bp_to_count_repeats:
vntr_bp_in_unmapped_reads.value += repeat_bps
if repeat_bps > self.min_repeat_bp_to_add_read:
selected_reads.append(SelectedRead(sequence, logp, vpath))
if sema is not None:
sema.release()
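# Likelihood-ratio test for a frameshift: compare how likely the observed indel count is under
# sequencing error alone versus under a true indel, and call a frameshift when error is >=100x less likely.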
def identify_frameshift(self, location_coverage, observed_indel_transitions, expected_indels, error_rate=0.01):
if observed_indel_transitions >= location_coverage:
return True
from scipy.stats import binom
sequencing_error_prob = binom.pmf(observed_indel_transitions, location_coverage, error_rate)
frameshift_prob = binom.pmf(observed_indel_transitions, location_coverage, expected_indels)
prob = sequencing_error_prob / frameshift_prob
return prob < 0.01
def find_frameshift_from_selected_reads(self, selected_reads):
mutations = {}
repeating_bps_in_data = 0
repeats_lengths_distribution = []
for read in selected_reads:
visited_states = [state.name for idx, state in read.vpath[1:-1]]
repeats_lengths = get_repeating_pattern_lengths(visited_states)
repeats_lengths_distribution += repeats_lengths
current_repeat = None
repeating_bps_in_data += get_number_of_repeat_bp_matches_in_vpath(read.vpath)
for i in range(len(visited_states)):
if visited_states[i].endswith('fix') or visited_states[i].startswith('M'):
continue
if visited_states[i].startswith('unit_start'):
if current_repeat is None:
current_repeat = 0
else:
current_repeat += 1
if current_repeat is None or current_repeat >= len(repeats_lengths):
continue
if not visited_states[i].startswith('I') and not visited_states[i].startswith('D'):
continue
if repeats_lengths[current_repeat] == len(self.reference_vntr.pattern):
continue
state = visited_states[i].split('_')[0]
if state.startswith('I'):
state += get_emitted_basepair_from_visited_states(visited_states[i], visited_states, read.sequence)
if abs(repeats_lengths[current_repeat] - len(self.reference_vntr.pattern)) <= 2:
if state not in mutations.keys():
mutations[state] = 0
mutations[state] += 1
sorted_mutations = sorted(mutations.items(), key=lambda x: x[1])
logging.debug('sorted mutations: %s ' % sorted_mutations)
frameshift_candidate = sorted_mutations[-1] if len(sorted_mutations) else (None, 0)
logging.info(sorted(repeats_lengths_distribution))
logging.info('Frameshift Candidate and Occurrence %s: %s' % frameshift_candidate)
logging.info('Observed repeating base pairs in data: %s' % repeating_bps_in_data)
avg_bp_coverage = float(repeating_bps_in_data) / self.reference_vntr.get_length() / 2
logging.info('Average coverage for each base pair: %s' % avg_bp_coverage)
expected_indel_transitions = 1 / avg_bp_coverage
if self.identify_frameshift(avg_bp_coverage, frameshift_candidate[1], expected_indel_transitions):
logging.info('There is a frameshift at %s' % frameshift_candidate[0])
return frameshift_candidate[0]
return None
def read_flanks_repeats_with_confidence(self, vpath):
minimum_left_flanking = 5
minimum_right_flanking = 5
if self.reference_vntr.id in self.minimum_left_flanking_size:
minimum_left_flanking = self.minimum_left_flanking_size[self.reference_vntr.id]
if self.reference_vntr.id in self.minimum_right_flanking_size:
minimum_right_flanking = self.minimum_right_flanking_size[self.reference_vntr.id]
if get_left_flanking_region_size_in_vpath(vpath) > minimum_left_flanking:
if get_right_flanking_region_size_in_vpath(vpath) > minimum_right_flanking:
return True
return False
def check_if_flanking_regions_align_to_str(self, read_str, length_distribution, spanning_reads):
flanking_region_size = 100
left_flanking = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking = self.reference_vntr.right_flanking_region[:flanking_region_size]
left_alignments = pairwise2.align.localms(read_str, left_flanking, 1, -1, -1, -1)
if len(left_alignments) < 1:
return
min_left, max_left = 10e9, 0
for aln in left_alignments:
if aln[2] < len(left_flanking) * (1 - settings.MAX_ERROR_RATE):
continue
min_left = min(min_left, aln[3])
max_left = max(max_left, aln[3])
if max_left - min_left > 30:
with open('vntr_complex.txt', 'a') as out:
out.write('%s %s\n' % (self.reference_vntr.id, max_left - min_left))
left_align = left_alignments[0]
if left_align[2] < len(left_flanking) * (1 - settings.MAX_ERROR_RATE):
return
right_alignments = pairwise2.align.localms(read_str, right_flanking, 1, -1, -1, -1)
if len(right_alignments) < 1:
return
min_right, max_right = 10e9, 0
for aln in right_alignments:
if aln[2] < len(right_flanking) * (1 - settings.MAX_ERROR_RATE):
continue
min_right = min(min_right, aln[3])
max_right = max(max_right, aln[3])
if max_right - min_right > 30:
with open('vntr_complex.txt', 'a') as out:
out.write('%s %s\n' % (self.reference_vntr.id, max_right - min_right))
right_align = right_alignments[0]
if right_align[2] < len(right_flanking) * (1 - settings.MAX_ERROR_RATE):
return
if right_align[3] < left_align[3]:
return
spanning_reads.append(read_str[left_align[3]:right_align[3]+flanking_region_size])
length_distribution.append(right_align[3] - (left_align[3] + flanking_region_size))
def check_if_pacbio_read_spans_vntr(self, sema, read, length_distribution, spanning_reads):
self.check_if_flanking_regions_align_to_str(str(read.seq).upper(), length_distribution, spanning_reads)
reverse_complement_str = str(Seq(str(read.seq)).reverse_complement())
self.check_if_flanking_regions_align_to_str(reverse_complement_str.upper(), length_distribution, spanning_reads)
sema.release()
def check_if_pacbio_mapped_read_spans_vntr(self, sema, read, length_distribution, spanning_reads):
flanking_region_size = 100
region_start = self.reference_vntr.start_point - flanking_region_size
region_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
if read.get_reference_positions()[0] < region_start and read.get_reference_positions()[-1] > region_end:
read_region_start = None
read_region_end = None
for read_pos, ref_pos in enumerate(read.get_reference_positions()):
if ref_pos >= region_start and read_region_start is None:
read_region_start = read_pos
if ref_pos >= region_end and read_region_end is None:
read_region_end = read_pos
if read_region_start is not None and read_region_end is not None:
result = read.seq[read_region_start:read_region_end+flanking_region_size]
if read.is_reverse:
result = str(Seq(result).reverse_complement())
spanning_reads.append(result)
length_distribution.append(len(result) - flanking_region_size * 2)
sema.release()
@time_usage
def get_spanning_reads_of_unaligned_pacbio_reads(self, unmapped_filtered_reads):
sema = Semaphore(settings.CORES)
manager = Manager()
shared_length_distribution = manager.list()
shared_spanning_reads = manager.list()
process_list = []
for read in unmapped_filtered_reads:
sema.acquire()
p = Process(target=self.check_if_pacbio_read_spans_vntr, args=(sema, read, shared_length_distribution,
shared_spanning_reads))
process_list.append(p)
p.start()
for p in process_list:
p.join()
logging.info('length_distribution of unmapped spanning reads: %s' % list(shared_length_distribution))
return list(shared_spanning_reads), list(shared_length_distribution)
@time_usage
def get_spanning_reads_of_aligned_pacbio_reads(self, alignment_file):
sema = Semaphore(settings.CORES)
manager = Manager()
length_distribution = manager.list()
mapped_spanning_reads = manager.list()
vntr_start = self.reference_vntr.start_point
vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
region_start = vntr_start
region_end = vntr_end
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:]
process_list = []
for read in samfile.fetch(chromosome, region_start, region_end):
sema.acquire()
p = Process(target=self.check_if_pacbio_mapped_read_spans_vntr, args=(sema, read, length_distribution,
mapped_spanning_reads))
process_list.append(p)
p.start()
for p in process_list:
p.join()
logging.info('length_distribution of mapped spanning reads: %s' % list(length_distribution))
return list(mapped_spanning_reads)
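# Conditional likelihood of observing a read supporting ck repeat units given candidate genotype (ci, cj);
# r is the per-observation error probability and r_e geometrically penalizes larger deviations from either allele.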
def get_conditional_likelihood(self, ck, ci, cj, ru_counts, r, r_e):
if ck == ci == cj:
return 1-r
if cj == 0: # CHECK LATER
return 0.5 * (1-r)
if ck == ci:
return 0.5 * ((1-r) + r_e ** abs(ck-cj))
if ck == cj:
return 0.5 * ((1-r) + r_e ** abs(ck-ci))
if ck != ci and ck != cj:
return 0.5 * (r_e ** abs(ck-ci) + r_e ** abs(ck-cj))
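# Maximum-a-posteriori genotyping: enumerate candidate genotypes from the observed repeat counts,
# multiply the per-observation conditional likelihoods, and return the genotype with the highest
# normalized posterior probability.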
def find_genotype_based_on_observed_repeats(self, observed_copy_numbers):
ru_counts = {}
for cn in observed_copy_numbers:
if cn not in ru_counts.keys():
ru_counts[cn] = 0
ru_counts[cn] += 1
if len(ru_counts.keys()) < 2:
priors = 0.5
ru_counts[0] = 1
else:
priors = 1.0 / (len(ru_counts.keys()) * (len(ru_counts.keys())-1) / 2)
import operator
ru_counts = sorted(ru_counts.items(), key=operator.itemgetter(1), reverse=True)
r = 0.03
r_e = r / (2 + r)
prs = {}
for ck, occ in ru_counts:
if ck == 0:
continue
for i in range(len(ru_counts)):
ci = ru_counts[i][0]
for j in range(len(ru_counts)):
if j < i:
continue
if self.is_haploid and i != j:
continue
cj = ru_counts[j][0]
if (ci, cj) not in prs.keys():
prs[(ci, cj)] = []
prs[(ci, cj)].append(self.get_conditional_likelihood(ck, ci, cj, ru_counts, r, r_e) ** occ)
posteriors = {}
import numpy
for key in prs.keys():
prs[key] = numpy.prod(numpy.array(prs[key]))
posteriors[key] = prs[key] * priors
sum_of_probs = sum(posteriors.values())
max_prob = 1e-20
result = None
for key, value in posteriors.items():
if value / sum_of_probs > max_prob:
max_prob = value / sum_of_probs
result = key
logging.info('Maximum probability for genotyping: %s' % max_prob)
return result, max_prob
def get_dominant_copy_numbers_from_spanning_reads(self, spanning_reads):
if len(spanning_reads) < 1:
logging.info('There is no spanning read')
return None
max_length = 0
for read in spanning_reads:
if len(read) - 100 > max_length:
max_length = len(read) - 100
max_copies = int(round(max_length / float(len(self.reference_vntr.pattern))))
# max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments()))
vntr_matcher = self.build_vntr_matcher_hmm(max_copies)
observed_copy_numbers = []
for haplotype in spanning_reads:
logp, vpath = vntr_matcher.viterbi(haplotype)
rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement()))
if logp < rev_logp:
vpath = rev_vpath
observed_copy_numbers.append(get_number_of_repeats_in_vpath(vpath))
logging.info('flanked repeats: %s' % observed_copy_numbers)
return self.find_genotype_based_on_observed_repeats(observed_copy_numbers)
@time_usage
def get_haplotype_copy_numbers_from_spanning_reads(self, spanning_reads):
if len(spanning_reads) < 1:
logging.info('There is no spanning read')
return None
max_length = 0
for read in spanning_reads:
if len(read) - 100 > max_length:
max_length = len(read) - 100
max_copies = int(round(max_length / float(len(self.reference_vntr.pattern))))
max_copies = min(max_copies, 2 * len(self.reference_vntr.get_repeat_segments()))
vntr_matcher = self.build_vntr_matcher_hmm(max_copies)
haplotyper = PacBioHaplotyper(spanning_reads)
haplotypes = haplotyper.get_error_corrected_haplotypes()
copy_numbers = []
for haplotype in haplotypes:
# print('haplotype: %s' % haplotype)
logp, vpath = vntr_matcher.viterbi(haplotype)
rev_logp, rev_vpath = vntr_matcher.viterbi(str(Seq(haplotype).reverse_complement()))
if logp < rev_logp:
vpath = rev_vpath
copy_numbers.append(get_number_of_repeats_in_vpath(vpath))
return copy_numbers
def find_ru_counts_with_naive_approach(self, length_dist, spanning_reads):
haplotyper = PacBioHaplotyper(spanning_reads)
haplotypes = haplotyper.get_error_corrected_haplotypes(1)
flanking_region_lengths = []
new_spanning_reads = []
if len(haplotypes) == 0:
return None
self.check_if_flanking_regions_align_to_str(haplotypes[0].upper(), flanking_region_lengths, new_spanning_reads)
reverse_complement_str = str(Seq(haplotypes[0]).reverse_complement())
self.check_if_flanking_regions_align_to_str(reverse_complement_str.upper(), flanking_region_lengths, new_spanning_reads)
if len(flanking_region_lengths) > 0:
return [round(flanking_region_lengths[0] / len(self.reference_vntr.pattern))] * 2
else:
return None
def find_ru_counts_from_average_flanking_region_distance(self, length_dist):
if len(length_dist):
ru_counts_list = [round(length / len(self.reference_vntr.pattern)) for length in length_dist]
ru_count_frequencies = Counter(ru_counts_list).most_common()  # (ru_count, frequency) pairs, most frequent first
copy_numbers = [ru_count_frequencies[0][0]]
if len(ru_count_frequencies) > 1 and ru_count_frequencies[1][1] > ru_count_frequencies[0][1] / 5:
copy_numbers.append(ru_count_frequencies[1][0])
else:
copy_numbers = copy_numbers * 2
else:
copy_numbers = None
return copy_numbers
@time_usage
def find_repeat_count_from_pacbio_alignment_file(self, alignment_file, unmapped_filtered_reads):
logging.debug('finding repeat count from pacbio alignment file for %s' % self.reference_vntr.id)
unaligned_spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads)
mapped_spanning_reads = self.get_spanning_reads_of_aligned_pacbio_reads(alignment_file)
spanning_reads = mapped_spanning_reads + unaligned_spanning_reads
copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads)
return copy_numbers
@time_usage
def find_repeat_count_from_pacbio_reads(self, unmapped_filtered_reads, naive=False):
logging.debug('finding repeat count from pacbio reads file for %s' % self.reference_vntr.id)
spanning_reads, length_dist = self.get_spanning_reads_of_unaligned_pacbio_reads(unmapped_filtered_reads)
if naive:
copy_numbers = self.find_ru_counts_with_naive_approach(length_dist, spanning_reads)
else:
copy_numbers = self.get_dominant_copy_numbers_from_spanning_reads(spanning_reads)
return copy_numbers
@time_usage
def iteratively_update_model(self, alignment_file, unmapped_filtered_reads, selected_reads, hmm):
updated_selected_reads = selected_reads
fitness = sum([read.logp for read in selected_reads])
read_length = len(selected_reads[0].sequence)
reference_repeats = []
for reference_repeat in self.reference_vntr.get_repeat_segments():
sequence = str(reference_repeat).upper()
logp, vpath = hmm.viterbi(sequence)
reference_repeats.append(SelectedRead(sequence, logp, vpath))
logging.info('initial fitness: %s' % fitness)
flanking_region_size = read_length
left_flanking_region = self.reference_vntr.left_flanking_region[-flanking_region_size:]
right_flanking_region = self.reference_vntr.right_flanking_region[:flanking_region_size]
copies = self.get_copies_for_hmm(read_length)
max_steps = 1000
min_improvement = 1
for i in range(max_steps):
old_fitness = fitness
current_vpaths = [(read.sequence, read.vpath) for read in updated_selected_reads + reference_repeats]
hmm = get_read_matcher_model(left_flanking_region, right_flanking_region, None, copies, current_vpaths)
updated_selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads, False, hmm)
fitness = sum([read.logp for read in selected_reads])
if fitness - old_fitness < min_improvement:
break
logging.info('final fitness: %s' % fitness)
return updated_selected_reads
@time_usage
def select_illumina_reads(self, alignment_file, unmapped_filtered_reads, update=False, hmm=None):
recruitment_score = None
selected_reads = []
vntr_bp_in_unmapped_reads = Value('d', 0.0)
number_of_reads = 0
read_length = 150
for read_segment in unmapped_filtered_reads:
if number_of_reads == 0:
read_length = len(str(read_segment.seq))
number_of_reads += 1
if not hmm:
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
if not recruitment_score:
recruitment_score = self.get_min_score_to_select_a_read(read_length)
if len(read_segment.seq) < read_length:
continue
self.process_unmapped_read(None, str(read_segment.seq), hmm, recruitment_score, vntr_bp_in_unmapped_reads,
selected_reads)
logging.debug('vntr base pairs in unmapped reads: %s' % vntr_bp_in_unmapped_reads.value)
vntr_bp_in_mapped_reads = 0
vntr_start = self.reference_vntr.start_point
vntr_end = self.reference_vntr.start_point + self.reference_vntr.get_length()
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
chromosome = self.reference_vntr.chromosome if reference == 'HG19' else self.reference_vntr.chromosome[3:]
for read in samfile.fetch(chromosome, vntr_start, vntr_end):
if not recruitment_score:
read_length = len(read.seq)
recruitment_score = self.get_min_score_to_select_a_read(read_length)
if not hmm:
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
if read.is_unmapped:
continue
if len(read.seq) < int(read_length * 0.9):
logging.debug('Rejecting read for short length: %s' % read.seq)
continue
read_end = read.reference_end if read.reference_end else read.reference_start + len(read.seq)
if vntr_start - read_length < read.reference_start < vntr_end or vntr_start < read_end < vntr_end:
if read.seq.count('N') <= 0:
sequence = str(read.seq).upper()
logp, vpath = hmm.viterbi(sequence)
rev_logp, rev_vpath = hmm.viterbi(str(Seq(read.seq).reverse_complement()).upper())
if logp < rev_logp:
sequence = str(Seq(read.seq).reverse_complement()).upper()
logp = rev_logp
vpath = rev_vpath
length = len(sequence)
if is_low_quality_read(read) and not self.recruit_read(logp, vpath, recruitment_score, length):
logging.debug('Rejected Read: %s' % sequence)
continue
selected_reads.append(SelectedRead(sequence, logp, vpath, read.mapq, read.reference_start))
end = min(read_end, vntr_end)
start = max(read.reference_start, vntr_start)
vntr_bp_in_mapped_reads += end - start
logging.debug('vntr base pairs in mapped reads: %s' % vntr_bp_in_mapped_reads)
if update:
selected_reads = self.iteratively_update_model(alignment_file, unmapped_filtered_reads, selected_reads, hmm)
return selected_reads
@time_usage
def find_frameshift_from_alignment_file(self, alignment_file, unmapped_filtered_reads):
logging.debug('finding frameshift from alignment file for %s' % self.reference_vntr.id)
selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads)
return self.find_frameshift_from_selected_reads(selected_reads)
@time_usage
def get_ru_count_with_coverage_method(self, pattern_occurrences, total_counted_vntr_bp, average_coverage):
haplotypes = 1 if self.is_haploid else 2
estimate = [int(pattern_occurrences / (float(average_coverage) * haplotypes))] * 2
return estimate
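# NOTE: the code below the early return above is unreachable as written (it also references
# 'alignment_file', which is not defined in this scope).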
pattern_occurrences = total_counted_vntr_bp / float(len(self.reference_vntr.pattern))
read_mode = self.get_alignment_file_read_mode(alignment_file)
samfile = pysam.AlignmentFile(alignment_file, read_mode, reference_filename=self.reference_filename)
reference = get_reference_genome_of_alignment_file(samfile)
bias_detector = CoverageBiasDetector(alignment_file, self.reference_vntr.chromosome, reference)
coverage_corrector = CoverageCorrector(bias_detector.get_gc_content_coverage_map())
logging.info('Sequencing mean coverage: %s' % coverage_corrector.get_sequencing_mean_coverage())
observed_copy_number = pattern_occurrences / coverage_corrector.get_sequencing_mean_coverage()
scaled_copy_number = coverage_corrector.get_scaled_coverage(self.reference_vntr, observed_copy_number)
logging.info('scaled copy number and observed copy number: %s, %s' % (scaled_copy_number, observed_copy_number))
return [scaled_copy_number]
@time_usage
def find_repeat_count_from_alignment_file(self, alignment_file, unmapped_filtered_reads, average_coverage=None,
update=False):
logging.debug('finding repeat count from alignment file for %s' % self.reference_vntr.id)
selected_reads = self.select_illumina_reads(alignment_file, unmapped_filtered_reads, update)
covered_repeats = []
flanking_repeats = []
total_counted_vntr_bp = 0
for selected_read in selected_reads:
repeats = get_number_of_repeats_in_vpath(selected_read.vpath)
total_counted_vntr_bp += get_number_of_repeat_bp_matches_in_vpath(selected_read.vpath)
logging.debug('logp of read: %s' % str(selected_read.logp))
logging.debug('left flanking size: %s' % get_left_flanking_region_size_in_vpath(selected_read.vpath))
logging.debug('right flanking size: %s' % get_right_flanking_region_size_in_vpath(selected_read.vpath))
logging.debug(selected_read.sequence)
visited_states = [state.name for idx, state in selected_read.vpath[1:-1]]
if self.read_flanks_repeats_with_confidence(selected_read.vpath):
logging.debug('spanning read visited states :%s' % visited_states)
logging.debug('repeats: %s' % repeats)
covered_repeats.append(repeats)
else:
flanking_repeats.append(repeats)
flanking_repeats = sorted(flanking_repeats)
logging.info('covered repeats: %s' % covered_repeats)
logging.info('flanking repeats: %s' % flanking_repeats)
min_valid_flanked = max(covered_repeats) if len(covered_repeats) > 0 else 0
max_flanking_repeat = [r for r in flanking_repeats if r == max(flanking_repeats) and r >= min_valid_flanked]
if len(max_flanking_repeat) < 5:
max_flanking_repeat = []
exact_genotype, max_prob = self.find_genotype_based_on_observed_repeats(covered_repeats + max_flanking_repeat)
if exact_genotype is not None:
exact_genotype_log = '/'.join([str(cn) for cn in sorted(exact_genotype)])
else:
exact_genotype_log = 'None'
logging.info('RU count lower bounds: %s' % exact_genotype_log)
if average_coverage is None:
return GenotypeResult(exact_genotype, len(selected_reads), len(covered_repeats), len(flanking_repeats),
max_prob)
pattern_occurrences = sum(flanking_repeats) + sum(covered_repeats)
return self.get_ru_count_with_coverage_method(pattern_occurrences, total_counted_vntr_bp, average_coverage)
def find_repeat_count_from_short_reads(self, short_read_files, working_directory='./'):
"""
Map short read sequencing data to human reference genome (hg19) and call find_repeat_count_from_alignment_file
:param short_read_files: short read sequencing data
:param working_directory: directory for generating the outputs
"""
alignment_file = '' + short_read_files
# TODO: use bowtie2 to map short reads to hg19
return self.find_repeat_count_from_alignment_file(alignment_file, working_directory)
@time_usage
def train_classifier_threshold(self, reference_file, read_length=150):
hmm = self.get_vntr_matcher_hmm(read_length=read_length)
simulated_true_reads = self.simulate_true_reads(read_length)
simulated_false_filtered_reads = self.simulate_false_filtered_reads(reference_file)
processed_true_reads = self.find_hmm_score_of_simulated_reads(hmm, simulated_true_reads)
processed_false_reads = self.find_hmm_score_of_simulated_reads(hmm, simulated_false_filtered_reads)
recruitment_score = self.find_recruitment_score_threshold(processed_true_reads, processed_false_reads)
return recruitment_score / float(read_length)
@time_usage
def find_hmm_score_of_simulated_reads(self, hmm, reads):
initial_recruitment_score = -10000
manager = Manager()
processed_reads = manager.list([])
vntr_bp_in_reads = Value('d', 0.0)
for read_segment in reads:
self.process_unmapped_read(None, read_segment, hmm, initial_recruitment_score, vntr_bp_in_reads, processed_reads, False)
return processed_reads
@time_usage
def simulate_false_filtered_reads(self, reference_file, min_match=3):
alphabet = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
m = 4194301
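# Rabin-Karp style rolling hash: keyword-size windows are hashed base-4 over {A,C,G,T} modulo the prime m,
# so each window's hash can be updated in O(1) as the window slides along the chromosome.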
def get_hash(string):
result = 0
for k in range(len(string)):
result = (result + alphabet[string[k].upper()] * (4 ** (keyword_size - k - 1))) % m
return result
false_filtered_reads = []
MAX_FALSE_READS = 10000
read_size = 150
keyword_size = 11
keywords = self.get_keywords_for_filtering(True, keyword_size)
hashed_keywords = set([get_hash(keyword) for keyword in keywords])
match_positions = []
vntr_start = self.reference_vntr.start_point
vntr_end = vntr_start + self.reference_vntr.get_length()
fasta_sequences = SeqIO.parse(open(reference_file), 'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
if name != self.reference_vntr.chromosome:
continue
window_hash = None
for i in range(0, len(sequence) - keyword_size):
if sequence[i].upper() not in 'ACTG' or sequence[i - 1 + keyword_size].upper() not in 'ACTG':
continue
if window_hash is None or sequence[i - 1].upper() not in 'ACTG':
if 'N' in sequence[i:i + keyword_size].upper():
window_hash = None
continue
window_hash = get_hash(sequence[i:i + keyword_size])
continue
window_hash -= alphabet[sequence[i - 1].upper()] * (4 ** (keyword_size - 1))
window_hash = (window_hash * 4 + alphabet[sequence[i - 1 + keyword_size].upper()]) % m
if window_hash in hashed_keywords:
if name == self.reference_vntr.chromosome and vntr_start - read_size < i < vntr_end:
continue
if sequence[i:i + keyword_size].upper() in keywords:
match_positions.append(i)
if len(match_positions) >= min_match and match_positions[-1] - match_positions[-min_match] < read_size:
for j in range(match_positions[-1] - read_size, match_positions[-min_match], 5):
if 'N' not in sequence[j:j + read_size].upper():
false_filtered_reads.append(sequence[j:j + read_size])
if len(false_filtered_reads) > MAX_FALSE_READS:
break
return false_filtered_reads
def simulate_true_reads(self, read_length):
vntr = ''.join(self.reference_vntr.get_repeat_segments())
right_flank = self.reference_vntr.right_flanking_region
left_flank = self.reference_vntr.left_flanking_region
locus = left_flank[-read_length:] + vntr + right_flank[:read_length]
step_size = 1
alphabet = ['A', 'C', 'G', 'T']
sim_reads = []
for i in range(0, len(locus) - read_length + 1, step_size):
sim_reads.append(locus[i:i+read_length].upper())
# add 4 special reads to sim_read
for copies in range(1, len(self.reference_vntr.get_repeat_segments()) - 1):
vntr_section = ''.join(self.reference_vntr.get_repeat_segments()[:copies])
for i in range(1, 11):
sim_reads.append((left_flank[-i:] + vntr_section + right_flank)[:read_length])
sim_reads.append((left_flank + vntr_section + right_flank[:i])[-read_length:])
min_copies = int(read_length / len(vntr)) + 1
for i in range(1, 21):
# print(len((vntr * min_copies)[i:read_length+i]))
sim_reads.append((vntr * min_copies)[i:read_length+i])
# print(len((vntr * min_copies)[-read_length-i:-i]))
sim_reads.append((vntr * min_copies)[-read_length-i:-i])
simulated_true_reads = []
for sim_read in sim_reads:
from random import randint
for i in range(randint(1, 2)):
temp_read = list(sim_read)
temp_read[randint(0, len(sim_read)-1)] = alphabet[randint(0, 3)]
sim_read = ''.join(temp_read)
simulated_true_reads.append(sim_read)
return simulated_true_reads
@time_usage
def find_recruitment_score_threshold(self, processed_true_reads, processed_false_reads):
from sklearn.linear_model import LogisticRegression
true_scores = [read.logp for read in processed_true_reads]
false_scores = [read.logp for read in processed_false_reads]
if len(false_scores) == 0:
false_scores = [min(true_scores) - 2]
clf = LogisticRegression()
x = [[score] for score in true_scores + false_scores]
y = [1] * len(true_scores) + [0] * len(false_scores)
clf.fit(x, y)
recruitment_score = max(true_scores)
for i in range(-1, -300, -1):
if int(clf.predict([[i]])) == 0:
recruitment_score = i
break
return recruitment_score
#encoding=utf-8
from __future__ import unicode_literals
import sys
sys.path.append("../")
import codecs
import jieba
import jieba.posseg
import jieba.analyse
import pickle
# extract keys dict
def create_keys_dict():
all_keys = {'女装'}
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\origin_data\alldata.txt', 'r', 'utf-8') as f:
while True:
pline = f.readline()
if pline == '':
break
keys = set(pline.strip().split('\t')[0].split(' '))
all_keys = all_keys|keys
all_keys = list(all_keys)
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\keys_dict.txt', 'w', 'utf-8') as f:
for key in all_keys:
f.write(key)
f.write('\n')
# Build a keyword -> supporting-sentence map: for each frequent word, keep one sentence
# from the supporting facts that contains it.
def create_know_pair():
freqwords=[]
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\500freqword.str', 'r', 'utf-8') as f:
while True:
pline = f.readline()
if pline == '':
break
freqwords.append(pline.strip())
print(len(freqwords))
# load the product keyword dictionary so jieba keeps these keys as single tokens
jieba.load_userdict(r"C:\Users\jmlu\Desktop\Kobe\KOBE\data\keys_dict.txt")
knowledge_dict = {}
line_num = 0
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\aspect-user\preprocessed\train.supporting_facts_str', 'r', 'utf-8') as f:
while True:
pline = f.readline()
if pline == '':
break
pline = ''.join(pline.strip().split(' '))
sentences = pline.split('。')
for s in sentences:
words = jieba.posseg.cut(s)
for word, flag in words:
if (word in freqwords):
knowledge_dict[word] = s+'。'
# print(word, s)
break
line_num+= 1
if line_num %100 == 0:
print(line_num)
# break
print(len(knowledge_dict))
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\know_dict.txt', 'w', 'utf-8') as f:
for key, know in knowledge_dict.items():
f.write(key)
f.write('\t')
f.write(know)
f.write('\n')
def create_know_dict():
know_dict = {}
with codecs.open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\know_dict.txt', 'r', 'utf-8') as f:
while True:
pline = f.readline()
if pline == '':
break
pline = (pline.strip().split('\t'))
know_dict[pline[0]] = pline[1]
# print(know_dict)
fw = open(r'C:\Users\jmlu\Desktop\Kobe\KOBE\data\know_dict.pkl', "wb")
print(fw)
pickle.dump(know_dict, fw)
fw.close()
"""
See https://en.wikipedia.org/wiki/Travelling_salesman_problem
Examples of Execution:
python3 TravelingSalesman.py -data=TravelingSalesman_10-20-0.json
python3 TravelingSalesman.py -data=TravelingSalesman_10-20-0.json -variant=table
"""
from pycsp3 import *
distances = data
nCities = len(distances)
# c[i] is the ith city of the tour
c = VarArray(size=nCities, dom=range(nCities))
# d[i] is the distance between the cities i and i+1 chosen in the tour
d = VarArray(size=nCities, dom=distances)
satisfy(
# Visiting each city only once
AllDifferent(c)
)
if not variant():
satisfy(
# computing the distance between any two successive cities in the tour
distances[c[i]][c[(i + 1) % nCities]] == d[i] for i in range(nCities)
)
elif variant("table"):
table = {(i, j, distances[i][j]) for i in range(nCities) for j in range(nCities) if i != j}
satisfy(
# computing the distance between any two successive cities in the tour
(c[i], c[(i + 1) % nCities], d[i]) in table for i in range(nCities)
)
minimize(
# minimizing the travelled distance
Sum(d)
)
""" Comments
1) writing dom=distances is equivalent to (and more compact than) writing dom={v for row in distances for v in row}
"""
import pandas as pd
class H3Index:
def __init__(self, h3_to_chains_path):
self.h3_to_chains = pd.read_pickle(h3_to_chains_path)
self.valid = set([x for x in self.h3_to_chains.keys()])
self.h3_to_index = {h3: i for i, h3 in enumerate(self.h3_to_chains.keys())}
self.r_h3_to_index = {i: h3 for i, h3 in enumerate(self.h3_to_chains.keys())}
def filter_by(self, h3, chains):
return self.h3_to_chains[h3].intersection(chains)
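# Minimal usage sketch (the pickle path and arguments are hypothetical):
#   index = H3Index('h3_to_chains.pkl')
#   nearby = index.filter_by(some_h3_cell, candidate_chain_ids)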
# The experiment logic and analysis
import copy
import gym
import json
import matplotlib
import multiprocessing as mp
import warnings
import numpy as np
import platform
import pandas as pd
import traceback
from keras import backend as K
from os import path, environ
from rl.util import *
from rl.agent import *
from rl.memory import *
from rl.policy import *
from rl.preprocessor import *
# TODO: fix multiprocessing breaking on macOS,
# except when running -b with the agg backend
# (no GUI rendered, but graphs are still saved)
# set only if it's not macOS
if environ.get('CI') or platform.system() == 'Darwin':
matplotlib.rcParams['backend'] = 'agg'
else:
matplotlib.rcParams['backend'] = 'TkAgg'
np.seterr(all='raise')
warnings.filterwarnings("ignore", module="matplotlib")
GREF = globals()
PARALLEL_PROCESS_NUM = mp.cpu_count()
ASSET_PATH = path.join(path.dirname(__file__), 'asset')
SESS_SPECS = json.loads(open(
path.join(ASSET_PATH, 'sess_specs.json')).read())
PROBLEMS = json.loads(open(
path.join(ASSET_PATH, 'problems.json')).read())
# the keys and their defaults need to be implemented by a sys_var
# the constants (capitalized) are problem configs,
# set in asset/problems.json
REQUIRED_SYS_KEYS = {
'RENDER': None,
'GYM_ENV_NAME': None,
'SOLVED_MEAN_REWARD': None,
'MAX_EPISODES': None,
'REWARD_MEAN_LEN': None,
'epi': 0,
't': 0,
'done': False,
'loss': [],
'total_rewards_history': [],
'explore_history': [],
'mean_rewards_history': [],
'mean_rewards': 0,
'total_rewards': 0,
'solved': False,
}
class Grapher(object):
'''
Grapher object that belongs to a Session
to draw graphs from its data
'''
def __init__(self, session):
import matplotlib.pyplot as plt
plt.rcParams['toolbar'] = 'None' # mute matplotlib toolbar
self.plt = plt
self.session = session
self.graph_filename = self.session.graph_filename
self.subgraphs = {}
self.figure = self.plt.figure(facecolor='white', figsize=(8, 9))
self.figure.suptitle(wrap_text(self.session.session_id))
self.init_figure()
def init_figure(self):
if environ.get('CI'):
return
# graph 1
ax1 = self.figure.add_subplot(
311,
frame_on=False,
title="\n\ntotal rewards per episode",
ylabel='total rewards')
p1, = ax1.plot([], [])
self.subgraphs['total rewards'] = (ax1, p1)
ax1e = ax1.twinx()
ax1e.set_ylabel('exploration rate').set_color('r')
ax1e.set_frame_on(False)
p1e, = ax1e.plot([], [], 'r')
self.subgraphs['e'] = (ax1e, p1e)
# graph 2
ax2 = self.figure.add_subplot(
312,
frame_on=False,
title='mean rewards over last 100 episodes',
ylabel='mean rewards')
p2, = ax2.plot([], [], 'g')
self.subgraphs['mean rewards'] = (ax2, p2)
# graph 3
ax3 = self.figure.add_subplot(
313,
frame_on=False,
title='loss over time, episode',
ylabel='loss')
p3, = ax3.plot([], [])
self.subgraphs['loss'] = (ax3, p3)
self.plt.tight_layout() # auto-fix spacing
self.plt.ion() # for live plot
def plot(self):
'''do live plotting'''
sys_vars = self.session.sys_vars
if environ.get('CI'):
return
ax1, p1 = self.subgraphs['total rewards']
p1.set_ydata(
sys_vars['total_rewards_history'])
p1.set_xdata(np.arange(len(p1.get_ydata())))
ax1.relim()
ax1.autoscale_view(tight=True, scalex=True, scaley=True)
ax1e, p1e = self.subgraphs['e']
p1e.set_ydata(
sys_vars['explore_history'])
p1e.set_xdata(np.arange(len(p1e.get_ydata())))
ax1e.relim()
ax1e.autoscale_view(tight=True, scalex=True, scaley=True)
ax2, p2 = self.subgraphs['mean rewards']
p2.set_ydata(
sys_vars['mean_rewards_history'])
p2.set_xdata(np.arange(len(p2.get_ydata())))
ax2.relim()
ax2.autoscale_view(tight=True, scalex=True, scaley=True)
ax3, p3 = self.subgraphs['loss']
p3.set_ydata(sys_vars['loss'])
p3.set_xdata(np.arange(len(p3.get_ydata())))
ax3.relim()
ax3.autoscale_view(tight=True, scalex=True, scaley=True)
self.plt.draw()
self.plt.pause(0.01)
self.save()
def save(self):
'''save graph to filename'''
self.figure.savefig(self.graph_filename)
class Session(object):
'''
The base unit of an Experiment
An Experiment for a config, repeated k times,
will run k Sessions, each with identical sess_spec
for a problem, Agent, Memory, Policy, param.
Handles its own data, plots and saves its own graphs
Serialized by the parent experiment_id with its session_id
'''
def __init__(self, experiment, session_num=0, num_of_sessions=1):
self.experiment = experiment
self.session_num = session_num
self.num_of_sessions = num_of_sessions
self.session_id = self.experiment.experiment_id + \
'_s' + str(self.session_num)
log_delimiter('Init Session #{} of {}:\n{}'.format(
self.session_num, self.num_of_sessions, self.session_id))
self.sess_spec = experiment.sess_spec
self.problem = self.sess_spec['problem']
self.Agent = get_module(GREF, self.sess_spec['Agent'])
self.Memory = get_module(GREF, self.sess_spec['Memory'])
self.Policy = get_module(GREF, self.sess_spec['Policy'])
self.PreProcessor = get_module(GREF, self.sess_spec['PreProcessor'])
self.param = self.sess_spec['param']
# init all things, so a session can only be ran once
self.sys_vars = self.init_sys_vars()
self.env = gym.make(self.sys_vars['GYM_ENV_NAME'])
self.preprocessor = self.PreProcessor(**self.param)
self.env_spec = self.set_env_spec()
self.agent = self.Agent(self.env_spec, **self.param)
self.memory = self.Memory(**self.param)
self.policy = self.Policy(**self.param)
self.agent.compile(self.memory, self.policy, self.preprocessor)
# data file and graph
self.base_filename = './data/{}/{}'.format(
self.experiment.prefix_id, self.session_id)
self.graph_filename = self.base_filename + '.png'
# for plotting
self.grapher = Grapher(self)
def init_sys_vars(self):
'''
init the sys vars for a problem by reading from
asset/problems.json, then reset the other sys vars
on reset will add vars (lower cases, see REQUIRED_SYS_KEYS)
'''
sys_vars = PROBLEMS[self.problem]
if not args.render:
sys_vars['RENDER'] = False
if environ.get('CI'):
sys_vars['RENDER'] = False
sys_vars['MAX_EPISODES'] = 4
self.sys_vars = sys_vars
self.reset_sys_vars()
return self.sys_vars
def reset_sys_vars(self):
'''reset and check RL system vars (lower case)
before each new session'''
for k in REQUIRED_SYS_KEYS:
if k.islower():
self.sys_vars[k] = copy.copy(REQUIRED_SYS_KEYS.get(k))
self.check_sys_vars()
return self.sys_vars
def check_sys_vars(self):
'''ensure the requried RL system vars are specified'''
sys_keys = self.sys_vars.keys()
assert all(k in sys_keys for k in REQUIRED_SYS_KEYS)
def set_env_spec(self):
'''Helper: return the env specs: dims, actions, reward range'''
env = self.env
state_dim = env.observation_space.shape[0]
if (len(env.observation_space.shape) > 1):
state_dim = env.observation_space.shape
env_spec = {
'state_dim': state_dim,
'state_bounds': np.transpose(
[env.observation_space.low, env.observation_space.high]),
'action_dim': env.action_space.n,
'actions': list(range(env.action_space.n)),
'reward_range': env.reward_range,
'timestep_limit': env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
}
self.env_spec = self.preprocessor.preprocess_env_spec(
env_spec) # preprocess
return self.env_spec
def debug_agent_info(self):
logger.debug(
"Agent info: {}".format(
format_obj_dict(
self.agent,
['learning_rate', 'n_epoch'])))
logger.debug(
"Memory info: size: {}".format(self.agent.memory.size()))
logger.debug(
"Policy info: {}".format(
format_obj_dict(self.agent.policy, ['e', 'tau'])))
logger.debug(
"PreProcessor info: {}".format(
format_obj_dict(self.agent.preprocessor, [])))
def check_end(self):
'''check if session ends (if is last episode)
do ending steps'''
sys_vars = self.sys_vars
logger.debug(
"RL Sys info: {}".format(
format_obj_dict(
sys_vars, ['epi', 't', 'total_rewards', 'mean_rewards'])))
logger.debug('{:->30}'.format(''))
if (sys_vars['solved'] or
(sys_vars['epi'] == sys_vars['MAX_EPISODES'] - 1)):
logger.info(
'Problem solved? {}\nAt episode: {}\nParams: {}'.format(
sys_vars['solved'], sys_vars['epi'],
to_json(self.param)))
self.env.close()
def update_history(self):
'''
update the data per episode end
'''
sys_vars = self.sys_vars
sys_vars['total_rewards_history'].append(sys_vars['total_rewards'])
sys_vars['explore_history'].append(
getattr(self.policy, 'e', 0) or getattr(self.policy, 'tau', 0))
avg_len = sys_vars['REWARD_MEAN_LEN']
        # Calculate mean_rewards over the last REWARD_MEAN_LEN episodes
        # cast away from np for json serializable (dumb python)
mean_rewards = float(
np.mean(sys_vars['total_rewards_history'][-avg_len:]))
solved = (mean_rewards >= sys_vars['SOLVED_MEAN_REWARD'])
sys_vars['mean_rewards'] = mean_rewards
sys_vars['mean_rewards_history'].append(mean_rewards)
sys_vars['solved'] = solved
self.grapher.plot()
self.check_end()
return sys_vars
def run_episode(self):
        '''run an episode, return sys_vars'''
sys_vars, env, agent = self.sys_vars, self.env, self.agent
sys_vars['total_rewards'] = 0
state = env.reset()
processed_state = agent.preprocessor.reset_state(state)
agent.memory.reset_state(processed_state)
self.debug_agent_info()
for t in range(agent.env_spec['timestep_limit']):
sys_vars['t'] = t # update sys_vars t
if sys_vars.get('RENDER'):
env.render()
processed_state = agent.preprocessor.preprocess_state()
action = agent.select_action(processed_state)
next_state, reward, done, _info = env.step(action)
processed_exp = agent.preprocessor.preprocess_memory(
action, reward, next_state, done)
if processed_exp is not None:
agent.memory.add_exp(*processed_exp)
sys_vars['done'] = done
agent.update(sys_vars)
if agent.to_train(sys_vars):
agent.train(sys_vars)
sys_vars['total_rewards'] += reward
if done:
break
self.update_history()
return sys_vars
def clear_session(self):
if K._BACKEND == 'tensorflow':
K.clear_session() # manual gc to fix TF issue 3388
def run(self):
'''run a session of agent'''
log_delimiter('Run Session #{} of {}\n{}'.format(
self.session_num, self.num_of_sessions, self.session_id))
sys_vars = self.sys_vars
sys_vars['time_start'] = timestamp()
for epi in range(sys_vars['MAX_EPISODES']):
sys_vars['epi'] = epi # update sys_vars epi
try:
self.run_episode()
except Exception:
                logger.error('Error in experiment, terminating '
                             'further sessions from {}'.format(self.session_id))
traceback.print_exc(file=sys.stdout)
break
if sys_vars['solved']:
break
self.clear_session()
sys_vars['time_end'] = timestamp()
sys_vars['time_taken'] = timestamp_elapse(
sys_vars['time_start'], sys_vars['time_end'])
progress = 'Progress: Experiment #{} Session #{} of {} done'.format(
self.experiment.experiment_num,
self.session_num, self.num_of_sessions)
log_delimiter('End Session:\n{}\n{}'.format(
self.session_id, progress))
return sys_vars
class Experiment(object):
'''
    An Experiment for a config, repeated k times:
will run k Sessions, each with identical sess_spec
for a problem, Agent, Memory, Policy, param.
Will spawn as many Sessions for repetition
Handles all the data from sessions
to provide an experiment-level summary for a sess_spec
Its experiment_id is serialized by
problem, Agent, Memory, Policy and timestamp
Data Requirements:
JSON, single file, quick and useful summary,
replottable data, rerunnable specs
Keys:
all below X array of hyper param selection:
- sess_spec (so we can plug in directly again to rerun)
- summary
- time_start
- time_end
- time_taken
- metrics
- sys_vars_array
'''
def __init__(self, sess_spec, times=1,
experiment_num=0, num_of_experiments=1,
run_timestamp=timestamp(),
prefix_id_override=None):
self.sess_spec = sess_spec
self.data = None
self.times = times
self.sess_spec.pop('param_range', None) # single exp, del range
self.experiment_num = experiment_num
self.num_of_experiments = num_of_experiments
self.run_timestamp = run_timestamp
self.prefix_id = prefix_id_override or '{}_{}_{}_{}_{}_{}'.format(
sess_spec['problem'],
sess_spec['Agent'].split('.').pop(),
sess_spec['Memory'].split('.').pop(),
sess_spec['Policy'].split('.').pop(),
sess_spec['PreProcessor'].split('.').pop(),
self.run_timestamp
)
self.experiment_id = self.prefix_id + '_e' + str(self.experiment_num)
self.base_dir = './data/{}'.format(self.prefix_id)
os.makedirs(self.base_dir, exist_ok=True)
self.base_filename = './data/{}/{}'.format(
self.prefix_id, self.experiment_id)
self.data_filename = self.base_filename + '.json'
log_delimiter('Init Experiment #{} of {}:\n{}'.format(
self.experiment_num, self.num_of_experiments,
self.experiment_id), '=')
def analyze(self):
        '''
        helper: analyze the given data from an experiment,
        return the metrics
        '''
sys_vars_array = self.data['sys_vars_array']
solved_sys_vars_array = list(filter(
lambda sv: sv['solved'], sys_vars_array))
mean_rewards_array = np.array(list(map(
lambda sv: sv['mean_rewards'], sys_vars_array)))
max_total_rewards_array = np.array(list(map(
lambda sv: np.max(sv['total_rewards_history']), sys_vars_array)))
epi_array = np.array(list(map(lambda sv: sv['epi'], sys_vars_array)))
mean_rewards_per_epi_array = np.divide(mean_rewards_array, epi_array)
t_array = np.array(list(map(lambda sv: sv['t'], sys_vars_array)))
time_taken_array = np.array(list(map(
lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
sys_vars_array)))
solved_epi_array = np.array(list(map(
lambda sv: sv['epi'], solved_sys_vars_array)))
solved_t_array = np.array(list(map(
lambda sv: sv['t'], solved_sys_vars_array)))
solved_time_taken_array = np.array(list(map(
lambda sv: timestamp_elapse_to_seconds(sv['time_taken']),
solved_sys_vars_array)))
metrics = {
# percentage solved
'num_of_sessions': len(sys_vars_array),
'solved_num_of_sessions': len(solved_sys_vars_array),
'solved_ratio_of_sessions': float(len(
solved_sys_vars_array)) / len(sys_vars_array),
'mean_rewards_stats': basic_stats(mean_rewards_array),
'mean_rewards_per_epi_stats': basic_stats(
mean_rewards_per_epi_array),
'max_total_rewards_stats': basic_stats(max_total_rewards_array),
'epi_stats': basic_stats(epi_array),
't_stats': basic_stats(t_array),
'time_taken_stats': basic_stats(time_taken_array),
'solved_epi_stats': basic_stats(solved_epi_array),
'solved_t_stats': basic_stats(solved_t_array),
'solved_time_taken_stats': basic_stats(solved_time_taken_array),
}
self.data['summary'].update({'metrics': metrics})
return self.data
def save(self):
'''save the entire experiment data grid from inside run()'''
with open(self.data_filename, 'w') as f:
f.write(to_json(self.data))
logger.info(
'Session complete, data saved to {}'.format(self.data_filename))
def to_stop(self):
        '''check if the experiment should be stopped'''
metrics = self.data['summary']['metrics']
failed = metrics['solved_ratio_of_sessions'] < 1.
if failed:
logger.info(
'Failed experiment, terminating sessions for {}'.format(
self.experiment_id))
return failed
def run(self):
'''
        helper: run an Experiment, i.e. a Session,
        a number of times, given a sess_spec from gym_specs
'''
configure_gpu()
time_start = timestamp()
sys_vars_array = []
for s in range(self.times):
sess = Session(experiment=self,
session_num=s, num_of_sessions=self.times)
sys_vars = sess.run()
sys_vars_array.append(copy.copy(sys_vars))
time_end = timestamp()
time_taken = timestamp_elapse(time_start, time_end)
self.data = { # experiment data
'experiment_id': self.experiment_id,
'sess_spec': self.sess_spec,
'summary': {
'time_start': time_start,
'time_end': time_end,
'time_taken': time_taken,
'metrics': None,
},
'sys_vars_array': sys_vars_array,
}
self.analyze()
            # progressive update, write after each session is done
self.save()
if self.to_stop():
break
progress = 'Progress: Experiment #{} of {} done'.format(
self.experiment_num, self.num_of_experiments)
log_delimiter(
'End Experiment:\n{}\n{}'.format(
self.experiment_id, progress), '=')
return self.data
def configure_gpu():
'''detect GPU options and configure'''
if K._BACKEND != 'tensorflow':
# skip directly if is not tensorflow
return
real_parallel_process_num = 1 if mp.current_process(
).name == 'MainProcess' else PARALLEL_PROCESS_NUM
tf = K.tf
gpu_options = tf.GPUOptions(
allow_growth=True,
per_process_gpu_memory_fraction=1./float(real_parallel_process_num))
config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True)
sess = tf.Session(config=config)
K.set_session(sess)
return sess
def plot(experiment_or_prefix_id):
    '''plot from saved data by initializing a Session for each sys_vars'''
prefix_id = prefix_id_from_experiment_id(experiment_or_prefix_id)
experiment_data_array = load_data_array_from_prefix_id(prefix_id)
for data in experiment_data_array:
sess_spec = data['sess_spec']
experiment = Experiment(sess_spec, times=1,
prefix_id_override=prefix_id)
# save with the right serialized filename
experiment.experiment_id = data['experiment_id']
num_of_sessions = len(data['sys_vars_array'])
for s in range(num_of_sessions):
sess = Session(experiment=experiment,
session_num=s, num_of_sessions=num_of_sessions)
sys_vars = data['sys_vars_array'][s]
sess.sys_vars = sys_vars
sess.grapher.plot()
sess.clear_session()
def analyze_param_space(experiment_data_array_or_prefix_id):
'''
get all the data from all experiments.run()
or read from all data files matching the prefix of experiment_id
e.g. usage without running:
prefix_id = 'DevCartPole-v0_DQN_LinearMemoryWithForgetting_BoltzmannPolicy_2017-01-15_142810'
analyze_param_space(prefix_id)
'''
if isinstance(experiment_data_array_or_prefix_id, str):
experiment_data_array = load_data_array_from_prefix_id(
experiment_data_array_or_prefix_id)
else:
experiment_data_array = experiment_data_array_or_prefix_id
flat_metrics_array = []
for data in experiment_data_array:
flat_metrics = flatten_dict(data['summary']['metrics'])
flat_metrics.update({'experiment_id': data['experiment_id']})
flat_metrics_array.append(flat_metrics)
metrics_df = pd.DataFrame.from_dict(flat_metrics_array)
    metrics_df = metrics_df.sort_values(
        ['mean_rewards_per_epi_stats_mean',
         'mean_rewards_stats_mean', 'solved_ratio_of_sessions'],
        ascending=False
    )
experiment_id = experiment_data_array[0]['experiment_id']
prefix_id = prefix_id_from_experiment_id(experiment_id)
param_space_data_filename = './data/{0}/param_space_data_{0}.csv'.format(
prefix_id)
metrics_df.to_csv(param_space_data_filename, index=False)
logger.info(
'Param space data saved to {}'.format(param_space_data_filename))
return metrics_df
def run(sess_name_id_spec, times=1,
param_selection=False, line_search=False,
plot_only=False):
'''
primary method:
specify:
- sess_name(str) or sess_spec(Dict): run new experiment,
- experiment_id(str): rerun experiment from data
- experiment_id(str) with plot_only=True: plot graphs from data
This runs all experiments, specified by the obtained sess_spec
for a specified number of sessions per experiment
    Multiple experiments are run if param_selection=True
'''
# run plots on data only
if plot_only:
plot(sess_name_id_spec)
return
# set sess_spec based on input
if isinstance(sess_name_id_spec, str):
if len(sess_name_id_spec.split('_')) >= 4:
data = load_data_from_experiment_id(sess_name_id_spec)
sess_spec = data['sess_spec']
else:
sess_spec = SESS_SPECS.get(sess_name_id_spec)
else:
sess_spec = sess_name_id_spec
# compose grid and run param selection
if param_selection:
if line_search:
param_grid = param_line_search(sess_spec)
else:
param_grid = param_product(sess_spec)
sess_spec_grid = generate_sess_spec_grid(sess_spec, param_grid)
num_of_experiments = len(sess_spec_grid)
run_timestamp = timestamp()
experiment_array = []
for e in range(num_of_experiments):
sess_spec = sess_spec_grid[e]
experiment = Experiment(
sess_spec, times=times, experiment_num=e,
num_of_experiments=num_of_experiments,
run_timestamp=run_timestamp)
experiment_array.append(experiment)
p = mp.Pool(PARALLEL_PROCESS_NUM)
experiment_data_array = list(p.map(mp_run_helper, experiment_array))
p.close()
p.join()
else:
experiment = Experiment(sess_spec, times=times)
experiment_data = experiment.run()
experiment_data_array = [experiment_data]
return analyze_param_space(experiment_data_array)
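# Usage sketch (illustrative addition, not part of the original module): the
# three ways run() above is meant to be invoked. 'dqn' is a hypothetical key
# into SESS_SPECS; the prefix_id string is the example quoted in
# analyze_param_space()'s docstring.
if __name__ == '__main__':
    run('dqn', times=2)                        # new experiment from a sess_name
    run('dqn', times=2, param_selection=True)  # grid search over param_range
    run('DevCartPole-v0_DQN_LinearMemoryWithForgetting_BoltzmannPolicy_2017-01-15_142810',
        plot_only=True)                        # replot graphs from saved data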
| StarcoderdataPython |
1679797 | <reponame>yycho0108/DRLND_Core
#!/usr/bin/env python3
from abc import ABC, abstractmethod
import os
import numpy as np
from enum import Enum
from collections import defaultdict
import pickle
from drlnd.core.agents.base_agent import AgentBase
from drlnd.core.common.epsilon import EpsilonBase, IncrementalEpsilon
from drlnd.core.common.util import lerp
from drlnd.core.common.q_table import QTable, DictQTable, TiledQTable
from drlnd.core.common.logger import get_default_logger
logger = get_default_logger()
class QControlMethod(Enum):
kMethodSarsa0 = 0
kMethodSarsaMax = 1
kMethodSarsaExpect = 2
class QTableAgent(AgentBase):
def __init__(self, num_actions: int,
epsilon: EpsilonBase,
control: QControlMethod = QControlMethod.kMethodSarsaExpect,
alpha: float = 0.1,
gamma: float = 1.0,
Q: QTable = None
):
self.num_actions_ = num_actions
if isinstance(epsilon, IncrementalEpsilon):
self.eps_ = epsilon
else:
self.eps_ = IncrementalEpsilon(epsilon)
self.ctrl_ = control
self.alpha_ = alpha
self.gamma_ = gamma
# NOTE(yycho0108): instead of initializing to zero,
# Consider alternative values to enable "optimistic"
# Q-table.
if Q is None:
Q = DictQTable(self.num_actions_)
self.Q_ = Q
def get_action_probs(self, state):
n = self.num_actions_
probs = np.empty(n)
if self.Q_.has(state):
# E-greedy probability
eps = self.eps_()
best_action = self.Q_[state].argmax()
probs.fill(eps / len(probs)) # -> sum to eps
probs[best_action] += (1.0 - eps)
else:
# Uniform probability for unknown states
probs.fill(1.0/n)
return probs
def select_action(self, state):
"""
Select action based on the current state and the estimated q-table.
"""
# E-greedy policy
if np.random.random() < self.eps_():
return np.random.choice(self.num_actions_)
return np.argmax(self.Q_.get(state))
def step(self, state, action, reward, next_state, done):
# Update q-table
q_old = self.Q_.get(state, action)
# Determine expected q value based on the
# Control method settings.
q_next = 0.0
if done:
# FIXME(yycho0108): somewhat fragile method
# To track table training progress.
self.eps_.increment_index()
else:
if self.ctrl_ == QControlMethod.kMethodSarsa0:
                # NOTE(yycho0108): does not currently work -- Sarsa(0) needs the
                # next action, which this step() signature does not provide.
                raise NotImplementedError('Sarsa(0) control is not supported.')
elif self.ctrl_ == QControlMethod.kMethodSarsaMax:
q_next = np.max(self.Q_.get(next_state))
elif self.ctrl_ == QControlMethod.kMethodSarsaExpect:
probs = self.get_action_probs(next_state)
q_next = self.Q_.get(next_state).dot(probs)
q_new = reward + self.gamma_ * q_next
self.Q_.update(state, action, q_new, self.alpha_)
def save(self, path, filename='q.pkl'):
filename = os.path.join(path, filename)
data = (self.num_actions_, self.eps_, self.ctrl_,
self.alpha_, self.gamma_, self.Q_)
logger.debug('Saving agent to {}'.format(filename))
with open(filename, 'wb') as f:
pickle.dump(data, f)
def load(self, path, filename='q.pkl'):
filename = os.path.join(path, filename)
logger.debug('Loading agent from {}'.format(filename))
with open(filename, 'rb') as f:
data = pickle.load(f)
self.num_actions_, self.eps_, self.ctrl_, self.alpha_, self.gamma_, self.Q_ = data
@classmethod
def from_file(cls, filename):
with open(filename, 'rb') as f:
data = pickle.load(f)
num_actions, epsilon, control, alpha, gamma, Q = data
return QTableAgent(num_actions, epsilon, control, alpha, gamma, Q)
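# Illustrative sketch (not part of the original module): the expected-Sarsa
# target computed in QTableAgent.step(), written out with plain numpy on
# made-up numbers. All values below are hypothetical, and the final line
# assumes QTable.update() applies the usual lerp with rate alpha.
if __name__ == '__main__':
    q_row = np.array([0.0, 1.0, 2.0, 3.0])          # Q(s', a) for each action
    probs = np.array([0.025, 0.025, 0.025, 0.925])  # e-greedy probs for eps=0.1
    reward, gamma, alpha, q_old = 1.0, 1.0, 0.1, 0.5
    q_next = q_row.dot(probs)                # E_pi[Q(s', .)], as in step()
    q_new = reward + gamma * q_next          # TD target
    print(q_old + alpha * (q_new - q_old))   # incremental (lerp) update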
| StarcoderdataPython |
4914098 | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @author <NAME>
import numpy as np
def hex_to_rgb(hex_color):
rgb_hex = [hex_color[x:x + 2] for x in [1, 3, 5]]
return np.array([int(hex_value, 16) for hex_value in rgb_hex])
def rgb_to_hex(rgb):
hex_value = []
for i in rgb:
h = hex(int(i))[2:]
if len(h) < 2:
            h = "0{}".format(h)  # zero-pad on the left, e.g. 0xa -> "0a"
hex_value.append(h)
return "#" + "".join(hex_value)
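# Usage sketch (illustrative addition, not part of the original module):
# hex_to_rgb and rgb_to_hex above are inverses of each other.
if __name__ == '__main__':
    _rgb = np.array([255, 255, 100])
    assert rgb_to_hex(_rgb) == '#ffff64'
    assert (hex_to_rgb('#ffff64') == _rgb).all()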
def make_random_color(fmt='rgb'):
"""
Generate a random color.
Possible output formats are:
- 'rgb' (default): (255, 255, 100)
- 'dec': (1.0, 1.0, 0.392)
- 'rgba': (1.0, 1.0, 0.392, 1.0)
- 'hex': #ffff64
"""
rgb = np.random.randint(0, 255, 3)
if fmt == 'rgb':
return rgb
elif fmt == 'dec':
        return rgb.astype(float) / 255.0
    elif fmt == 'rgba':
        return np.concatenate((rgb.astype(float) / 255.0, [1.0]))
elif fmt == 'hex':
return rgb_to_hex(rgb)
else:
raise RuntimeError("Unknown color format {}. Possible choices are: "
"rgb, dec, rgba, and hex.".format(fmt))
| StarcoderdataPython |
3237974 | <reponame>freeclouds/OpenHDMap<filename>map_label_tool/py_proto/modules/perception/onboard/proto/fusion_component_config_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/perception/onboard/proto/fusion_component_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/perception/onboard/proto/fusion_component_config.proto',
package='apollo.perception.onboard',
syntax='proto2',
serialized_pb=_b('\n>modules/perception/onboard/proto/fusion_component_config.proto\x12\x19\x61pollo.perception.onboard\"\xa7\x02\n\x15\x46usionComponentConfig\x12\x15\n\rfusion_method\x18\x01 \x01(\t\x12\x1a\n\x12\x66usion_main_sensor\x18\x02 \x01(\t\x12\x1b\n\x13object_in_roi_check\x18\x03 \x01(\x08\x12#\n\x1bradius_for_roi_object_check\x18\x04 \x01(\x01\x12<\n\x1doutput_obstacles_channel_name\x18\x05 \x01(\t:\x15/perception/obstacles\x12[\n%output_viz_fused_content_channel_name\x18\x06 \x01(\t:,/perception/inner/visualization/FusedObjects')
)
_FUSIONCOMPONENTCONFIG = _descriptor.Descriptor(
name='FusionComponentConfig',
full_name='apollo.perception.onboard.FusionComponentConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fusion_method', full_name='apollo.perception.onboard.FusionComponentConfig.fusion_method', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fusion_main_sensor', full_name='apollo.perception.onboard.FusionComponentConfig.fusion_main_sensor', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='object_in_roi_check', full_name='apollo.perception.onboard.FusionComponentConfig.object_in_roi_check', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='radius_for_roi_object_check', full_name='apollo.perception.onboard.FusionComponentConfig.radius_for_roi_object_check', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_obstacles_channel_name', full_name='apollo.perception.onboard.FusionComponentConfig.output_obstacles_channel_name', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("/perception/obstacles").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_viz_fused_content_channel_name', full_name='apollo.perception.onboard.FusionComponentConfig.output_viz_fused_content_channel_name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=True, default_value=_b("/perception/inner/visualization/FusedObjects").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=94,
serialized_end=389,
)
DESCRIPTOR.message_types_by_name['FusionComponentConfig'] = _FUSIONCOMPONENTCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FusionComponentConfig = _reflection.GeneratedProtocolMessageType('FusionComponentConfig', (_message.Message,), dict(
DESCRIPTOR = _FUSIONCOMPONENTCONFIG,
__module__ = 'modules.perception.onboard.proto.fusion_component_config_pb2'
# @@protoc_insertion_point(class_scope:apollo.perception.onboard.FusionComponentConfig)
))
_sym_db.RegisterMessage(FusionComponentConfig)
# @@protoc_insertion_point(module_scope)
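# Illustrative usage sketch only (this module is compiler-generated and should
# not otherwise be edited): construct, serialize and re-parse the message.
# The field values below are placeholders, not Apollo defaults.
if __name__ == '__main__':
    cfg = FusionComponentConfig()
    cfg.fusion_method = 'probabilistic_fusion'  # hypothetical value
    cfg.fusion_main_sensor = 'velodyne64'       # hypothetical value
    cfg.object_in_roi_check = True
    cfg.radius_for_roi_object_check = 60.0
    print(FusionComponentConfig.FromString(cfg.SerializeToString()))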
| StarcoderdataPython |
12844798 | #!/usr/bin/env python
"""Test add_metadata_route function with legal parameters.
"""
import apikit
from flask import Flask
def test_set_flask_metadata():
"""Test metadata creation with legal parameters.
"""
app = Flask("bob")
apikit.set_flask_metadata(app, "2.0", "http://example.repo", "BobApp")
apikit.add_metadata_route(app, "")
apikit.add_metadata_route(app, "bob")
apikit.add_metadata_route(app, ["bob"])
apikit.add_metadata_route(app, ["bob", "chuck"])
| StarcoderdataPython |
6608959 | <reponame>siavash-khodadadeh/MetaLearning-TF2.0
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Flatten, Dense, Input, BatchNormalization
from decorators import name_repr
class SimpleModel(tf.keras.Model):
name = 'SimpleModel'
def __init__(self, num_classes):
super(SimpleModel, self).__init__(name='simple_model')
self.conv1 = tf.keras.layers.Conv2D(64, 3, name='conv1', strides=(2, 2), padding='same')
self.bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn1')
self.conv2 = tf.keras.layers.Conv2D(64, 3, name='conv2', strides=(2, 2), padding='same')
self.bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn2')
self.conv3 = tf.keras.layers.Conv2D(64, 3, name='conv3', strides=(2, 2), padding='same')
self.bn3 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn3')
self.conv4 = tf.keras.layers.Conv2D(64, 3, name='conv4', strides=(2, 2), padding='same')
self.bn4 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn4')
self.flatten = Flatten(name='flatten')
self.dense = Dense(num_classes, activation=None, name='dense')
def conv_block(self, features, conv, bn=None, training=False):
conv_out = conv(features)
batch_normalized_out = bn(conv_out, training=training)
return tf.keras.activations.relu(batch_normalized_out)
def call(self, inputs, training=False):
image = inputs
c1 = self.conv_block(image, self.conv1, self.bn1, training=training)
c2 = self.conv_block(c1, self.conv2, self.bn2, training=training)
c3 = self.conv_block(c2, self.conv3, self.bn3, training=training)
c4 = self.conv_block(c3, self.conv4, self.bn4, training=training)
c4 = tf.reduce_mean(c4, [1, 2])
f = self.flatten(c4)
out = self.dense(f)
return out
class MiniImagenetModel(tf.keras.Model):
def __init__(self, num_classes, *args, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = 'MiniImagenetModel'
super(MiniImagenetModel, self).__init__(*args, **kwargs)
self.max_pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
self.conv1 = tf.keras.layers.Conv2D(32, 3, name='conv1')
self.bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn1')
# self.bn1 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn1')
self.conv2 = tf.keras.layers.Conv2D(32, 3, name='conv2')
self.bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn2')
# self.bn2 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn2')
self.conv3 = tf.keras.layers.Conv2D(32, 3, name='conv3')
self.bn3 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn3')
# self.bn3 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn3')
self.conv4 = tf.keras.layers.Conv2D(32, 3, name='conv4')
self.bn4 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn4')
# self.bn4 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn4')
self.flatten = Flatten(name='flatten')
self.dense = Dense(num_classes, activation=None, name='dense')
def conv_block(self, features, conv, bn=None, training=False):
conv_out = conv(features)
batch_normalized_out = bn(conv_out, training=training)
batch_normalized_out = self.max_pool(batch_normalized_out)
return tf.keras.activations.relu(batch_normalized_out)
def get_features(self, inputs, training=False, apply_final_activation=True):
import numpy as np
image = inputs
c1 = self.conv_block(image, self.conv1, self.bn1, training=training)
c2 = self.conv_block(c1, self.conv2, self.bn2, training=training)
c3 = self.conv_block(c2, self.conv3, self.bn3, training=training)
if apply_final_activation:
c4 = self.conv_block(c3, self.conv4, self.bn4, training=training)
c4 = tf.reshape(c4, [-1, np.prod([int(dim) for dim in c4.get_shape()[1:]])])
else:
conv_out = self.conv4(c3)
batch_normalized_out = self.bn4(conv_out, training=training)
c4 = self.max_pool(batch_normalized_out)
c4 = tf.reshape(c4, [-1, np.prod([int(dim) for dim in c4.get_shape()[1:]])])
f = self.flatten(c4)
return f
def call(self, inputs, training=False):
f = self.get_features(inputs, training=training)
out = self.dense(f)
return out
class VGGSmallModel(tf.keras.models.Model):
name = 'VGGSmallModel'
def __init__(self, num_classes):
super(VGGSmallModel, self).__init__(name='vgg_small_model')
self.max_pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
self.conv1 = tf.keras.layers.Conv2D(64, 3, name='conv1')
self.bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn1')
self.conv2 = tf.keras.layers.Conv2D(128, 3, name='conv2')
self.bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn2')
self.conv3 = tf.keras.layers.Conv2D(256, 3, name='conv3')
self.bn3 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn3')
self.conv4 = tf.keras.layers.Conv2D(256, 3, name='conv4')
self.bn4 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn4')
self.conv5 = tf.keras.layers.Conv2D(512, 3, name='conv5')
self.bn5 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn5')
self.conv6 = tf.keras.layers.Conv2D(512, 3, name='conv6')
self.bn6 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn6')
self.flatten = Flatten(name='flatten')
self.dense1 = tf.keras.layers.Dense(32, activation=None, name='dense1')
self.bn_dense = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn_dense')
self.dense = Dense(num_classes, activation=None, name='dense')
def conv_block(self, features, conv, bn=None, training=False):
conv_out = conv(features)
batch_normalized_out = bn(conv_out, training=training)
batch_normalized_out = self.max_pool(batch_normalized_out)
return tf.keras.activations.relu(batch_normalized_out)
def call(self, inputs, training=False):
image = inputs
output = self.conv_block(image, self.conv1, self.bn1, training=training)
output = self.conv_block(output, self.conv2, self.bn2, training=training)
output = self.conv_block(output, self.conv3, self.bn3, training=training)
output = self.conv_block(output, self.conv4, self.bn4, training=training)
output = self.conv_block(output, self.conv5, self.bn5, training=training)
output = self.conv_block(output, self.conv6, self.bn6, training=training)
output = self.flatten(output)
output = self.dense1(output)
output = self.bn_dense(output)
output = tf.keras.activations.relu(output)
output = self.dense(output)
return output
class FiveLayerResNet(tf.keras.models.Model):
name = 'FiveLayerResNet'
def __init__(self, num_classes):
super(FiveLayerResNet, self).__init__(name='FiveLayerResNet')
self.global_max_pool = tf.keras.layers.GlobalMaxPooling2D()
self.max_pool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2))
self.block1_conv1 = tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), activation=None, padding='valid', name='block1_conv1')
self.block1_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block1_bn1')
self.block1_conv2 = tf.keras.layers.Conv2D(64, (3, 3), strides=(2, 2), activation=None, padding='valid', name='block1_conv2')
self.block1_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block1_bn2')
self.block2_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block2_conv1')
self.block2_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block2_bn1')
self.block2_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block2_conv2')
self.block2_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block2_bn2')
self.block3_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block3_conv1')
self.block3_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block3_bn1')
self.block3_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block3_conv2')
self.block3_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block3_bn2')
self.block4_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block4_conv1')
self.block4_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block4_bn1')
self.block4_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block4_conv2')
self.block4_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block4_bn2')
# self.block5_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block5_conv1')
# self.block5_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block5_bn1')
# self.block5_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block5_conv2')
# self.block5_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block5_bn2')
# self.block6_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block6_conv1')
# self.block6_bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block6_bn1')
# self.block6_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation=None, padding='same', name='block6_conv2')
# self.block6_bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='block6_bn2')
self.flatten = Flatten(name='flatten')
self.dense = Dense(num_classes, activation=None, name='dense')
def forward_res_block(self, inputs, conv1, bn1, conv2, bn2, training, use_shortcut=True):
output = inputs
shortcut = output
output = conv1(output)
output = bn1(output, training=training)
output = tf.keras.activations.relu(output)
output = conv2(output)
output = bn2(output, training=training)
output = tf.keras.activations.relu(output)
if use_shortcut:
output = output + shortcut
return output
def call(self, inputs, training=False):
output = inputs
output = self.forward_res_block(
inputs, self.block1_conv1, self.block1_bn1, self.block1_conv2, self.block1_bn2, training, use_shortcut=False
)
output = self.max_pool(output)
output = self.forward_res_block(
output, self.block2_conv1, self.block2_bn1, self.block2_conv2, self.block2_bn2, training
)
output = self.max_pool(output)
output = self.forward_res_block(
output, self.block3_conv1, self.block3_bn1, self.block3_conv2, self.block3_bn2, training
)
output = self.max_pool(output)
output = self.forward_res_block(
output, self.block4_conv1, self.block4_bn1, self.block4_conv2, self.block4_bn2, training
)
output = self.max_pool(output)
# output = self.forward_res_block(
# output, self.block5_conv1, self.block5_bn1, self.block5_conv2, self.block5_bn2, training
#)
# output = self.max_pool(output)
# output = self.forward_res_block(
# output, self.block6_conv1, self.block6_bn1, self.block6_conv2, self.block6_bn2, training
# )
output = self.global_max_pool(output)
output = self.flatten(output)
output = self.dense(output)
return output
class VGG19Model(tf.keras.models.Model):
name = 'VGG19Model'
def __init__(self, num_classes):
super(VGG19Model, self).__init__(name='vgg19_model')
self.block1_conv1 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')
self.block1_conv2 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')
self.block1_pool = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')
self.block2_conv1 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')
self.block2_conv2 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')
self.block2_pool = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')
self.block3_conv1 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')
self.block3_conv2 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')
self.block3_conv3 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')
self.block3_conv4 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')
self.block3_pool = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')
self.block4_conv1 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')
self.block4_conv2 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')
self.block4_conv3 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')
self.block4_conv4 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')
self.block4_pool = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')
self.block5_conv1 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')
self.block5_conv2 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')
self.block5_conv3 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')
self.block5_conv4 = tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')
self.block5_pool = tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')
self.average_pool = tf.keras.layers.AveragePooling2D(pool_size=(7, 7))
self.flatten = tf.keras.layers.Flatten(name='flatten')
# self.fc1 = tf.keras.layers.Dense(512, activation='relu', name='fc1')
# self.fc2 = tf.keras.layers.Dense(1024, activation='relu', name='fc2')
self.fc3 = tf.keras.layers.Dense(num_classes, activation=None, name='predictions')
def call(self, inputs, training=False):
image = inputs
output = self.block1_conv1(image)
output = self.block1_conv2(output)
output = self.block1_pool(output)
output = self.block2_conv1(output)
output = self.block2_conv2(output)
output = self.block2_pool(output)
output = self.block3_conv1(output)
output = self.block3_conv2(output)
output = self.block3_conv3(output)
output = self.block3_conv4(output)
output = self.block3_pool(output)
output = self.block4_conv1(output)
output = self.block4_conv2(output)
output = self.block4_conv3(output)
output = self.block4_conv4(output)
output = self.block4_pool(output)
output = self.block5_conv1(output)
output = self.block5_conv2(output)
output = self.block5_conv3(output)
output = self.block5_conv4(output)
output = self.block5_pool(output)
output = self.average_pool(output)
output = self.flatten(output)
# output = self.fc1(output)
# output = self.fc2(output)
output = self.fc3(output)
return output
@name_repr('TransferNet')
def get_transfer_net(
architecture='VGG16',
num_hidden_units=None,
num_trainable_layers=3,
num_classes=5,
random_layer_initialization_seed=None
):
base_model = getattr(tf.keras.applications, architecture)(
include_top=False,
weights='imagenet',
input_shape=(224, 224, 3),
)
base_model.trainable = False
counter = 1
for layer in reversed(base_model.layers):
if counter >= num_trainable_layers:
break
else:
layer.trainable = True
if isinstance(layer, tf.keras.layers.Dense) or isinstance(layer, tf.keras.layers.Conv2D):
counter += 1
last_layer = tf.keras.layers.Flatten(name='flatten')(base_model.output)
tf.random.set_seed(random_layer_initialization_seed)
if num_hidden_units:
hidden_layers = []
for i, n in enumerate(num_hidden_units):
hidden_layers.append(tf.keras.layers.Dense(n, name='fc_' + str(i + 1), activation='relu')(last_layer))
last_layer = hidden_layers[-1]
fc_out = tf.keras.layers.Dense(num_classes, name='fc_out', activation=None)(last_layer)
tf.random.set_seed(None)
model = tf.keras.models.Model(inputs=[base_model.input], outputs=[fc_out], name='TransferNet')
return model
class VoxCelebModel(tf.keras.Model):
name = 'VoxCelebModel'
def __init__(self, num_classes):
super(VoxCelebModel, self).__init__(name='vox_celeb_model')
self.max_pool = tf.keras.layers.MaxPool1D(pool_size=(12, ), strides=(12, ))
self.conv1 = tf.keras.layers.Conv1D(32, 3, name='conv1')
self.bn1 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn1')
# self.bn1 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn1')
self.conv2 = tf.keras.layers.Conv1D(32, 3, name='conv2')
self.bn2 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn2')
# self.bn2 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn2')
self.conv3 = tf.keras.layers.Conv1D(32, 3, name='conv3')
self.bn3 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn3')
# self.bn3 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn3')
self.conv4 = tf.keras.layers.Conv1D(32, 3, name='conv4')
self.bn4 = tf.keras.layers.BatchNormalization(center=True, scale=False, name='bn4')
# self.bn4 = tf.keras.layers.LayerNormalization(center=True, scale=False, name='bn4')
self.flatten = Flatten(name='flatten')
        self.dense = Dense(num_classes, activation=None, name='dense')
def conv_block(self, features, conv, bn=None, training=False):
conv_out = conv(features)
batch_normalized_out = bn(conv_out, training=training)
batch_normalized_out = self.max_pool(batch_normalized_out)
return tf.keras.activations.relu(batch_normalized_out)
def call(self, inputs, training=False):
image = inputs
c1 = self.conv_block(image, self.conv1, self.bn1, training=training)
c2 = self.conv_block(c1, self.conv2, self.bn2, training=training)
c3 = self.conv_block(c2, self.conv3, self.bn3, training=training)
c4 = self.conv_block(c3, self.conv4, self.bn4, training=training)
f = self.flatten(c4)
out = self.dense(f)
return out
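# Usage sketch (illustrative addition, not part of the original module): build
# a transfer-learning model with a frozen ImageNet backbone via
# get_transfer_net(). The argument values are placeholders; running this will
# download the VGG16 ImageNet weights.
if __name__ == '__main__':
    transfer_model = get_transfer_net(
        architecture='VGG16',
        num_hidden_units=(256,),
        num_trainable_layers=3,
        num_classes=5,
    )
    transfer_model.summary()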
| StarcoderdataPython |
6457157 | <filename>src/app/voltdb/voltdb_src/lib/python/voltcli/voltdb.d/start.py
# This file is part of VoltDB.
# Copyright (C) 2008-2021 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
voltdbroot_help = ('Specifies the root directory for the database. The default '
'is voltdbroot under the current working directory.')
server_list_help = ('{hostname-or-ip[,...]}, '
'Specifies the leader(s) for coordinating cluster startup. ')
@VOLT.Command(
bundles = VOLT.ServerBundle('probe',
safemode_available=True,
supports_daemon=True,
supports_multiple_daemons=True,
check_environment_config=True,
supports_paused=True),
options = (
VOLT.StringListOption('-H', '--host', 'server_list', server_list_help, default = ''),
VOLT.IntegerOption('-c', '--count', 'hostcount', 'number of hosts in the cluster'),
VOLT.PathOption('-D', '--dir', 'directory_spec', voltdbroot_help),
VOLT.BooleanOption('-r', '--replica', 'replica', None),
VOLT.BooleanOption('-A', '--add', 'enableadd', 'allows the server to elastically expand the cluster if the cluster is already complete', default = False),
VOLT.IntegerOption('-m', '--missing', 'missing', 'specifies how many nodes are missing at K-safe cluster startup'),
VOLT.PathOption('-l', '--license', 'license', 'specify a license file to replace the existing staged copy of the license')
),
description = 'Starts a database, which has been initialized.'
)
def start(runner):
if runner.opts.replica:
runner.abort_with_help('The --replica option is no longer allowed.')
if runner.opts.directory_spec:
runner.args.extend(['voltdbroot', runner.opts.directory_spec])
if not runner.opts.server_list:
runner.abort_with_help('You must specify the --host option.')
runner.args.extend(['mesh', ','.join(runner.opts.server_list)])
if runner.opts.hostcount:
runner.args.extend(['hostcount', runner.opts.hostcount])
if runner.opts.missing:
runner.args.extend(['missing', runner.opts.missing])
if runner.opts.enableadd:
runner.args.extend(['enableadd'])
if runner.opts.license:
runner.args.extend(['license', runner.opts.license])
runner.go()
| StarcoderdataPython |
8093444 | import pandas as pd
import time
import os
#os.environ['TZ'] = 'America/Los_Angeles' # set new timezone
#time.tzset()
url="https://healthdata.gov/api/views/g62h-syeh/rows.csv?accessType=DOWNLOAD"
#Read current data into dataframe
data = pd.read_csv(url)
#Save as CSV
#data.to_csv(f'./hhs/scrapes/{time.strftime("%Y-%m-%d-%H-%M")}.csv', index=False)
#Select columns
data2 = data[['date', 'state', 'critical_staffing_shortage_anticipated_within_week_yes',
'critical_staffing_shortage_anticipated_within_week_no',
'critical_staffing_shortage_anticipated_within_week_not_reported']]
#Convert date to datetime
data2['date'] = pd.to_datetime(data2['date'])
#Check which are after Aug. 1, 2020 when data got better reported
start_date = "2020-8-1"
after_start_date = data2["date"] >= start_date
#Pull out Aug. 1, 2020 onward
data2 = data2.loc[after_start_date]
#Re-sort
data2 = data2.sort_values(by = ['state', 'date'], ascending = (True, False))
#SAVE RAW DATA TO FILE
data2.to_csv(f'./hhs/scrapes/{time.strftime("%Y-%m-%d")}.csv', index=False)
#CALCULATE NATIONAL RATE ======================================= |
data3 = data2.groupby('date').agg(
yes = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_yes', aggfunc=sum),
no = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_no', aggfunc=sum),
missing = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_not_reported', aggfunc=sum))
#Calculate total
data3["total"] = data3["yes"] + data3["no"] + data3["missing"]
#Calculate those saying yes of those that answered
data3["pct_yes"] = data3["yes"] / (data3["yes"] + data3["no"])
#Calculate those answering of all hospitals
data3["pct_reporting"] = ( data3["yes"] + data3["no"] ) / data3["total"]
#Calculate 7-day rolling average of percent of hospitals anticipating critical staffing shortages within a week
data3['roll_avg'] = data3["pct_yes"].rolling(7).mean()
#Calculate 7-day rolling average of percent of hospitals providing responses
data3['roll_reporting'] = data3["pct_reporting"].rolling(7).mean()
#Re-sort
data3 = data3.sort_values(by = 'date', ascending = False)
#SAVE NATIONAL RATE TO FILE
data3.to_csv(f'./hhs/national/{time.strftime("%Y-%m-%d")}.csv')
#CALCULATE STATE RATES ======================================= |
data4 = data2.groupby(['date','state']).agg(
yes = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_yes', aggfunc=sum),
no = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_no', aggfunc=sum),
missing = pd.NamedAgg(column='critical_staffing_shortage_anticipated_within_week_not_reported', aggfunc=sum))
#Sort by state then date ascending to calculate rolling averages
data4 = data4.sort_values(by = ['state', 'date'])
#Calculate total
data4["total"] = data4["yes"] + data4["no"] + data4["missing"]
#Calculate those saying yes of those that answered
data4["pct_yes"] = data4["yes"] / (data4["yes"] + data4["no"])
#Calculate those answering of all hospitals
data4["pct_reporting"] = ( data4["yes"] + data4["no"] ) / data4["total"]
#Calculate 7-day rolling average of percent of hospitals anticipating critical staffing shortages within a week
#Compute the rolling mean within each state so the 7-day window never crosses a state boundary
data4['roll_avg'] = data4.groupby(level='state')["pct_yes"].transform(lambda s: s.rolling(7).mean())
data4['roll_reporting'] = data4.groupby(level='state')["pct_reporting"].transform(lambda s: s.rolling(7).mean())
#Re-sort
data4 = data4.sort_values(by = ['state', 'date'], ascending = (True, False))
#SAVE STATE RATES TO FILE
data4.to_csv(f'./hhs/states/{time.strftime("%Y-%m-%d")}.csv')
| StarcoderdataPython |
3548559 | #!/usr/bin/env python
"""
Point Grey camera used in the context of a focus lock.
Hazen 09/19
Evan 06/21
"""
import numpy
import time
from PyQt5 import QtCore
import storm_control.sc_hardware.utility.af_lock_c as afLC
import storm_control.sc_hardware.pointGrey.spinnaker as spinnaker
import tifffile
# ===== Import fitting libraries. ===========
# Numpy fitter, this should always be available.
import storm_control.sc_hardware.utility.np_lock_peak_finder as npLPF
# Finding/fitting using the storm-analysis project.
saLPF = None
try:
import storm_control.sc_hardware.utility.sa_lock_peak_finder as saLPF
except ModuleNotFoundError as mnfe:
print(">> Warning! Storm analysis lock fitting module not found. <<")
print(mnfe)
pass
# Finding using the storm-analysis project, fitting using image correlation.
cl2DG = None
try:
import storm_control.sc_hardware.utility.corr_lock_c2dg as cl2DG
except ModuleNotFoundError as mnfe:
# Only need one warning about the lack of storm-analysis.
pass
except OSError as ose:
print(">> Warning! Correlation lock fitting C library not found. <<")
print(ose)
pass
#==============================
class LockCamera(QtCore.QThread):
"""
This class is used to control a Point Grey (Spinnaker) camera in the
context of a focus lock.
"""
cameraUpdate = QtCore.pyqtSignal(dict)
def __init__(self, camera_id = None, parameters = None, **kwds):
super().__init__(**kwds)
self.cur_offsetx = None
self.cur_offsety = None
self.old_offsetx = None
self.old_offsety = None
self.max_offsetx = None
self.max_offsety = None
self.n_analyzed = 0
self.n_dropped = 0
self.start_time = None
self.params_mutex = QtCore.QMutex()
self.running = False
self.zero_dist = parameters.get("zero_dist")
# Initialize library.
spinnaker.pySpinInitialize(verbose = False)
# Get the camera & set some defaults.
self.camera = spinnaker.getCamera(camera_id)
#print(f'self.camera = { self.camera}')
# Only Grasshopper has defect correction
if self.camera.hasProperty("VideoMode"):
self.camera.setProperty("VideoMode", parameters.get("video_mode"))
self.camera.setProperty("pgrDefectPixelCorrectionEnable", False)
# Set pixel format.
self.camera.setProperty("PixelFormat", "Mono16")
# We don't want any of these 'features'.
#self.camera.setProperty("AcquisitionFrameRateAuto", "Off")
self.camera.setProperty("AcquisitionMode", "Continuous")
self.camera.setProperty("ExposureAuto", "Off")
self.camera.setProperty("GainAuto", "Off")
if self.camera.hasProperty("pgrExposureCompensationAuto"):
self.camera.setProperty("pgrExposureCompensationAuto", "Off")
if self.camera.hasProperty("BlackLevelClampingEnable"):
self.camera.setProperty("BlackLevelClampingEnable", False)
if self.camera.hasProperty("SharpnessEnabled"):
self.camera.setProperty("SharpnessEnabled", False)
if self.camera.hasProperty("GammaEnabled"):
self.camera.setProperty("GammaEnabled", False)
#
# No idea what this means in the context of a black and white
# camera. We try and turn it off but that seems to be much
# harder to do than one would hope.
#
#self.camera.setProperty("OnBoardColorProcessEnabled", False)
# Verify that we have turned off some of these 'features'.
for feature in ["pgrDefectPixelCorrectionEnable",
"BlackLevelClampingEnable",
"SharpnessEnabled",
"GammaEnabled"]:
if self.camera.hasProperty(feature):
assert not self.camera.getProperty(feature).getValue()
# Configure camera to not use triggering.
#
self.camera.setProperty("TriggerMode", "Off")
# Configure acquisition parameters.
#
# Note: The order is important here.
#
for pname in ["BlackLevel", "Gain", "Height", "Width", "OffsetX", "OffsetY"]:
self.camera.setProperty(pname, parameters.get(pname))
        # Use maximum exposure time allowed by desired frame rate. # , "AcquisitionFrameRate"
# Line below does not work with blackfly camera. Exposure time needs to be set explicitly
#self.camera.setProperty("ExposureTime", self.camera.getProperty("ExposureTime").getMaximum())
self.camera.setProperty("ExposureTime", 20000.0)
# Get current offsets.
#
self.cur_offsetx = self.camera.getProperty("OffsetX").getValue()
self.cur_offsety = self.camera.getProperty("OffsetY").getValue()
self.old_offsetx = self.cur_offsetx
self.old_offsety = self.cur_offsety
# Set maximum offsets.
#
self.max_offsetx = self.camera.getProperty("OffsetX").getMaximum()
self.max_offsety = self.camera.getProperty("OffsetY").getMaximum()
def adjustAOI(self, dx, dy):
tmp_x = self.cur_offsetx + dx
tmp_y = self.cur_offsety + dy
tmp_x = max(0, tmp_x)
tmp_x = min(self.max_offsetx, tmp_x)
tmp_y = max(0, tmp_y)
tmp_y = min(self.max_offsety, tmp_y)
#
# The thread loop will check for cur != old and update the camera values
# as necessary.
#
self.params_mutex.lock()
self.cur_offsetx = tmp_x
self.cur_offsety = tmp_y
self.params_mutex.unlock()
def adjustZeroDist(self, inc):
pass
def run(self):
self.camera.startAcquisition()
self.running = True
while(self.running):
[frames, frame_size] = self.camera.getFrames()
self.analyze(frames, frame_size)
# Check for AOI change.
self.params_mutex.lock()
if (self.old_offsetx != self.cur_offsetx) or (self.old_offsety != self.cur_offsety):
self.camera.stopAcquisition()
self.camera.setProperty("OffsetX", self.cur_offsetx)
self.camera.setProperty("OffsetY", self.cur_offsety)
self.camera.startAcquisition()
self.old_offsetx = self.cur_offsetx
self.old_offsety = self.cur_offsety
self.params_mutex.unlock()
self.msleep(5)
self.camera.stopAcquisition()
def startCamera(self):
self.start(QtCore.QThread.NormalPriority)
self.start_time = time.time()
def stopCamera(self, verbose = False):
if verbose:
fps = self.n_analyzed/(time.time() - self.start_time)
print(" > AF: Analyzed {0:d}, Dropped {1:d}, {2:.3f} FPS".format(self.n_analyzed, self.n_dropped, fps))
print(" > AF: OffsetX {0:d}, OffsetY {1:d}, ZeroD {2:.2f}".format(self.cur_offsetx, self.cur_offsety, self.zero_dist))
self.running = False
self.wait()
self.camera.shutdown()
class CameraQPD(LockCamera):
"""
    QPD emulation class. The default camera ROI is 200x200 pixels.
The focus lock is configured so that there are two laser spots on the camera.
The distance between these spots is fit and the difference between this distance and the
zero distance is returned as the focus lock offset. The maximum value of the camera
pixels is returned as the focus lock sum.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
# fixed parameters
self.x_off1 = 0.0
self.y_off1 = 0.0
self.x_off2 = 0.0
self.y_off2 = 0.0
        self.image = None # will be loaded below
self.allow_single_fits = False # parameters.get("allow_single_fits") # False
self.sigma = parameters.get("sigma") # 5
self.background = parameters.get("background") # background
self.fit_size = parameters.get("fit_size")*self.sigma # 1.5, relative to sigma
        # Some derived parameters. NOTE: the AOI size is assumed to come from
        # the same "Width"/"Height" parameters that the parent class configures.
        self.x_width = parameters.get("Width")
        self.y_width = parameters.get("Height")
        self.half_x = int(self.x_width/2)
        self.half_y = int(self.y_width/2)
# maybe good things to add
'''
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
self.good = numpy.zeros(self.reps, dtype = numpy.bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
'''
# def adjustAOI is defined above
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.1*inc
self.params_mutex.unlock()
def getImage(self):
return [self.image, self.x_off1, self.y_off1, self.x_off2, self.y_off2, self.sigma]
    def analyze(self, frames, frame_size):
        # NOTE: max_backlog, roi1 and roi2 are assumed to be set by a subclass
        # (see AFLockCamera below).
        # Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
frame = elt.getData().reshape(frame_size)
self.image = frame
image1 = frame[self.roi1]
image2 = frame[self.roi2]
def getZeroDist(self):
return self.zero_dist
def qpdScan(self, reps = 4):
"""
Returns [power, offset, is_good]
"""
power_total = 0.0
offset_total = 0.0
good_total = 0.0
for i in range(reps):
[power, n_good, offset] = self.singleQpdScan()
power_total += power
good_total += n_good
offset_total += offset
power_total = power_total/float(reps)
if (good_total > 0):
return [power_total, offset_total/good_total, True]
else:
return [power_total, 0, False]
def singleQpdScan(self):
"""
Perform a single measurement of the focus lock offset and camera sum signal.
Returns [power, total_good, offset]
"""
        # Determine offset by fitting gaussians to the two beam spots.
        # In the event that only one beam spot can be fit then this will
        # attempt to compensate. However this assumes that the two
        # spots are centered across the mid-line of camera ROI.
        #
        # NOTE: 'data' is assumed to be the most recent frame stored by
        # analyze(), and 'power' (the sum signal) its maximum pixel value.
        data = self.image
        power = float(numpy.max(data))
        [total_good, dist1, dist2] = self.doFit(data)
# Calculate offset.
# No good fits.
if (total_good == 0):
return [power, 0.0, 0.0]
# One good fit.
elif (total_good == 1):
if self.allow_single_fits:
return [power, 1.0, ((dist1 + dist2) - 0.5*self.zero_dist)]
else:
return [power, 0.0, 0.0]
# Two good fits. This gets twice the weight of one good fit
# if we are averaging.
else:
return [power, 2.0, 2.0*((dist1 + dist2) - self.zero_dist)]
class CameraQPDCorrFit(CameraQPD):
"""
This version uses storm-analyis to do the peak finding and
image correlation to do the peak fitting.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
assert (cl2DG is not None), "Correlation fitting not available."
self.fit_hl = None
self.fit_hr = None
def doFit(self, data):
dist1 = 0
dist2 = 0
self.x_off1 = 0.0
self.y_off1 = 0.0
self.x_off2 = 0.0
self.y_off2 = 0.0
if self.fit_hl is None:
roi_size = int(3.0 * self.sigma)
self.fit_hl = cl2DG.CorrLockFitter(roi_size = roi_size,
sigma = self.sigma,
threshold = 10)
self.fit_hr = cl2DG.CorrLockFitter(roi_size = roi_size,
sigma = self.sigma,
threshold = 10)
total_good = 0
[x1, y1, status] = self.fit_hl.findFitPeak(data[:,:self.half_x])
if status:
total_good += 1
self.x_off1 = x1 - self.half_y
self.y_off1 = y1 - self.half_x
dist1 = abs(self.y_off1)
[x2, y2, status] = self.fit_hr.findFitPeak(data[:,-self.half_x:])
if status:
total_good += 1
self.x_off2 = x2 - self.half_y
self.y_off2 = y2
dist2 = abs(self.y_off2)
return [total_good, dist1, dist2]
def shutDown(self):
super().shutDown()
if self.fit_hl is not None:
self.fit_hl.cleanup()
self.fit_hr.cleanup()
class AFLockCamera(LockCamera):
"""
This class works with the auto-focus hardware configuration.
In this configuration there are two spots that move horizontally
as the focus changes. The spots are shifted vertically so that
they don't overlap with each other.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
self.cnt = 0
self.max_backlog = 20
self.min_good = parameters.get("min_good")
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
        self.good = numpy.zeros(self.reps, dtype = bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
# Create slices for selecting the appropriate regions from the camera.
# This is where the problem comes up
t1 = list(map(int, parameters.get("roi1").split(",")))
self.roi1 = (slice(t1[0], t1[1]), slice(t1[2], t1[3]))
t2 = list(map(int, parameters.get("roi2").split(",")))
self.roi2 = (slice(t2[0], t2[1]), slice(t2[2], t2[3]))
self.afc = afLC.AFLockC(offset = parameters.get("background"),
downsample = parameters.get("downsample"))
assert (self.reps >= self.min_good), "'reps' must be >= 'min_good'."
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.001*inc
self.params_mutex.unlock()
def analyze(self, frames, frame_size):
# testing inputs
# frame_size = (1440,1080)
# the frames list has class objects inside, specifically:
# <class 'storm_control.sc_hardware.pointGrey.spinnaker.ScamData'>
if False:
print('\n\n--------------- \ndef analyze\n--------------------')
print(f'type(frames)= {type(frames)}')
print(f'len(frames)= {len(frames)}')
if len(frames)> 0:
print(f'type(frames[0]) = { type(frames[0])}')
print(f'type(frame_size) = {type(frame_size)}')
print(f'frame_size = {frame_size}')
# Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
if False:
# testing what happens without the reshaping
print('saving frame_test1.npy ...')
frame_test1 = elt.getData()
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame_test1.npy',frame_test1)
# or reshaping using the inverted X and Y directions
print('saving frame_test2.npy ...')
frame_test2 = elt.getData().reshape((frame_size[1],frame_size[0]))
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame_test2.npy',frame_test2)
# Not sure why, but the dimensions are swapped
#frame = elt.getData().reshape(frame_size)
frame = elt.getData().reshape((frame_size[1],frame_size[0]))
image1 = frame[self.roi1]
image2 = frame[self.roi2]
# Debugging ROI shape issues
if False:
print('\n\n------------------------\nDebugging ROI shape issues')
print(f'self.roi1 = {self.roi1}')
print(f'self.roi2 = {self.roi2}')
print(f'frame.shape = {frame.shape}')
print(f'image1.shape = {image1.shape}')
print(f'image2.shape = {image2.shape}')
print(f'image1.dtype = {image1.dtype}')
print(f'image2.dtype = {image2.dtype}')
print('------------------------\n\n')
# Debugging image1 and image2 into findOffsetU16NM
if False:
# testing what happens without the reshaping
print('saving image1.npy and image2.npy...')
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image1.npy',image1)
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image2.npy',image2)
# This is the offending line
[x_off, y_off, success, mag] = self.afc.findOffsetU16NM(image1, image2, verbose = True)
if False:
print('\n\n------------------------\nDebugging findOffsetU16NM')
print(f'x_off = {x_off}')
print(f'y_off = {y_off}')
#self.bg_est[self.cnt] = frame[0,0]
self.good[self.cnt] = success
self.mag[self.cnt] = mag
self.x_off[self.cnt] = x_off
self.y_off[self.cnt] = y_off
# Check if we have all the samples we need.
self.cnt += 1
if (self.cnt == self.reps):
# Convert current frame to 8 bit image.
# 201218 seems like our camera data is 16 bit not 12 bit (even though ADC is 12 bit)
#image = numpy.right_shift(frame, 3).astype(numpy.uint8) #convert from 12 bit
image = numpy.right_shift(frame, 4).astype(numpy.uint8) #convert from 16 bit
#debugging save image to check how it looks.
# result: frame and image are already scrambled.
if False:
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\image.npy',image)
numpy.save(r'C:\Users\STORM1\Desktop\focus_lock_debugging\frame.npy',frame)
qpd_dict = {"is_good" : True,
"image" : image,
"offset" : 0.0,
"sum" : 0.0,
"x_off" : 0.0,
"y_off" : 0.0}
if (numpy.count_nonzero(self.good) < self.min_good):
qpd_dict["is_good"] = False
self.cameraUpdate.emit(qpd_dict)
else:
mag = numpy.mean(self.mag[self.good])
y_off = numpy.mean(self.y_off[self.good]) - self.zero_dist
qpd_dict["offset"] = y_off
qpd_dict["sum"] = self.sum_scale*mag - self.sum_zero
qpd_dict["x_off"] = numpy.mean(self.x_off[self.good])
qpd_dict["y_off"] = y_off
self.cameraUpdate.emit(qpd_dict)
self.cnt = 0
def stopCamera(self):
super().stopCamera()
self.afc.cleanup()
class SSLockCamera(LockCamera):
"""
This class works with the standard IR laser focus lock configuration.
In this configuration there is a single spot that moves horizontally
as the focus changes.
"""
def __init__(self, parameters = None, **kwds):
kwds["parameters"] = parameters
super().__init__(**kwds)
self.cnt = 0
self.max_backlog = 20
self.min_good = parameters.get("min_good")
self.offset = parameters.get("offset")
self.reps = parameters.get("reps")
self.sum_scale = parameters.get("sum_scale")
self.sum_zero = parameters.get("sum_zero")
self.good = numpy.zeros(self.reps, dtype = bool)
self.mag = numpy.zeros(self.reps)
self.x_off = numpy.zeros(self.reps)
self.y_off = numpy.zeros(self.reps)
self.lpf = slpf.LockPeakFinder(offset = 0,
sigma = parameters.get("sigma"),
threshold = parameters.get("threshold"))
assert (self.reps >= self.min_good), "'reps' must be >= 'min_good'."
def adjustZeroDist(self, inc):
self.params_mutex.lock()
self.zero_dist += 0.1*inc
self.params_mutex.unlock()
def analyze(self, frames, frame_size):
# Only keep the last max_backlog frames if we are falling behind.
lf = len(frames)
if (lf>self.max_backlog):
self.n_dropped += lf - self.max_backlog
frames = frames[-self.max_backlog:]
for elt in frames:
self.n_analyzed += 1
frame = elt.getData().reshape(frame_size)
# self.offset is slightly below what the camera reads with no
# signal. We'll be doing MLE fitting so we can't tolerate
# negative values in 'frame'.
frame = frame - self.offset
# Magnitude calculation.
mag = numpy.max(frame) - numpy.mean(frame)
# Fit peak X/Y location.
[x_off, y_off, success] = self.lpf.findFitPeak(frame)
self.good[self.cnt] = success
self.mag[self.cnt] = mag
self.x_off[self.cnt] = x_off
self.y_off[self.cnt] = y_off
# Check if we have all the samples we need.
self.cnt += 1
if (self.cnt == self.reps):
# Convert current frame to 8 bit image.
image = numpy.right_shift(frame.astype(numpy.uint16), 3).astype(numpy.uint8)
mag = numpy.mean(self.mag)
qpd_dict = {"is_good" : True,
"image" : image,
"offset" : 0.0,
"sum" : self.sum_scale*mag - self.sum_zero,
"x_off" : 0.0,
"y_off" : 0.0}
if (numpy.count_nonzero(self.good) < self.min_good):
qpd_dict["is_good"] = False
self.cameraUpdate.emit(qpd_dict)
else:
y_off = numpy.mean(self.y_off[self.good]) - self.zero_dist
qpd_dict["offset"] = y_off
qpd_dict["x_off"] = numpy.mean(self.x_off[self.good])
qpd_dict["y_off"] = y_off
self.cameraUpdate.emit(qpd_dict)
self.cnt = 0
def stopCamera(self):
super().stopCamera()
self.lpf.cleanup()
#
# The MIT License
#
# Copyright (c) 2020 Babcock Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| StarcoderdataPython |
12184 | from __future__ import print_function
import argparse, sys
from .utils import is_textfile
def contains_crlf(filename):
with open(filename, mode='rb') as file_checked:
for line in file_checked.readlines():
if line.endswith(b'\r\n'):
return True
return False
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='filenames to check')
args = parser.parse_args(argv)
text_files = [f for f in args.filenames if is_textfile(f)]
files_with_crlf = [f for f in text_files if contains_crlf(f)]
return_code = 0
for file_with_crlf in files_with_crlf:
print('CRLF end-lines detected in file: {0}'.format(file_with_crlf))
return_code = 1
return return_code
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
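# Usage sketch (hypothetical invocation; the hook/module name below is an assumption,
# not taken from this file):
#
#   python -m hooks.forbid_crlf README.md src/app.py
#   # prints "CRLF end-lines detected in file: src/app.py" and exits with code 1
#
# Binary and non-text files are skipped by the is_textfile() filter above.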
| StarcoderdataPython |
6485850 | from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 6
_version_micro = "" # use "" for first of series, number for 1 and above
_version_extra = "" # use "dev0" for developemnt, "" for full release
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = ".".join(map(str, _ver))
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
]
# Description should be a one-liner:
description = "fast_upfirdn: CPU & GPU implementations of scipy.signal.upfirdn"
# Long description will go up on the pypi page
long_description = """
Fast Upfirdn
============
The core low-level function implemented here is an equivalent of
``scipy.signal.upfirdn`` but with support for both CPU (via NumPy) and GPU
(via CuPy).
The version of ``upfirdn`` here supports several signal extension modes. These
have been contributed upstream to SciPy and are available there for SciPy 1.4+.
.. _README: https://github.com/mritools/fast_upfirdn/blob/master/README.md
License
=======
``fast_upfirdn`` is licensed under the terms of the BSD 3-clause license. See
the file "LICENSE.txt" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2019-2020,
<NAME>, Cincinnati Children's Hospital Medical Center.
"""
NAME = "fast_upfirdn"
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/mritools/fast_upfirdn"
DOWNLOAD_URL = ""
LICENSE = "BSD"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {"fast_upfirdn": [pjoin("data", "*"), pjoin("tests", "*")]}
REQUIRES = ["numpy", "cython"]
PYTHON_REQUIRES = ">= 3.7"
| StarcoderdataPython |
9687019 | <gh_stars>0
#!/usr/bin/env python3
import sys
import os
import logging
import argparse
from wordpal import puzzicon
from pb5.balloons import WordSearcher
def main():
parser = argparse.ArgumentParser()
parser.add_argument("balloons", nargs='+')
parser.add_argument("-l", "--log-level", choices=('DEBUG', 'INFO', 'WARN', 'ERROR'), default='INFO', help="set log level")
parser.add_argument("-v", "--verbose", action='store_const', const='DEBUG', dest='log_level', help="set log level DEBUG")
parser.add_argument("-n", "--length", type=int, default=3)
args = parser.parse_args()
logging.basicConfig(level=logging.__dict__[args.log_level])
searcher = WordSearcher(puzzicon.load_default_puzzemes())
balloons = list()
for b in args.balloons:
if b.startswith('@'):
with open(b[1:], 'r') as ifile:
for line in ifile:
for x in line.split():
balloons.append(x.strip())
else:
balloons.append(b.strip())
assert balloons, "no balloons provided"
matches = searcher.find(balloons, args.length)
for match in matches:
print(match)
return 0
if __name__ == '__main__':
exit(main())
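# Example invocations (illustrative; the module path is an assumption, not stated here):
#
#   python -m pb5.find_words cat dog bird -n 4
#   python -m pb5.find_words @balloons.txt --length 5 -v
#
# Arguments prefixed with '@' are read as files whose whitespace-separated tokens
# become additional balloons.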
| StarcoderdataPython |
5199931 | <gh_stars>1-10
import os
from pddlstream.algorithms.reorder import get_partial_orders
from pddlstream.language.constants import EQ, get_prefix, get_args, NOT, MINIMIZE
from pddlstream.language.conversion import str_from_fact, evaluation_from_fact
from pddlstream.language.function import FunctionResult
from pddlstream.language.object import OptimisticObject
from pddlstream.language.synthesizer import SynthStreamResult, decompose_stream_plan
from pddlstream.utils import clear_dir, ensure_dir, str_from_plan
# https://www.graphviz.org/doc/info/
PARAMETER_COLOR = 'LightGreen'
CONSTRAINT_COLOR = 'LightBlue'
NEGATED_COLOR = 'LightYellow'
COST_COLOR = 'LightSalmon'
STREAM_COLOR = 'LightSteelBlue'
FUNCTION_COLOR = 'LightCoral'
VISUALIZATIONS_DIR = 'visualizations/'
CONSTRAINT_NETWORK_DIR = os.path.join(VISUALIZATIONS_DIR, 'constraint_networks/')
STREAM_PLAN_DIR = os.path.join(VISUALIZATIONS_DIR, 'stream_plans/')
PLAN_LOG_FILE = os.path.join(VISUALIZATIONS_DIR, 'log.txt')
ITERATION_TEMPLATE = 'iteration_{}.png'
SYNTHESIZER_TEMPLATE = '{}_{}.png'
POST_PROCESS = 'post'
##################################################
def has_pygraphviz():
try:
import pygraphviz
except ImportError:
return False
return True
def reset_visualizations():
clear_dir(VISUALIZATIONS_DIR)
ensure_dir(CONSTRAINT_NETWORK_DIR)
ensure_dir(STREAM_PLAN_DIR)
def log_plans(stream_plan, action_plan, iteration):
# TODO: do this within the focused algorithm itself?
decomposed_plan = decompose_stream_plan(stream_plan)
with open(PLAN_LOG_FILE, 'a+') as f:
f.write('Iteration: {}\n'
'Stream plan: {}\n'
'Synthesizer plan: {}\n'
'Action plan: {}\n\n'.format(
iteration, decomposed_plan,
stream_plan, str_from_plan(action_plan)))
def create_synthesizer_visualizations(result, iteration):
stream_plan = result.decompose()
filename = SYNTHESIZER_TEMPLATE.format(result.instance.external.name,
POST_PROCESS if iteration is None else iteration)
#constraints = get_optimistic_constraints(evaluations, stream_plan)
visualize_constraints(result.get_certified() + result.get_functions(),
os.path.join(CONSTRAINT_NETWORK_DIR, filename))
visualize_stream_plan_bipartite(stream_plan,
os.path.join(STREAM_PLAN_DIR, filename))
def create_visualizations(evaluations, stream_plan, iteration):
# TODO: place it in the temp_dir?
# TODO: decompose any joint streams
for result in stream_plan:
if isinstance(result, SynthStreamResult):
create_synthesizer_visualizations(result, iteration)
filename = ITERATION_TEMPLATE.format(POST_PROCESS if iteration is None else iteration)
# visualize_stream_plan(stream_plan, path)
constraints = set() # TODO: approximates needed facts using produced ones
for stream in stream_plan:
constraints.update(filter(lambda f: evaluation_from_fact(f) not in evaluations, stream.get_certified()))
visualize_constraints(constraints, os.path.join(CONSTRAINT_NETWORK_DIR, filename))
visualize_stream_plan_bipartite(decompose_stream_plan(stream_plan), os.path.join(STREAM_PLAN_DIR, filename))
visualize_stream_plan_bipartite(stream_plan, os.path.join(STREAM_PLAN_DIR, 'fused_' + filename))
##################################################
def visualize_constraints(constraints, filename='constraint_network.pdf', use_functions=True):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=False)
graph.node_attr['style'] = 'filled'
#graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['colorscheme'] = 'SVG'
graph.edge_attr['colorscheme'] = 'SVG'
#graph.graph_attr['rotate'] = 90
#graph.node_attr['fixedsize'] = True
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['rankdir'] = 'RL'
graph.graph_attr['nodesep'] = 0.05
graph.graph_attr['ranksep'] = 0.25
#graph.graph_attr['pad'] = 0
# splines="false";
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
functions = set()
negated = set()
heads = set()
for fact in constraints:
prefix = get_prefix(fact)
if prefix in (EQ, MINIMIZE):
functions.add(fact[1])
elif prefix == NOT:
negated.add(fact[1])
else:
heads.add(fact)
heads.update(functions)
heads.update(negated)
objects = {a for head in heads for a in get_args(head)}
optimistic_objects = filter(lambda o: isinstance(o, OptimisticObject), objects)
for opt_obj in optimistic_objects:
graph.add_node(str(opt_obj), shape='circle', color=PARAMETER_COLOR)
for head in heads:
if not use_functions and (head in functions):
continue
# TODO: prune values w/o free parameters?
name = str_from_fact(head)
if head in functions:
color = COST_COLOR
elif head in negated:
color = NEGATED_COLOR
else:
color = CONSTRAINT_COLOR
graph.add_node(name, shape='box', color=color)
for arg in get_args(head):
if arg in optimistic_objects:
graph.add_edge(name, str(arg))
graph.draw(filename, prog='dot') # neato | dot | twopi | circo | fdp | nop
return graph
##################################################
def visualize_stream_plan(stream_plan, filename='stream_plan.pdf'):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['color'] = STREAM_COLOR
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
for stream in stream_plan:
graph.add_node(str(stream))
for stream1, stream2 in get_partial_orders(stream_plan):
graph.add_edge(str(stream1), str(stream2))
# TODO: could also print the raw values (or a lookup table)
# https://stackoverflow.com/questions/3499056/making-a-legend-key-in-graphviz
graph.draw(filename, prog='dot')
return graph
##################################################
def visualize_stream_plan_bipartite(stream_plan, filename='stream_plan.pdf', use_functions=False):
from pygraphviz import AGraph
graph = AGraph(strict=True, directed=True)
graph.node_attr['style'] = 'filled'
graph.node_attr['shape'] = 'box'
graph.node_attr['fontcolor'] = 'black'
#graph.node_attr['fontsize'] = 12
graph.node_attr['width'] = 0
graph.node_attr['height'] = 0.02 # Minimum height is 0.02
graph.node_attr['margin'] = 0
#graph.graph_attr['rankdir'] = 'LR'
graph.graph_attr['nodesep'] = 0.1
graph.graph_attr['ranksep'] = 0.25
graph.graph_attr['outputMode'] = 'nodesfirst'
graph.graph_attr['dpi'] = 300
def add_fact(fact):
head, color = (fact[1], COST_COLOR) if get_prefix(fact) == EQ else (fact, CONSTRAINT_COLOR)
s_fact = str_from_fact(head)
graph.add_node(s_fact, color=color)
return s_fact
def add_stream(stream):
color = FUNCTION_COLOR if isinstance(stream, FunctionResult) else STREAM_COLOR
s_stream = str(stream.instance) if isinstance(stream, FunctionResult) else str(stream)
graph.add_node(s_stream, style='rounded,filled', color=color)
# shape: oval, plaintext, polygon, rarrow, cds
# style: rounded, filled, bold
return s_stream
achieved_facts = set()
for stream in stream_plan:
if not use_functions and isinstance(stream, FunctionResult):
continue
s_stream = add_stream(stream)
for fact in stream.instance.get_domain():
if fact in achieved_facts:
s_fact = add_fact(fact)
graph.add_edge(s_fact, s_stream) # Add initial facts?
#if not isinstance(stream, StreamResult):
# continue
for fact in stream.get_certified():
if fact not in achieved_facts: # Ensures DAG
s_fact = add_fact(fact)
graph.add_edge(s_stream, s_fact)
achieved_facts.add(fact)
graph.draw(filename, prog='dot')
return graph
# graph.layout
# https://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/reference/agraph.html
| StarcoderdataPython |
6599933 | <reponame>cpearce/scikit-ika<filename>skika/hyper_parameter_tuning/trees_arf/evaluate_prequential_and_adapt.py<gh_stars>1-10
import os
import warnings
import re
from timeit import default_timer as timer
from numpy import unique
# Include the 2 following from the third_party skmultiflow
from skmultiflow.evaluation.base_evaluator import StreamEvaluator
from skmultiflow.utils import constants
from skika.hyper_parameter_tuning.trees_arf.meta_feature_generator import ComputeStreamMetaFeatures
class EvaluatePrequentialAndAdaptTreesARF(StreamEvaluator):
""" Prequential evaluation method with adaptive tuning of hyper-parameters to tune the number of trees in ARF.
Description :
This code is based on the ``scikit_multiflow`` evaluate_prequential implementation.
Copyright (c) 2017, scikit-multiflow
All rights reserved.
We modified it to include adaptive tuning of hyper-parameters.
Scikit_multiflow description:
An alternative to the traditional holdout evaluation, inherited from
batch setting problems.
The prequential evaluation is designed specifically for stream settings,
in the sense that each sample serves two purposes, and that samples are
analysed sequentially, in order of arrival, and become immediately
inaccessible.
This method consists of using each sample to test the model, which means
to make a predictions, and then the same sample is used to train the model
(partial fit). This way the model is always tested on samples that it
hasn't seen yet.
Additional scikit-ika features:
This method implements an adaptive tuning process to adapt the number of trees
in an Adaptive Random Forest, depending on the number of redundant features in the stream.
Parameters :
n_wait:int (Default: 200)
The number of samples to process between each test. Also defines when to update the plot if `show_plot=True`.
Note that setting `n_wait` too small can significantly slow the evaluation process.
max_samples:int (Default: 100000)
The maximum number of samples to process during the evaluation.
batch_size:int (Default: 1)
The number of samples to pass at a time to the model(s).
pretrain_size:int (Default: 200)
The number of samples to use to train the model before starting the evaluation. Used to enforce a 'warm' start.
max_time:float (Default: float("inf"))
The maximum duration of the simulation (in seconds).
metrics:list, optional (Default: ['accuracy', 'kappa'])
| The list of metrics to track during the evaluation. Also defines the metrics that will be displayed in plots
and/or logged into the output file. Valid options are
| *Classification*
| 'accuracy'
| 'kappa'
| 'kappa_t'
| 'kappa_m'
| 'true_vs_predicted'
| 'precision'
| 'recall'
| 'f1'
| 'gmean'
| *Multi-target Classification*
| 'hamming_score'
| 'hamming_loss'
| 'exact_match'
| 'j_index'
| *Regression*
| 'mean_square_error'
| 'mean_absolute_error'
| 'true_vs_predicted'
| *Multi-target Regression*
| 'average_mean_squared_error'
| 'average_mean_absolute_error'
| 'average_root_mean_square_error'
| *Experimental*
| 'running_time'
| 'model_size'
| 'ram_hours'
output_file: string, optional (Default: None)
File name to save the summary of the evaluation.
show_plot: bool (Default: False)
If True, a plot will show the progress of the evaluation. Warning: Plotting can slow down the evaluation
process.
restart_stream: bool, optional (default: True)
If True, the stream is restarted once the evaluation is complete.
data_points_for_classification: bool(Default: False)
If True, the visualization used is a cloud of data points (only works for classification) and default
performance metrics are ignored. If specific metrics are required, then they *must* be explicitly set
using the ``metrics`` attribute.
metaKB : dict (Default: None)
The meta model linking the meta features to the hyper-parameters configuration.
It is a dictionary linking the percentage of redundant features and the number of trees to choose for
each of them. This model is built by runing multiple ARF configurations (with different number of trees)
on multiple streams with different percentages of redundant features, and using the build_pareto_knowledge_trees
module to choose the number of trees.
E.g.: dictMeta = {0.0:60 ,0.1:30, 0.2:30, 0.3:30, 0.4:60, 0.5:70, 0.6:60, 0.7:30, 0.8:30, 0.9:30}
If no metaKB, the class performs only the prequential evaluation.
Notes
1. If the adaptive hyper-parameter tuning is not used, this evaluator can process a single learner to track its performance;
or multiple learners at a time, to compare different models on the same stream.
2. If the adaptive hyper-parameter tuning is used, this evaluator can process only a single learner at the moment.
3. This class can only be used with ARF as the classifier. Further developments are needed to generalise it to more tasks with
more classifiers.
4. The metric 'true_vs_predicted' is intended to be informative only. It corresponds to evaluations at a specific
moment which might not represent the actual learner performance across all instances.
Example:
>>> from skika.data.random_rbf_generator_redund import RandomRBFGeneratorRedund
>>> from skika.hyper_parameter_tuning.trees_arf.evaluate_prequential_and_adapt import EvaluatePrequentialAndAdaptTreesARF
>>>
>>> # Set the stream
>>> stream = StreamGeneratorRedund(base_stream = RandomRBFGeneratorRedund(n_classes=2, n_features=30, n_centroids=50, noise_percentage = 0.0), random_state=None, n_drifts = 100, n_instances = 100000)
>>> stream.prepare_for_use()
>>>
>>> # Set the model
>>> arf = AdaptiveRandomForest(n_estimators = 10)
>>>
>>> # Set the meta knowledge
>>> dictMeta = {0.0:60 ,0.1:30, 0.2:30, 0.3:30, 0.4:60, 0.5:70, 0.6:60, 0.7:30, 0.8:30, 0.9:30} # dict = {'pourc redund feat':best nb tree}
>>>
>>> # Set the evaluator
>>>
>>> evaluator = EvaluatePrequentialAndAdaptTreesARF(metrics=['accuracy','kappa','running_time','ram_hours'],
>>> max_samples=100000,
>>> n_wait=500,
>>> pretrain_size=200,
>>> show_plot=True,
>>> metaKB=dictMeta)
>>>
>>> # Run evaluation with adaptive tuning
>>> evaluator.evaluate(stream=stream, model=arf, model_names=['ARF'])
"""
def __init__(self,
n_wait=200,
max_samples=100000,
batch_size=1,
pretrain_size=200,
max_time=float("inf"),
metrics=None,
output_file=None,
show_plot=False,
restart_stream=True,
data_points_for_classification=False,
metaKB=None):
super().__init__()
self._method = 'prequential'
self.n_wait = n_wait
self.max_samples = max_samples
self.pretrain_size = pretrain_size
self.batch_size = batch_size
self.max_time = max_time
self.output_file = output_file
self.show_plot = show_plot
self.data_points_for_classification = data_points_for_classification
self.metaKB = metaKB
if not self.data_points_for_classification:
if metrics is None:
self.metrics = [constants.ACCURACY, constants.KAPPA]
else:
if isinstance(metrics, list):
self.metrics = metrics
else:
raise ValueError("Attribute 'metrics' must be 'None' or 'list', passed {}".format(type(metrics)))
else:
if metrics is None:
self.metrics = [constants.DATA_POINTS]
else:
if isinstance(metrics, list):
self.metrics = metrics
self.metrics.append(constants.DATA_POINTS)
else:
raise ValueError("Attribute 'metrics' must be 'None' or 'list', passed {}".format(type(metrics)))
self.restart_stream = restart_stream
self.n_sliding = n_wait
warnings.filterwarnings("ignore", ".*invalid value encountered in true_divide.*")
warnings.filterwarnings("ignore", ".*Passing 1d.*")
def evaluate(self, stream, model, model_names=None):
""" Evaluates a model on samples from a stream and adapt the tuning.
Parameters
----------
stream: Stream
The stream from which to draw the samples.
model: skmultiflow.core.BaseStreamModel or sklearn.base.BaseEstimator or list
The model or list of models to evaluate.
NOTE : Only ARF is usable with this current version of the adaptive tuning.
model_names: list, optional (Default=None)
A list with the names of the models.
Returns
-------
tuple
The trained model(s), the list of recorded accuracies per model, and the numbers of trees chosen at each adaptation.
"""
self._init_evaluation(model=model, stream=stream, model_names=model_names)
if self._check_configuration():
self._reset_globals()
# Initialize metrics and outputs (plots, log files, ...)
self._init_metrics()
self._init_plot()
self._init_file()
self.model, self.list_acc, new_nb_trees = self._train_and_test()
if self.show_plot:
self.visualizer.hold()
return self.model, self.list_acc, new_nb_trees
def _train_and_test(self):
""" Method to control the prequential evaluation and adaptive tuning.
Returns
-------
tuple
The trained classifier(s), the recorded accuracies, and the numbers of trees chosen at each adaptation.
Notes
-----
The classifier parameter should be an extension from the BaseClassifier. In
the future, when BaseRegressor is created, it could be an extension from that
class as well.
"""
self._start_time = timer()
self._end_time = timer()
print('Prequential Evaluation')
print('Evaluating {} target(s).'.format(self.stream.n_targets))
actual_max_samples = self.stream.n_remaining_samples()
if actual_max_samples == -1 or actual_max_samples > self.max_samples:
actual_max_samples = self.max_samples
first_run = True
# Init meta_features extractors and list of accuracies (for test) for each model
self.list_acc = []
self.extractor = []
for i in range(self.n_models):
self.extractor.append(ComputeStreamMetaFeatures(stream = self.stream, list_feat = ['perc_redund_feat']))
self.list_acc.append([])
if self.pretrain_size > 0:
print('Pre-training on {} sample(s).'.format(self.pretrain_size))
X, y = self.stream.next_sample(self.pretrain_size)
######################
# Do model adaptation only if a knowledge base is not None
######################
if self.metaKB != None :
####################
# Choose first configuration of parameters from pre_train set meta feats
self.extractor[i].list_stream_samples.append(X)
current_perc_redund = self.extractor[i].run_extraction(['perc_redund_feat'])[0][0]
# ## DEBUG:
# print('Read perc redund : '+str(self.stream.perc_redund_features))
# print('Initial Configuration of the hyperparameters : nbtrees = {}'.format(self.metaKB[round(current_perc_redund[0],1)]))
for i in range(self.n_models):
self.model[i].n_estimators = self.metaKB[round(current_perc_redund,1)] # perc_redund is the first extracted meta-feature; round it to match the meta-knowledge keys
self.model[i].init_ensemble(X)
print('initial number of trees {}'.format(self.metaKB[round(current_perc_redund,1)]))
last_perc_redund = current_perc_redund
for i in range(self.n_models):
if self._task_type == constants.CLASSIFICATION:
# Training time computation
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y, classes=self.stream.target_values)
self.running_time_measurements[i].compute_training_time_end()
elif self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y, classes=unique(self.stream.target_values))
self.running_time_measurements[i].compute_training_time_end()
else:
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X=X, y=y)
self.running_time_measurements[i].compute_training_time_end()
self.running_time_measurements[i].update_time_measurements(self.pretrain_size)
self.global_sample_count += self.pretrain_size
first_run = False
update_count = 0
print('Evaluating...')
# Verification variables
compt_drift = []
drift_detec_list = []
pourc_redund_read = []
new_nb_trees = []
# Initialise RAM_hours measurements
for i in range(self.n_models):
self.running_RAM_H_measurements[i].compute_evaluate_start_time()
# Verification variables
compt_drift.append(0) # Temp variable to simulate drift detection
drift_detec_list.append([])
pourc_redund_read.append([])
new_nb_trees.append([])
sample_stream = False
while ((self.global_sample_count < actual_max_samples) & (self._end_time - self._start_time < self.max_time)
& (self.stream.has_more_samples())):
try:
X, y = self.stream.next_sample(self.batch_size)
######################
# Do model adaptation only if a knowledge base is not None
######################
if self.metaKB != None :
# Update model hyper-parameters if drifts
# TODO : To begin we specify when the drifts happen (set in ConceptDriftStream at 5000) --> replace with direct drift detection BaseDriftDetector
# if drift detected (signal from ARF) -> extract meta-features
# if change in meta features -> match with meta-knowledge and change parameters
# Drift Detection
# Works only with adaptiveRF modif to get drift and warnings for a StreamGeneratorRedund
for i in range(self.n_models):
# if warning detected, measure of meta-features is launched
if self.model[i].warning_detected :
print('Warning detected at {}'.format(self.global_sample_count))
self.extractor[i].list_stream_samples = []
sample_stream = True
if sample_stream == True :
# Store next samples
self.extractor[i].list_stream_samples.append(X)
if self.model[i].drift_detected and len(self.extractor[i].list_stream_samples) > 10:
print('Drift detected at {}'.format(self.global_sample_count))
current_perc_redund = self.extractor[i].run_extraction(['perc_redund_feat'])[0][0]
# ## DEBUG:
# print('Read perc redund : '+str(self.stream.perc_redund_features))
# Test first if meta-features really changed before updating the model
if last_perc_redund != current_perc_redund :
print('Change in meta-features at {}'.format(self.global_sample_count))
self.model[i].new_n_estimators = self.metaKB[round(current_perc_redund,1)] # Round for the moment to get perfect match with meta model
self.model[i].update_config(X)
print('New number of trees {}'.format(self.metaKB[round(current_perc_redund,1)]))
last_perc_redund = current_perc_redund
sample_stream = False
# Verification variables
compt_drift[i] = compt_drift[i] + 1
drift_detec_list[i].append(self.global_sample_count)
pourc_redund_read[i].append(round(current_perc_redund,1))
new_nb_trees[i].append(self.metaKB[round(current_perc_redund,1)])
# # DEBUG:
# print('Number detected drifts : '+str(compt_drift[i]))
# print('Positions detected drifts : '+str(drift_detec_list[i]))
# print('Perc redund measured : '+str(pourc_redund_read[i]))
if X is not None and y is not None:
# Test
prediction = [[] for _ in range(self.n_models)]
for i in range(self.n_models):
try:
# Testing time
self.running_RAM_H_measurements[i].compute_evaluate_start_time()
self.running_time_measurements[i].compute_testing_time_begin()
prediction[i].extend(self.model[i].predict(X))
self.running_time_measurements[i].compute_testing_time_end()
self.running_RAM_H_measurements[i].compute_update_time_increment()
except TypeError:
raise TypeError("Unexpected prediction value from {}"
.format(type(self.model[i]).__name__))
self.global_sample_count += self.batch_size
for j in range(self.n_models):
for i in range(len(prediction[0])):
self.mean_eval_measurements[j].add_result(y[i], prediction[j][i])
self.current_eval_measurements[j].add_result(y[i], prediction[j][i])
self._check_progress(actual_max_samples)
# Train
if first_run:
for i in range(self.n_models):
if self._task_type != constants.REGRESSION and \
self._task_type != constants.MULTI_TARGET_REGRESSION:
# Accounts for the moment of training beginning
self.running_RAM_H_measurements[i].compute_evaluate_start_time()
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X, y, self.stream.target_values)
# Accounts the ending of training
self.running_time_measurements[i].compute_training_time_end()
self.running_RAM_H_measurements[i].compute_update_time_increment()
else:
self.running_RAM_H_measurements[i].compute_evaluate_start_time()
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X, y)
self.running_time_measurements[i].compute_training_time_end()
self.running_RAM_H_measurements[i].compute_update_time_increment()
# Update total running time
self.running_time_measurements[i].update_time_measurements(self.batch_size)
first_run = False
else:
for i in range(self.n_models):
self.running_RAM_H_measurements[i].compute_evaluate_start_time()
self.running_time_measurements[i].compute_training_time_begin()
self.model[i].partial_fit(X, y)
self.running_time_measurements[i].compute_training_time_end()
self.running_time_measurements[i].update_time_measurements(self.batch_size)
self.running_RAM_H_measurements[i].compute_update_time_increment()
if ((self.global_sample_count % self.n_wait) == 0 or
(self.global_sample_count >= self.max_samples) or
(self.global_sample_count / self.n_wait > update_count + 1)):
if prediction is not None:
self._update_metrics()
for i in range(self.n_models):
self.list_acc[i].append(self.current_eval_measurements[i].accuracy_score())
update_count += 1
self._end_time = timer()
except BaseException as exc:
print(exc)
if exc is KeyboardInterrupt:
self._update_metrics()
break
# Flush file buffer, in case it contains data
self._flush_file_buffer()
if len(set(self.metrics).difference({constants.DATA_POINTS})) > 0:
self.evaluation_summary()
else:
print('Done')
if self.metaKB != None :
for i in range(self.n_models):
print('Number detected drifts : '+str(compt_drift[i]))
print('Positions detected drifts : '+str(drift_detec_list[i]))
print('Perc redund measured : '+str(pourc_redund_read[i]))
print('New trees number : '+str(new_nb_trees[i]))
if self.restart_stream:
self.stream.restart()
return self.model, self.list_acc, new_nb_trees
def partial_fit(self, X, y, classes=None, sample_weight=None):
""" Partially fit all the models on the given data.
Parameters
----------
X: Numpy.ndarray of shape (n_samples, n_features)
The data upon which the algorithm will create its model.
y: Array-like
An array-like containing the classification labels / target values for all samples in X.
classes: list
Stores all the classes that may be encountered during the classification task. Not used for regressors.
sample_weight: Array-like
Samples weight. If not provided, uniform weights are assumed.
Returns
-------
EvaluatePrequential
self
"""
if self.model is not None:
for i in range(self.n_models):
if self._task_type == constants.CLASSIFICATION or \
self._task_type == constants.MULTI_TARGET_CLASSIFICATION:
self.model[i].partial_fit(X=X, y=y, classes=classes, sample_weight=sample_weight)
else:
self.model[i].partial_fit(X=X, y=y, sample_weight=sample_weight)
return self
else:
return self
def predict(self, X):
""" Predicts with the estimator(s) being evaluated.
Parameters
----------
X: Numpy.ndarray of shape (n_samples, n_features)
All the samples we want to predict the label for.
Returns
-------
list of numpy.ndarray
Model(s) predictions
"""
predictions = None
if self.model is not None:
predictions = []
for i in range(self.n_models):
predictions.append(self.model[i].predict(X))
return predictions
def get_info(self):
info = self.__repr__()
if self.output_file is not None:
_, filename = os.path.split(self.output_file)
info = re.sub(r"output_file=(.\S+),", "output_file='{}',".format(filename), info)
return info
| StarcoderdataPython |
109184 | <reponame>uaca/deepy<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wrapper import deepy_tensor
from functions import concat, concatenate, reverse, ifelse, apply, repeat, var, vars, activate, is_neural_var, is_theano_var
from onehot import onehot_tensor, onehot
import theano_nnet_imports as nnet
import costs as costs
from theano_imports import * | StarcoderdataPython |
8146088 | <filename>all_utils.py
from keras.layers import CuDNNLSTM
from keras.layers import Lambda
from keras.layers.merge import add
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from numpy import dstack
from pandas import read_csv
from keras.utils import to_categorical
from itertools import product
########################### grid functions
def giveSingleParameters():
verbose = 0
batch_size = 128
optimizer = 'adam'
epochs = 9
activation = 'relu'
kernel_size_2D = (1, 3)
kernel_size_1D = 3
filters = 64
pool_size = 2
loss = 'categorical_crossentropy'
out_activation = 'softmax'
dropout_rate = 0.5
return dict(verbose=verbose, epochs=epochs, batch_size=batch_size, activation=activation,
kernel_size_2D=kernel_size_2D, kernel_size_1D=kernel_size_1D, filters=filters, pool_size=pool_size,
loss=loss, out_activation=out_activation, optimizer=optimizer, dropout_rate=dropout_rate)
# give the parameters for grid search
def giveParameters():
verbose = [0]
batch_size = [64, 128]
optimizer = ['adam', 'sgd']
epochs = [15]
activation = ['relu', 'tanh']
kernel_size_2D = [(1, 3)]
kernel_size_1D = [3]
filters = [64]
pool_size = [2]
loss = ['categorical_crossentropy']
out_activation = ['softmax']
dropout_rate = [0.2, 0.5]
return dict(verbose=verbose, epochs=epochs, batch_size=batch_size, activation=activation,
kernel_size_2D=kernel_size_2D, kernel_size_1D=kernel_size_1D, filters=filters, pool_size=pool_size,
loss=loss, out_activation=out_activation, optimizer=optimizer, dropout_rate=dropout_rate)
def defineConfigurations():
parameters = giveParameters()
return [dict(zip(parameters, x)) for x in product(*parameters.values())]
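# Example (illustrative only): each configuration returned above is a plain dict such as
# {'verbose': 0, 'epochs': 15, 'batch_size': 64, 'activation': 'relu',
#  'kernel_size_2D': (1, 3), 'kernel_size_1D': 3, 'filters': 64, 'pool_size': 2,
#  'loss': 'categorical_crossentropy', 'out_activation': 'softmax',
#  'optimizer': 'adam', 'dropout_rate': 0.2}
# With the lists in giveParameters() (2 batch sizes x 2 optimizers x 2 activations x
# 2 dropout rates) this produces 16 configurations in total.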
def summarize_gridresults(gridresults):
for x in gridresults:
print(x[0]) # prints the config
summarize_results(x[1]) # prints the scores
def summarize_results(scores):
print(scores)
m, s = np.mean(scores), np.std(scores)
print('Accuracy: %.3f%% (+/-%.3f)' % (m, s))
def printBestGrid(gridresults):
best = gridresults[0]
for x in gridresults:
if np.mean(x[1]) > np.mean(best[1]):
best = x
print("best result:")
print(best[0])
summarize_results(best[1])
# helper function to save model results to csv files
def saveResults(name, fittingProcess, accuracy, aux_accuracy, loss, aux_loss, n):
loss_history = fittingProcess.history['main_output_loss']
acc_history = fittingProcess.history['main_output_acc']
lstm_loss_history = fittingProcess.history['aux_output_loss']
lstm_acc_history = fittingProcess.history['aux_output_acc']
val_loss_history = fittingProcess.history['val_main_output_loss']
val_acc_history = fittingProcess.history['val_main_output_acc']
val_lstm_loss_history = fittingProcess.history['val_aux_output_loss']
val_lstm_acc_history = fittingProcess.history['val_aux_output_acc']
with open(name + str(n) + '.csv', "w") as outfile:
outfile.write("loss,accuracy,val_loss,val_acc")
outfile.write("\n")
for ind in range(len(loss_history)):
outfile.write(
str(loss_history[ind]) + ',' + str(acc_history[ind]) + ',' + str(val_loss_history[ind]) + ',' + str(
val_acc_history[ind]))
outfile.write("\n")
with open(name + '-lstm' + str(n) + '.csv', "w") as outfile:
outfile.write("lstm_loss,lstm_accuracy,val_lstm_loss,val_lstm_acc")
outfile.write("\n")
for ind in range(len(loss_history)):
outfile.write(str(lstm_loss_history[ind]) + ',' + str(lstm_acc_history[ind]) + ',' + str(
val_lstm_loss_history[ind]) + ',' + str(val_lstm_acc_history[ind]))
outfile.write("\n")
with open(name + '-modelevaluate' + str(n) + '.csv', "w") as outfile:
outfile.write("lstm_loss,")
outfile.write("lstm_accuracy,")
outfile.write("loss,")
outfile.write("accuracy,")
outfile.write("\n")
outfile.write(str(loss) + ',')
outfile.write(str(accuracy) + ',')
outfile.write(str(aux_loss)+',')
outfile.write(str(aux_accuracy))
outfile.write("\n")
# helper function to assign hyperparameters
def unfold_general_hyperparameters(cfg):
verbose = cfg.get('verbose') if ('verbose' in cfg) else 0
epochs = cfg.get('epochs') if ('epochs' in cfg) else 25
batch_size = cfg.get('batch_size') if ('batch_size' in cfg) else 64
activation = cfg.get('activation') if ('activation' in cfg) else 'relu'
# kernel_size_1D = cfg.get('kernel_size_1D') if ('kernel_size_1D' in cfg) else 3
filters = cfg.get('filters') if ('filters' in cfg) else 64
pool_size = cfg.get('pool_size') if ('pool_size' in cfg) else 2
loss = cfg.get('loss') if ('loss' in cfg) else 'categorical_crossentropy'
out_activation = cfg.get('out_activation') if ('out_activation' in cfg) else 'softmax'
optimizer = cfg.get('optimizer') if ('optimizer' in cfg) else 'adam'
dropout_rate = cfg.get('dropout_rate') if ('dropout_rate' in cfg) else 0.5
return verbose, epochs, batch_size, activation, filters, pool_size, loss, out_activation, optimizer, dropout_rate
# helper function for PCA feature selection
def feature_selection(all_aux_trainX, all_aux_testX):
data = np.concatenate((all_aux_trainX, all_aux_testX), axis=0)
scaler = MinMaxScaler(feature_range=[0, 1])
data_rescaled = scaler.fit_transform(data)
pca = PCA(n_components=175)
dataset = pca.fit_transform(data_rescaled)
aux_trainX = dataset[0:all_aux_trainX.shape[0]][:]
aux_testX = dataset[all_aux_trainX.shape[0]:][:]
return aux_trainX, aux_testX
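# Shape sketch (assuming the UCI HAR constant-feature files loaded below):
# all_aux_trainX is roughly (7352, 561) and all_aux_testX (2947, 561); after the
# PCA above both are reduced to 175 components, e.g.
#   aux_trainX, aux_testX = feature_selection(all_aux_trainX, all_aux_testX)
#   # aux_trainX.shape -> (7352, 175), aux_testX.shape -> (2947, 175)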
# residual lstm layer generator
def residual_lstm_layers(input, rnn_width, rnn_depth, rnn_dropout):
x = input
for i in range(rnn_depth):
return_sequences = i < rnn_depth - 1
# If return_sequences is True, this LSTM layer outputs a 3D tensor with a value for
# every time step in the input sequence instead of the default 2D output (last time step only).
# x_rnn = LSTM(rnn_width, recurrent_dropout=rnn_dropout, dropout=rnn_dropout, return_sequences=return_sequences)(x)
x_rnn = CuDNNLSTM(rnn_width, return_sequences=return_sequences)(x)
if return_sequences:
if i > 0 or input.shape[-1] == rnn_width:
x = add([x, x_rnn])
else:
x = x_rnn
else:
# Last layer does not return sequences, just the last element
# so we select only the last element of the previous output.
def slice_last(x):
return x[..., -1, :]
x = add([Lambda(slice_last)(x), x_rnn])
# x = TimeDistributed(Dense(6, activation='softmax'))(x)
return x
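# Usage sketch (illustrative; shapes assume the HAR windows of 128 time steps x 9 signals):
#   from keras.layers import Input, Dense
#   from keras.models import Model
#   inp = Input(shape=(128, 9))
#   x = residual_lstm_layers(inp, rnn_width=64, rnn_depth=3, rnn_dropout=0.5)
#   out = Dense(6, activation='softmax')(x)
#   model = Model(inputs=inp, outputs=out)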
########################### preparing the data
# load a single file as a numpy array
def load_file(filepath):
dataframe = read_csv(filepath, header=None, delim_whitespace=True)
return dataframe.values
# load a list of files and return as a 3d numpy array
def load_group(filenames, prefix=''):
loaded = list()
for name in filenames:
data = load_file(prefix + name)
loaded.append(data)
# stack group so that features are the 3rd dimension
loaded = dstack(loaded)
return loaded
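# Shape note (illustrative, assuming the 9 HAR inertial-signal files of 128-step windows):
# each load_file() call returns an (n_windows, 128) array, so dstack() produces an
# (n_windows, 128, 9) array with the individual signals stacked as the third dimension,
# e.g. X.shape -> (7352, 128, 9) for the training split.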
# load a dataset group, such as train or test
def load_timeseries_dataset_group(group, prefix=''):
filepath = prefix + group + '/Inertial Signals/'
# load all 9 files as a single array
filenames = list()
# total acceleration
filenames += ['total_acc_x_' + group + '.txt', 'total_acc_y_' + group + '.txt', 'total_acc_z_' + group + '.txt']
# body acceleration
filenames += ['body_acc_x_' + group + '.txt', 'body_acc_y_' + group + '.txt', 'body_acc_z_' + group + '.txt']
# body gyroscope
filenames += ['body_gyro_x_' + group + '.txt', 'body_gyro_y_' + group + '.txt', 'body_gyro_z_' + group + '.txt']
# load input data
X = load_group(filenames, filepath)
# load class output
y = load_file(prefix + group + '/y_' + group + '.txt')
return X, y
def load_const_dataset_group(group, prefix=''):
X = load_file(prefix + group + '/X_' + group + '.txt')
y = load_file(prefix + group + '/y_' + group + '.txt')
return X, y
# load the dataset, returns train and test X and y elements
def load_dataset(prefix=''):
# load all train
trainX, trainy = load_timeseries_dataset_group('train', prefix + 'HARDataset/')
all_aux_trainX, aux_trainy = load_const_dataset_group('train', prefix + 'HARDataset/')
print(">> Time series data shape: {0} , {1}".format(trainX.shape, trainy.shape))
print(">> Constant data shape: {0} , {1}".format(all_aux_trainX.shape, aux_trainy.shape))
# load all test
testX, testy = load_timeseries_dataset_group('test', prefix + 'HARDataset/')
all_aux_testX, aux_testy = load_const_dataset_group('test', prefix + 'HARDataset/')
print(">> Time series data shape: {0} , {1}".format(testX.shape, testy.shape))
print(">> Constant data shape: {0} , {1}".format(all_aux_testX.shape, aux_testy.shape))
# feature selection on constant features
aux_trainX, aux_testX = feature_selection(all_aux_trainX, all_aux_testX)
# zero-offset class values
trainy = trainy - 1
testy = testy - 1
aux_trainy = aux_trainy - 1
aux_testy = aux_testy - 1
# one hot encode y
trainy = to_categorical(trainy)
testy = to_categorical(testy)
aux_trainy = to_categorical(aux_trainy)
aux_testy = to_categorical(aux_testy)
print(">> Final shapes of time series dataset: {0}, {1}, {2}, {3}".format(trainX.shape, trainy.shape, testX.shape,
testy.shape))
print(">> Final shapes of constant dataset: {0}, {1}, {2}, {3}".format(aux_trainX.shape, aux_trainy.shape,
aux_testX.shape, aux_testy.shape))
return trainX, trainy, testX, testy, aux_trainX, aux_trainy, aux_testX, aux_testy
| StarcoderdataPython |
1671645 | <filename>glslc/test/option_dash_S.py
# Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, StdinShader
def simple_vertex_shader():
return """#version 310 es
void main() {
gl_Position = vec4(1., 2., 3., 4.);
}"""
def simple_fragment_shader():
return """#version 310 es
void main() {
gl_FragDepth = 10.;
}"""
def simple_compute_shader():
return """#version 310 es
void main() {
uvec3 temp = gl_WorkGroupID;
}"""
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleDashCapSSingleFile(expect.ValidAssemblyFile):
"""Tests that -S works with a single file."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', shader]
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleFileSingleDashCapS(expect.ValidAssemblyFile):
"""Tests that the position of -S doesn't matter."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = [shader, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestSingleDashCapSMultipleFiles(expect.ValidAssemblyFile):
"""Tests that -S works with multiple files."""
shader1 = FileShader(simple_vertex_shader(), '.vert')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', shader1, shader2, shader3]
@inside_glslc_testsuite('OptionDashCapS')
class TestMultipleDashCapSSingleFile(expect.ValidAssemblyFile):
"""Tests that multiple -Ss works as one."""
shader = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', '-S', shader, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestMultipleDashCapSMultipleFiles(expect.ValidAssemblyFile):
"""Tests a mix of -Ss and files."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_compute_shader(), '.comp')
glslc_args = ['-S', shader1, '-S', '-S', shader2, '-S', shader3, '-S']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashC(expect.ValidAssemblyFile):
"""Tests that -S overwrites -c."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-c', '-S', shader1, '-c', '-c', shader2]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashFShaderStage(expect.ValidAssemblyFile):
"""Tests that -S works with -fshader-stage=."""
shader1 = FileShader(simple_fragment_shader(), '.glsl')
shader2 = FileShader(simple_vertex_shader(), '.glsl')
shader3 = FileShader(simple_compute_shader(), '.glsl')
glslc_args = ['-S',
'-fshader-stage=fragment', shader1,
'-fshader-stage=vertex', shader2,
'-fshader-stage=compute', shader3]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashStd(expect.ValidAssemblyFileWithWarning):
"""Tests that -S works with -std=."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
shader3 = FileShader(simple_compute_shader(), '.comp')
glslc_args = ['-S', '-std=450', shader1, shader2, shader3]
w = (': warning: (version, profile) forced to be (450, none), '
'while in source code it is (310, es)\n')
expected_warning = [
shader1, w, shader2, w, shader3, w, '3 warnings generated.\n']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashOSingleFile(expect.SuccessfulReturn,
expect.CorrectAssemblyFilePreamble):
"""Tests that -S works with -o on a single file."""
shader = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', '-o', 'blabla', shader]
def check_output_blabla(self, status):
output_name = os.path.join(status.directory, 'blabla')
return self.verify_assembly_file_preamble(output_name)
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithDashOMultipleFiles(expect.ErrorMessage):
"""Tests that -S works with -o on a single file."""
shader1 = FileShader(simple_fragment_shader(), '.frag')
shader2 = FileShader(simple_vertex_shader(), '.vert')
glslc_args = ['-S', '-o', 'blabla', shader1, shader2]
expected_error = ['glslc: error: cannot specify -o when '
'generating multiple output files\n']
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithStdIn(expect.ValidAssemblyFile):
"""Tests that -S works with stdin."""
shader = StdinShader(simple_fragment_shader())
glslc_args = ['-S', '-fshader-stage=fragment', shader]
@inside_glslc_testsuite('OptionDashCapS')
class TestDashCapSWithStdOut(
expect.ReturnCodeIsZero, expect.StdoutMatch, expect.StderrMatch):
"""Tests that -S works with stdout."""
shader = FileShader(simple_fragment_shader(), '.frag')
glslc_args = ['-S', '-o', '-', shader]
expected_stdout = True
expected_stderr = ''
| StarcoderdataPython |
1728710 | <reponame>alfredots/image-processing
import numpy as np
import math
class Aritmetics:
@staticmethod
def add(img1, img2):
newImage = np.zeros((img1.shape[0],img1.shape[1],img1.shape[2]), np.uint8)
rows, columns, pixel = img1.shape
for i in range(rows):
for j in range(columns):
newImage[i][j] = img1[i][j] + img2[i][j]
return newImage
@staticmethod
def sub(img1, img2):
newImage = np.zeros((img1.shape[0],img1.shape[1],img1.shape[2]), np.uint8)
rows, columns, pixel = img1.shape
for i in range(rows):
for j in range(columns):
newImage[i][j] = img1[i][j] - img2[i][j]
return newImage
@staticmethod
def mult(img1, img2):
newImage = np.zeros((img1.shape[0],img1.shape[1],img1.shape[2]), np.uint8)
rows, columns, pixel = img1.shape
for i in range(rows):
for j in range(columns):
newImage[i][j] = img1[i][j] * img2[i][j]
return newImage
@staticmethod
def div(img1, img2):
newImage = np.zeros((img1.shape[0],img1.shape[1],img1.shape[2]), np.uint8)
rows, columns, pixel = img1.shape
for i in range(rows):
for j in range(columns):
if img2[i][j][0] != 0:
newImage[i][j][0] = int(img1[i][j][0] / img2[i][j][0])
else:
newImage[i][j][0] = img1[i][j][0]
if img2[i][j][1] != 0:
newImage[i][j][1] = int(img1[i][j][1] / img2[i][j][1])
else:
newImage[i][j][1] = img1[i][j][1]
if img2[i][j][2] != 0:
newImage[i][j][2] = int(img1[i][j][2] / img2[i][j][2])
else:
newImage[i][j][2] = img1[i][j][2]
return newImage | StarcoderdataPython |
1755123 | # -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
TEST equimap
'''
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
#import warnings
if __name__ == '__main__':
#print('path 1 =', sys.path)
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#print('path 2 =', sys.path)
# Local modules
import imas
import equimap
#import imas_west
#import pywed as pw
shot = 53221
tol_val = 1E-10
# For 2D plots
interp_points = 60
# FIRST POINT B_NORM
# ------------------
time_in = np.linspace(36, 37, 10)
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 3)
Z_in = np.zeros(R_in.shape)
# Read equilibrium data
idd = imas.ids(shot, 0)
idd.open_env('imas_public', 'west', '3')
idd.equilibrium.get()
out = idd.equilibrium
equiDict = {}
# Declaration of arrays 2d plots
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
equiDict['r'] = np.full(NbrPoints, np.nan)
equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
# For 2D plots
R_all = np.linspace(np.min(equiDict['r']), np.max(equiDict['r']), interp_points)
Z_all = np.linspace(np.min(equiDict['z']), np.max(equiDict['z']), interp_points)
R_all_tot = np.repeat(R_all, interp_points)
Z_all_tot = np.tile(Z_all, interp_points)
Rr = R_all_tot.reshape((interp_points, interp_points))
Zr = Z_all_tot.reshape((interp_points, interp_points))
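# Illustration of the grid construction above (not part of the original script):
# with interp_points = 3, R_all = [r0, r1, r2] and Z_all = [z0, z1, z2],
# np.repeat gives R_all_tot = [r0, r0, r0, r1, r1, r1, r2, r2, r2] and
# np.tile gives Z_all_tot = [z0, z1, z2, z0, z1, z2, z0, z1, z2],
# i.e. every (R, Z) pair on the regular grid; reshape() then folds both back into
# (interp_points, interp_points) arrays Rr and Zr for the contour plots below.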
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm')
end = time.time()
print()
print('time in equimap.get b_norm =', end - start)
print()
print('oute.shape b_norm =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm', no_ripple=True)
end = time.time()
print()
print('time in equimap.get b_norm no Ripple =', end - start)
print()
print('oute.shape b_norm no ripple =', oute_noR.shape)
print()
print('Mean value B_norm ripple =', np.mean(oute[int(0.5*oute.shape[0]), :]))
print('Mean value B_norm NO ripple =', \
np.mean(oute_noR[int(0.5*oute_noR.shape[0]), :]))
diff_mean_val = np.mean(oute[int(0.5*oute.shape[0]), :]) \
- np.mean(oute_noR[int(0.5*oute_noR.shape[0]), :])
print('Diff mean values =', diff_mean_val)
percent_diff = np.abs(100*diff_mean_val \
/ np.mean(oute[int(0.5*oute.shape[0]), :]))
print('Percent diff mean values =', percent_diff)
# CHECK
# -----
if (np.abs(percent_diff - 0.011052598088) > tol_val):
print()
print('ERROR: Higher than tolerance percent difference ' \
+ str(np.abs(percent_diff - 0.011052598088)))
print()
#raise RuntimeError
# FOR:
# shot = 53221
# time_in = np.linspace(36, 37, 10)
# Phi_in = np.linspace(0, 2*np.pi/18, 100)
# R_in = np.full(Phi_in.shape, 3)
# Z_in = np.zeros(R_in.shape)
# RESULTS:
# Mean value B_norm ripple = 3.05593472975
# Mean value B_norm NO ripple = 3.05627248994
# Diff mean values = -0.000337760183512
# Percent diff mean values = 0.011052598088
print()
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], label='B_norm at R={0}, Phi=Z=0'.format(R_in[-1]))
plt.plot(time_in, oute_noR[:, -1], label='B_norm no ripple at R={0}, Phi=Z=0'.format(R_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_norm [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_norm at t={0:.2f}, R={1}, Z=0'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_norm no ripple at t={0:.2f}, R={1}, Z=0'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_norm [T]')
# SECOND POSITION B_NORM
# ----------------------
t_ignitron = []
t_ignitron.append(32)
print()
print('t_igni =', t_ignitron[0])
print()
time_in = np.linspace(t_ignitron[0], 38, 10)
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 2.43)
Z_in = np.full(Phi_in.shape, 0.57)
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm')
end = time.time()
print()
print('time in equimap.get 2 b_norm =', end - start)
print()
print('oute.shape 2 b_norm =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_norm', no_ripple=True)
end = time.time()
print()
print('time in equimap.get 2 b_norm no ripple =', end - start)
print()
print('oute.shape 2 b_norm no ripple =', oute_noR.shape)
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], \
label='B_norm at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.plot(time_in, oute_noR[:, -1], \
label='B_norm no ripple at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_norm [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_norm at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_norm no ripple at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_norm [T]')
# B_NORM 2D
# ---------
# CALL EQUIMAP
start = time.time()
outa = equimap.get(shot, time=time_in, \
R=R_all_tot, Phi=np.zeros(R_all_tot.shape), Z=Z_all_tot, \
quantity='b_field_norm')
end = time.time()
print()
print('time in equimap.get b_norm 2D =', end - start)
print()
outar = outa[int(0.5*outa.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr, Zr, outar)
plt.colorbar()
arg_time = np.argmin(np.abs(out.time - time_in[int(0.5*outa.shape[0])]))
plt.plot(np.squeeze(out.time_slice[arg_time].boundary.outline.r), \
np.squeeze(out.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(out.time_slice[arg_time].global_quantities.magnetic_axis.r, \
out.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('B_norm t={0:.2f}'.format(time_in[int(0.5*outa.shape[0])]))
# B_R TEST
# --------
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 3)
Z_in = np.full(Phi_in.shape, 0)
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_r')
end = time.time()
print()
print('time in equimap.get br =', end - start)
print()
print('oute.shape br =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_r', no_ripple=True)
end = time.time()
print()
print('time in equimap.get br no ripple =', end - start)
print()
print('oute.shape br no ripple =', oute_noR.shape)
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], \
label='B_r at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.plot(time_in, oute_noR[:, -1], \
label='B_r no ripple at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_r [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_r at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_r no ripple at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_r [T]')
# CALL EQUIMAP
start = time.time()
outa = equimap.get(shot, time=time_in, \
R=R_all_tot, Phi=np.zeros(R_all_tot.shape), Z=Z_all_tot, \
quantity='b_field_r')
end = time.time()
print()
print('time in equimap.get br 2D =', end - start)
print()
outar = outa[int(0.5*outa.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr, Zr, outar)
plt.colorbar()
arg_time = np.argmin(np.abs(out.time - time_in[int(0.5*outa.shape[0])]))
plt.plot(np.squeeze(out.time_slice[arg_time].boundary.outline.r), \
np.squeeze(out.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(out.time_slice[arg_time].global_quantities.magnetic_axis.r, \
out.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('B_r t={0:.2f}'.format(time_in[int(0.5*outa.shape[0])]))
# B_Z TEST
# --------
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 3)
Z_in = np.full(Phi_in.shape, 0.2)
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_z')
end = time.time()
print()
print('time in equimap.get bz =', end - start)
print()
print('oute.shape bz =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_z', no_ripple=True)
end = time.time()
print()
print('time in equimap.get bz no ripple =', end - start)
print()
print('oute.shape bz no ripple =', oute_noR.shape)
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], \
label='B_z at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.plot(time_in, oute_noR[:, -1], \
label='B_z no ripple at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_z [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_z at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_z no ripple at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_z [T]')
# CALL EQUIMAP
start = time.time()
outa = equimap.get(shot, time=time_in, \
R=R_all_tot, Phi=np.zeros(R_all_tot.shape), Z=Z_all_tot, \
quantity='b_field_z')
end = time.time()
print()
print('time in equimap.get bz 2D =', end - start)
print()
outar = outa[int(0.5*outa.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr, Zr, outar)
plt.colorbar()
arg_time = np.argmin(np.abs(out.time - time_in[int(0.5*outa.shape[0])]))
plt.plot(np.squeeze(out.time_slice[arg_time].boundary.outline.r), \
np.squeeze(out.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(out.time_slice[arg_time].global_quantities.magnetic_axis.r, \
out.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('B_z t={0:.2f}'.format(time_in[int(0.5*outa.shape[0])]))
# B_TOR TEST
# ----------
Phi_in = np.linspace(0, 2*np.pi/18, 100)
R_in = np.full(Phi_in.shape, 3)
Z_in = np.full(Phi_in.shape, 0)
# CALL EQUIMAP
start = time.time()
oute = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_tor')
end = time.time()
print()
print('time in equimap.get btor =', end - start)
print()
print('oute.shape btor =', oute.shape)
# CALL EQUIMAP
start = time.time()
oute_noR = equimap.get(shot, time=time_in, \
R=R_in, Phi=Phi_in, Z=Z_in, \
quantity='b_field_tor', no_ripple=True)
end = time.time()
print()
print('time in equimap.get btor no ripple =', end - start)
print()
print('oute.shape btor no ripple =', oute_noR.shape)
# PLOTS
plt.figure()
plt.plot(time_in, oute[:, -1], \
label='B_tor at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.plot(time_in, oute_noR[:, -1], \
label='B_tor no ripple at R={0}, Phi={1:.2f}, Z={2}'.format( \
R_in[-1], Phi_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Time [s]')
plt.ylabel('B_tor [T]')
plt.figure()
plt.plot(Phi_in, oute[int(0.5*oute.shape[0]), :], \
label='B_tor at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.plot(Phi_in, oute_noR[int(0.5*oute.shape[0]), :], \
label='B_tor no ripple at t={0:.2f}, R={1}, Z={2}'.format( \
time_in[int(0.5*oute.shape[0])], R_in[-1], Z_in[-1]))
plt.legend()
plt.xlabel('Phi [rad]')
plt.ylabel('B_tor [T]')
# CALL EQUIMAP
start = time.time()
outa = equimap.get(shot, time=time_in, \
R=R_all_tot, Phi=np.zeros(R_all_tot.shape), Z=Z_all_tot, \
quantity='b_field_tor')
end = time.time()
print()
print('time in equimap.get btor 2D =', end - start)
print()
outar = outa[int(0.5*outa.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr, Zr, outar)
plt.colorbar()
arg_time = np.argmin(np.abs(out.time - time_in[int(0.5*outa.shape[0])]))
plt.plot(np.squeeze(out.time_slice[arg_time].boundary.outline.r), \
np.squeeze(out.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(out.time_slice[arg_time].global_quantities.magnetic_axis.r, \
out.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('B_tor t={0:.2f}'.format(time_in[int(0.5*outa.shape[0])]))
plt.show()
| StarcoderdataPython |
5105717 | <reponame>idvxlab/vega-lite-linter<filename>vega_lite_linter/fixer/action.py
class Actions:
ADD_MARK = "ADD_MARK"
CHANGE_MARK = "CHANGE_MARK"
BIN = "BIN"
BIN_X = "BIN('X')"
BIN_Y = "BIN('Y')"
BIN_COLOR = "BIN('COLOR')"
BIN_SIZE = "BIN('SIZE')"
REMOVE_BIN = "REMOVE_BIN"
REMOVE_BIN_X = "REMOVE_BIN('X')"
REMOVE_BIN_Y = "REMOVE_BIN('Y')"
AGGREGATE = "AGGREGATE"
AGGREGATE_X = "AGGREGATE('X')"
AGGREGATE_Y = "AGGREGATE('Y')"
AGGREGATE_COLOR = "AGGREGATE('COLOR')"
AGGREGATE_SIZE = "AGGREGATE('SIZE')"
CHANGE_AGGREGATE = "CHANGE_AGGREGATE"
REMOVE_AGGREGATE = "REMOVE_AGGREGATE"
REMOVE_AGGREGATE_X = "REMOVE_AGGREGATE('X')"
REMOVE_AGGREGATE_Y = "REMOVE_AGGREGATE('Y')"
ADD_COUNT = "ADD_COUNT"
ADD_COUNT_X = "ADD_COUNT('X')"
ADD_COUNT_Y = "ADD_COUNT('Y')"
REMOVE_COUNT = "REMOVE_COUNT"
REMOVE_COUNT_X = "REMOVE_COUNT('X')"
REMOVE_COUNT_Y = "REMOVE_COUNT('Y')"
LOG = "LOG"
REMOVE_LOG = "REMOVE_LOG"
ZERO = "ZERO"
REMOVE_ZERO = "REMOVE_ZERO"
STACK = "STACK"
REMOVE_STACK = "REMOVE_STACK"
# existing encoding's field adding, changing, removing
ADD_FIELD = "ADD_FIELD"
CHANGE_FIELD = "CHANGE_FIELD"
REMOVE_FIELD = "REMOVE_FIELD"
# new encoding adding removing and changing
ADD_CHANNEL = "ADD_CHANNEL"
ADD_CHANNEL_X = "ADD_CHANNEL('X')"
ADD_CHANNEL_Y = "ADD_CHANNEL('Y')"
ADD_CHANNEL_COLOR = "ADD_CHANNEL('COLOR')"
REMOVE_CHANNEL = "REMOVE_CHANNEL"
MOVE_CHANNEL = "CHANGE_CHANNEL"
ADD_FIELD_X = "ADD_FIELD('X')"
CHANGE_FIELD_X = "CHANGE_FIELD('X')"
REMOVE_FIELD_X = "REMOVE_FIELD('X')"
ADD_FIELD_Y = "ADD_FIELD('Y')"
CHANGE_FIELD_Y = "CHANGE_FIELD('Y')"
REMOVE_FIELD_Y = "REMOVE_FIELD('Y')"
ADD_FIELD_COLOR = "ADD_FIELD('COLOR')"
CHANGE_FIELD_COLOR = "CHANGE_FIELD('COLOR')"
REMOVE_FIELD_COLOR = "REMOVE_FIELD('COLOR')"
ADD_FIELD_SIZE = "ADD_FIELD('SIZE')"
CHANGE_FIELD_SIZE = "CHANGE_FIELD('SIZE')"
REMOVE_FIELD_SIZE = "REMOVE_FIELD('SIZE')"
CHANGE_TYPE = "CHANGE_TYPE"
CORRECT_MARK = "CORRECT_MARK"
CORRECT_CHANNEL = "CORRECT_CHANNEL"
CORRECT_TYPE = "CORRECT_TYPE"
CORRECT_AGGREGATE = "CORRECT_AGGREGATE"
CORRECT_BIN = "CORRECT_BIN"
| StarcoderdataPython |
4819211 | # https://docs.gunicorn.org/en/latest/settings.html#accesslog
accesslog = "-"
# https://docs.gunicorn.org/en/latest/settings.html#access-log-format
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(M)s'
# https://docs.gunicorn.org/en/latest/settings.html#errorlog
errorlog = "-"
# https://docs.gunicorn.org/en/latest/settings.html#timeout
timeout = 30
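# Usage sketch (assumption: this file is saved as gunicorn.conf.py next to a WSGI
# module named "app" exposing "application"; adjust both names for your project):
#   gunicorn -c gunicorn.conf.py app:application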
| StarcoderdataPython |
123871 | <reponame>zopefoundation/zc.resourcelibrary<gh_stars>0
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os.path
from zope.browserresource.directory import DirectoryResourceFactory
from zope.browserresource.metadirectives import IBasicResourceInformation
from zope.browserresource.metaconfigure import allowed_names
from zope.component import getSiteManager
from zope.configuration.exceptions import ConfigurationError
from zope.interface import Interface
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zope.security.checker import CheckerPublic, NamesChecker
import zope.configuration.fields
from zc.resourcelibrary.resourcelibrary import LibraryInfo, library_info
class IResourceLibraryDirective(IBasicResourceInformation):
"""
Defines a resource library
"""
name = zope.schema.TextLine(
title=u"The name of the resource library",
description=u"""\
This is the name used to disambiguate resource libraries. No two
libraries can be active with the same name.""",
required=True,
)
require = zope.configuration.fields.Tokens(
title=u"Require",
description=u"The resource libraries on which this library depends.",
required=False,
value_type=zope.schema.Text(),
)
class IDirectoryDirective(Interface):
"""
Identifies a directory to be included in a resource library
"""
source = zope.configuration.fields.Path(
title=u"Source",
description=u"The directory containing the files to add.",
required=True,
)
include = zope.configuration.fields.Tokens(
title=u"Include",
description=u"The files which should be included in HTML pages which "
u"reference this resource library.",
required=False,
value_type=zope.schema.Text(),
)
factory = zope.configuration.fields.GlobalObject(
title=u"Factory",
description=u"Alternate directory-resource factory",
required=False,
)
def handler(name,
dependencies,
required,
provided,
adapter_name,
factory,
info=''):
if dependencies:
for dep in dependencies:
if dep not in library_info:
raise ConfigurationError(
'Resource library "%s" has unsatisfied dependency on "%s".'
% (name, dep))
getSiteManager().registerAdapter(
factory, required, provided, adapter_name, info)
INCLUDABLE_EXTENSIONS = ('.js', '.css', '.kss')
class ResourceLibrary(object):
def __init__(self, _context, name, require=(),
layer=IDefaultBrowserLayer, permission='zope.Public'):
self.name = name
self.layer = layer
if permission == 'zope.Public':
permission = CheckerPublic
self.checker = NamesChecker(allowed_names, permission)
# make note of the library in a global registry
self.old_library_info = library_info.get(name)
library_info[name] = LibraryInfo()
library_info[name].required.extend(require)
def directory(self, _context, source, include=(), factory=None):
if not os.path.isdir(source):
raise ConfigurationError("Directory %r does not exist" % source)
for file_name in include:
ext = os.path.splitext(file_name)[1]
if ext not in INCLUDABLE_EXTENSIONS:
raise ConfigurationError(
'Resource library doesn\'t know how to include this '
'file: "%s".' % file_name)
# remember which files should be included in the HTML when this library
# is referenced
library_info[self.name].included.extend(include)
if factory is None:
factory = DirectoryResourceFactory
factory = factory(source, self.checker, self.name)
_context.action(
discriminator=('resource', self.name, IBrowserRequest, self.layer),
callable=handler,
args=(self.name, library_info[self.name].required, (self.layer,),
Interface, self.name, factory, _context.info),
)
def __call__(self):
if self.old_library_info is None:
return
curr_li = library_info[self.name]
if self.old_library_info.included != curr_li.included or \
self.old_library_info.required != curr_li.required:
raise NotImplementedError(
"Can't cope with 2 different registrations of the same "
"library: %s (%s, %s) (%s, %s)" % (
self.name,
self.old_library_info.required,
self.old_library_info.included,
curr_li.required,
curr_li.included))
| StarcoderdataPython |
9712394 | <filename>django_candy/middleware.py
from django.http import HttpResponse, JsonResponse
from django.conf import settings
CORS_ALLOW_ORIGIN = getattr(settings, 'CORS_ALLOW_ORIGIN', 'http://localhost:3000')
CORS_ALLOW_METHODS = getattr(settings, 'CORS_ALLOW_METHODS', ['POST', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'PATCH'])
CORS_ALLOW_HEADERS = getattr(settings, 'CORS_ALLOW_HEADERS', ['*', 'content-type', 'authorization', 'X-CSRFTOKEN'])
CORS_ALLOW_CREDENTIALS = getattr(settings, 'CORS_ALLOW_CREDENTIALS', True)
CORS_EXPOSE_HEADERS = getattr(settings, 'CORS_EXPOSE_HEADERS', ['content-type', 'location', '*'])
CORS_REQUEST_HEADERS = getattr(settings, 'CORS_REQUEST_HEADERS', ['*'])
class CorsMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def set_headers(self, response):
response['Access-Control-Allow-Origin'] = CORS_ALLOW_ORIGIN
response['Access-Control-Allow-Methods'] = ','.join(CORS_ALLOW_METHODS)
response['Access-Control-Allow-Headers'] = ','.join(CORS_ALLOW_HEADERS)
response['Access-Control-Allow-Credentials'] = 'true' if CORS_ALLOW_CREDENTIALS else 'false'
response['Access-Control-Expose-Headers'] = ','.join(CORS_EXPOSE_HEADERS)
response['Access-Control-Request-Headers'] = ','.join(CORS_REQUEST_HEADERS)
return response
def __call__(self, request):
#self.process_request(request)
#self.process_response(request, self.get_response(request))
response = self.get_response(request)
response = self.set_headers(response)
return response
def _process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
            return self.set_headers(HttpResponse())
def _process_response(self, request, response):
        return self.set_headers(response)
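# Usage sketch (assumption: this module lives at django_candy.middleware, as the
# filename tag above suggests) - enable the middleware in settings.py:
#   MIDDLEWARE = [
#       ...,
#       'django_candy.middleware.CorsMiddleware',
#   ]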
| StarcoderdataPython |
5093236 | from colorama import Fore, Back, Style
from pyfiglet import Figlet
from PyInquirer import prompt, Separator
from examples import custom_style_2
import time
import src.migrations.YoutubeToSpotify as YoutubeToSpotify
import src.migrations.SpotifyToYoutube as SpotifyToYoutube
def main():
# print banner
f = Figlet(font='cybermedium')
print(Style.BRIGHT + Fore.YELLOW +
f.renderText('Youtube : Spotify Migration') + Style.RESET_ALL)
print(Style.BRIGHT + Fore.YELLOW + 'Author: ' + Style.DIM + '<NAME>\n' + Style.RESET_ALL)
# let user decide in which direction to perform transfer
user_selection = get_transfer_direction()
print('Great! Logging you in.\n')
time.sleep(1)
if user_selection['transfer_direction'] == 'Youtube ⭢ Spotify':
migrator = YoutubeToSpotify.Migrator()
migrator.execute()
else:
migrator = SpotifyToYoutube.Migrator()
migrator.execute()
def get_transfer_direction(): # fetch the user's playlists
question = [ # prompt user to select one
{
'type': 'list',
'name': 'transfer_direction',
'message': "In which direction would you like to transfer playlists?",
'choices': [
{
'name': "Youtube ⭢ Spotify"
},
{
'name': "Spotify ⭢ Youtube"
}
]
}
]
return prompt(question, style=custom_style_2)
if __name__ == '__main__':
main() | StarcoderdataPython |
4809821 | /home/runner/.cache/pip/pool/3b/e4/ff/dbcd7adea90479a5fc6cb82a24bb8e2d8babd79702dbfc6903444435f3 | StarcoderdataPython |
8087403 | <filename>NBApredict/scrapers/scraper.py<gh_stars>1-10
"""
This module wraps the team stats, schedule, and betting line scrapers together and stores their data in the database.
If the script is called, it instantiates a DBInterface object for database interactions and creates a SQLalchemy session
object from the DBInterface's information. Otherwise, the scape_all() function is called with database, session, and
league year arguments specified.
"""
import os
from sqlalchemy.orm import Session
# Local Imports
from nbapredict.database.dbinterface import DBInterface
from nbapredict.scrapers import team_scraper, season_scraper, line_scraper
import nbapredict.configuration as configuration
def scrape_all(database, session, league_year):
"""Scrape and store team stats, schedule information, and betting lines in the database.
Note, this only adds data to the session. Changes must be committed to be saved.
Args:
database: An instantiated DBInterface object from database.database for database interactions
session: An instance of a sqlalchemy Session class bound to the database's engine
league_year: The league year to scrape data from (i.e. 2018-2019 season is 2019)
"""
# Insure the database folder exists
if not os.path.isdir(configuration.output_directory()):
os.mkdir(configuration.output_directory())
team_scrape = team_scraper.scrape(database=database)
season_scrape = season_scraper.scrape(database=database, session=session)
line_scrape = line_scraper.scrape(database=database, session=session)
if __name__ == "__main__":
db_path = configuration.database_file(os.path.dirname(__file__))
db = DBInterface(db_path)
league_year = 2019
session = Session(bind=db.engine)
scrape_all(database=db, session=session, league_year=league_year)
| StarcoderdataPython |
1726777 | import pandas as pd
import numpy as np
import os
from PyQt5.QtCore import QAbstractTableModel, Qt, QVariant
import cubetools.config as cfg
class Model():
'''Creates data for the UI and export'''
def __init__(self):
super().__init__()
self.valid_cncfilelist = self.check_cnc_filepaths()
def check_cnc_filepaths(self) -> dict():
'''Returns checked filepaths from config.py
Returns:
dict: {NAME:PATH} format of the checked machine entries from cfg'''
filelist = {}
if cfg.path_to_cnc:
for name, dir_path in cfg.path_to_cnc.items():
dir_path_string = str(dir_path)
if (os.path.isfile(dir_path_string + '/tool.t') and
os.path.isfile(dir_path_string + '/tool_p.tch')):
filelist[name] = dir_path_string
return filelist
def parse_headers(self, header_line: str) -> dict():
'''Gets headers indexes to define column widths
Parameters:
header_line(str): raw readline from file
Returns:
colspecs_dict(dict): columns {<COL_NAME>: (i_start, i_end)} '''
colspecs_dict = {}
column_names = header_line.split()
col_idx = []
prev_i = 0
if set(["T"]).issubset(column_names):
for i, k in enumerate(header_line):
if k != " ":
if (i != prev_i+1):
col_idx.append(i)
prev_i = i
col_idx.append(len(header_line)+1)
colspecs_dict = {name: (col_idx[i], col_idx[i+1])
for (name, i)
in zip(column_names, range(len(col_idx)-1))}
return colspecs_dict
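    # Illustrative example (hypothetical header line, not taken from a real tool.t):
    # for "T    NAME    L\n" the column names start at offsets 0, 5 and 13, so the
    # returned colspecs would be {'T': (0, 5), 'NAME': (5, 13), 'L': (13, 16)},
    # i.e. each column spans from its own start offset to the next column's start.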
def read_tooltable(self, toolt_cncfile: str) -> pd.DataFrame():
'''Reads tool-file into pandas-dataframe
Parameters:
toolt_cncfile(file): full path fwf(fixed-width-field) file
Returns:
dftools(dataframe): pandas dataframe with all cols/rows'''
with open(toolt_cncfile) as data_toolt:
table_toolt = data_toolt.readlines()
header_line = table_toolt[1]
headers = self.parse_headers(header_line)
if headers != dict():
dftools = pd.read_fwf(toolt_cncfile,
skiprows=2, skipfooter=1,
names=headers.keys(),
colspecs=list(headers.values()),
index_col=None)
dftools = dftools.dropna(subset=["T"])
dftools = dftools.astype({"T": int})
return dftools
else:
return pd.DataFrame()
def export_tooltable(self,
machines_selected: list,
fileformats_selected: set,
path_field: str):
'''Exports pandas-tables in various formats
Parameters:
path_field(str): path for the exported files
machines_selected(list): list of names to search for in the cfg
fileformats_selected(set): set of extensions to export
Returns:
saved files in formats from [fileformats_selected] in folder [pathfield]'''
self.fileformats_allowed = {'xlsx', 'csv', 'json'}
self.fileformats = (fileformats_selected & self.fileformats_allowed)
self.machines_selected = machines_selected
self.ui_path_field = str(path_field)
self.machinelist = dict([(name, path) for name, path
in self.valid_cncfilelist.items()
if name in self.machines_selected])
for mach_name, dir_path in self.machinelist.items():
toolt = self.read_tooltable(dir_path + "/tool.t")
toolpt = self.read_tooltable(dir_path + "/tool_p.tch")
file_to_save = self.ui_path_field + "/" + mach_name
for ext in self.fileformats:
if ext == "xlsx":
toolt.to_excel(file_to_save + '.xlsx',
index=False)
toolpt.to_excel(file_to_save + '_magazine.xlsx',
index=False)
if ext == "csv":
toolt.to_csv(file_to_save + '.csv',
index=False)
toolpt.to_csv(file_to_save + '_magazine.csv',
index=False)
if ext == "json":
toolt.to_json(file_to_save + '.json')
toolpt.to_json(file_to_save + '_magazine.json')
class ToolSummaryTable(QAbstractTableModel, Model):
'''Provides datatable for the preview'''
def __init__(self, machine_selected):
super().__init__()
mainmodel = Model()
self.machine_name = machine_selected
if self.machine_name in self.valid_cncfilelist.keys():
self.tool_file = self.valid_cncfilelist[self.machine_name]
            self.tooldf = mainmodel.read_tooltable(self.tool_file + "/tool.t")
            self.magazindf = mainmodel.read_tooltable(self.tool_file + "/tool_p.tch")
self.summarydf = self.tooldf.loc[self.tooldf['T'].isin(self.magazindf['T'])]
self.summarydf = self.summarydf[['T', 'NAME', 'DOC', "L"]]
self.summarydf = self.assign_toolstatus(self.summarydf)
def rowCount(self, index):
return self.summarydf.shape[0]
def columnCount(self, index):
return self.summarydf.shape[1]
def data(self, index, role):
if role != Qt.DisplayRole:
return QVariant()
return str(self.summarydf.iloc[index.row(), index.column()])
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole or orientation != Qt.Horizontal:
return QVariant()
return self.summarydf.columns[section]
def assign_toolstatus(self, summarydf):
refdb_df = cfg.refdb_sample
summarydf['L_NOM'] = summarydf['T'].map(refdb_df.set_index('T')['L_NOM'])
summarydf['Status'] = np.where(summarydf['L'] > summarydf['L_NOM'],
'Check is failed',
'Tool is OK')
summarydf.loc[pd.isna(summarydf['L_NOM']) == True, 'Status'] = \
'Not checked'
return summarydf
| StarcoderdataPython |
3360739 | #!/usr/bin/env python
import pytest
from click.testing import CliRunner
from enshrine import to_json, to_yaml, main
VARS_YAML = """---
var1: value1
var2: value2
"""
INI_TEMPLATE = """[section]
value1: {{ var1 }}
value2: {{ var2 }}
"""
def _write_files():
with open('vars.yaml', 'w') as f:
f.write(VARS_YAML)
with open('template.ini.jinja', 'w') as f:
f.write(INI_TEMPLATE)
@pytest.mark.parametrize('data, expected', [
({'key': 'value'}, '{"key": "value"}'),
([1, 'two', 3], '[1, "two", 3]'),
(None, 'null')
])
def test_to_json(data, expected):
assert to_json(data) == expected
@pytest.mark.parametrize('data, expected', [
({'key': 'value'}, '{key: value}\n'),
([1, 'two', 3], '[1, two, 3]\n')
])
def test_to_yaml(data, expected):
assert to_yaml(data) == expected
def test_main():
runner = CliRunner()
with runner.isolated_filesystem():
_write_files()
result = runner.invoke(main, ['vars.yaml', 'template.ini.jinja'])
assert result.exit_code == 0
assert result.output == '[section]\nvalue1: value1\nvalue2: value2\n\n'
| StarcoderdataPython |
1929440 | <filename>setup.py<gh_stars>1-10
import sys
if sys.version_info.major != 3: raise Exception('ktrain requires Python 3')
from distutils.core import setup
import setuptools
with open('README.md', encoding='utf-8') as readme_file:
readme_file.readline()
readme = readme_file.read()
exec(open('ktrain/version.py').read())
setup(
name = 'ktrain',
packages = setuptools.find_packages(),
package_data={'ktrain': ['text/shallownlp/ner_models/*']},
version = __version__,
license='Apache License 2.0',
description = 'ktrain is a wrapper for TensorFlow Keras that makes deep learning and AI more accessible and easier to apply',
#description = 'ktrain is a lightweight wrapper for TensorFlow Keras to help train neural networks',
long_description = readme,
long_description_content_type = 'text/markdown',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/amaiya/ktrain',
keywords = ['tensorflow', 'keras', 'deep learning', 'machine learning'],
install_requires=[
'scikit-learn==0.23.2', # due to change in 0.24.x that breaks eli5
'matplotlib >= 3.0.0',
'pandas >= 1.0.1',
'fastprogress >= 0.1.21',
'requests',
'joblib',
'packaging',
'ipython',
'langdetect',
'jieba',
'cchardet', # previously pinned to 2.1.5 (due to this issue: https://github.com/PyYoshi/cChardet/issues/61) but no longer needed
'syntok',
# NOTE: these modules can be optionally omitted from deployment if not being used to yield lighter-weight footprint
'seqeval==0.0.19', # imported in imports with warning and used in 'ktrain.text.ner' ; pin to 0.0.19 due to numpy version incompatibility with TensorFlow 2.3
'transformers>=4.0.0,<=4.3.3', # imported in imports with warning and used in 'ktrain.text' ; pin to transformers>4.0 due to breaking changes
'sentencepiece', # Added due to breaking change in transformers>=4.0
'keras_bert>=0.86.0', # imported in imports with warning and used in 'ktrain.text' ; support for TF 2.3
'networkx>=2.3', # imported by graph module
'whoosh', # imported by text.qa module
# NOTE: these libraries below are manually installed on-the-fly when required by an invoked method with appropriate warnings
#'eli5 >= 0.10.0', # forked version used by TextPredictor.explain and ImagePredictor.explain
#'stellargraph>=0.8.2', # forked version used by graph module
# 'shap', # used by TabularPredictor.explain
#'textblob', # used by textutils.extract_noun_phrases
#'textract', # used by textutils.extract_copy and text.qa.core.SimpleQA
#'bokeh', # used by visualze_documents text.eda module
#'allennlp', # required for NER Elmo embeddings since TF2 TF_HUB does not work
# 'torch', # used by text.translation, text.zsl, and text.summarization
],
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
# Pick your license as you wish
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| StarcoderdataPython |
396638 | import math
import logging
import torch
import torch.nn as nn
from prettytable import PrettyTable
from ..mask import Mask_s, Mask_c
__all__ = ['resdg20_cifar10', 'resdg32_cifar10', 'resdg56_cifar10',
'resdg110_cifar10']
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv2d_out_dim(dim, kernel_size, padding=0, stride=1, dilation=1, ceil_mode=False):
if ceil_mode:
return int(math.ceil((dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))
else:
return int(math.floor((dim + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1))
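# Worked example of the standard conv output-size formula above: a 32x32 feature map
# with kernel_size=3, stride=2, padding=1 gives
# floor((32 + 2*1 - 1*(3-1) - 1) / 2 + 1) = floor(16.5) = 16, matching nn.Conv2d.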
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, h, w, eta=4,
stride=1, downsample=None, **kwargs):
super(BasicBlock, self).__init__()
# gating modules
self.height = conv2d_out_dim(h, kernel_size=3, stride=stride, padding=1)
self.width = conv2d_out_dim(w, kernel_size=3, stride=stride, padding=1)
self.mask_s = Mask_s(self.height, self.width, inplanes, eta, eta, **kwargs)
self.mask_c = Mask_c(inplanes, planes, **kwargs)
self.upsample = nn.Upsample(size=(self.height, self.width), mode='nearest')
# conv 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
# conv 2
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
# misc
self.downsample = downsample
self.inplanes, self.planes = inplanes, planes
self.b = eta * eta
self.b_reduce = (eta-1) * (eta-1)
flops_conv1_full = torch.Tensor([9 * self.height * self.width * planes * inplanes])
flops_conv2_full = torch.Tensor([9 * self.height * self.width * planes * planes])
# downsample flops
self.flops_downsample = torch.Tensor([self.height*self.width*planes*inplanes]
)if downsample is not None else torch.Tensor([0])
# full flops
self.flops_full = flops_conv1_full + flops_conv2_full + self.flops_downsample
# mask flops
flops_mks = self.mask_s.get_flops()
flops_mkc = self.mask_c.get_flops()
self.flops_mask = torch.Tensor([flops_mks + flops_mkc])
def forward(self, input):
x, norm_1, norm_2, flops = input
residual = x
# spatial mask
mask_s_m, norm_s, norm_s_t = self.mask_s(x) # [N, 1, h, w]
mask_s = self.upsample(mask_s_m) # [N, 1, H, W]
# conv 1
mask_c, norm_c, norm_c_t = self.mask_c(x) # [N, C_out, 1, 1]
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
if not self.training:
out = out * mask_c * mask_s
else:
out = out * mask_c
# conv 2
out = self.conv2(out)
out = self.bn2(out)
out = out * mask_s
# identity
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
# flops
flops_blk = self.get_flops(mask_s_m, mask_s, mask_c)
flops = torch.cat((flops, flops_blk.unsqueeze(0)))
# norm
norm_1 = torch.cat((norm_1, torch.cat((norm_s, norm_s_t)).unsqueeze(0)))
norm_2 = torch.cat((norm_2, torch.cat((norm_c, norm_c_t)).unsqueeze(0)))
return (out, norm_1, norm_2, flops)
def get_flops(self, mask_s, mask_s_up, mask_c):
s_sum = mask_s.sum((1,2,3))
c_sum = mask_c.sum((1,2,3))
# conv1
flops_conv1 = 9 * self.b * s_sum * c_sum * self.inplanes
# conv2
flops_conv2 = 9 * self.b * s_sum * self.planes * c_sum
# total
flops = flops_conv1 + flops_conv2 + self.flops_downsample.to(flops_conv1.device)
return torch.cat((flops, self.flops_mask.to(flops.device), self.flops_full.to(flops.device)))
class ResNetCifar10(nn.Module):
def __init__(self, depth, num_classes=10, h=32, w=32, **kwargs):
super(ResNetCifar10, self).__init__()
self.height, self.width = h, w
# Model type specifies number of layers for CIFAR-10 model
n = (depth - 2) // 6
block = BasicBlock
# norm
self._norm_layer = nn.BatchNorm2d
# conv1
self.inplanes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1,bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
# residual blocks
self.layer1, h, w = self._make_layer(block, 16, n, h, w, 4, **kwargs)
self.layer2, h, w = self._make_layer(block, 32, n, h, w, 2, stride=2, **kwargs)
self.layer3, h, w = self._make_layer(block, 64, n, h, w, 2, stride=2, **kwargs)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
# flops
self.flops_conv1 = torch.Tensor([9 * self.height * self.width * 16 * 3])
self.flops_fc = torch.Tensor([64 * block.expansion * num_classes])
# criterion
self.criterion = None
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None and m.bias is not None:
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, h, w, tile, stride=1, **kwargs):
norm_layer = self._norm_layer
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, h, w, tile,
stride, downsample, **kwargs))
h = conv2d_out_dim(h, kernel_size=1, stride=stride, padding=0)
w = conv2d_out_dim(w, kernel_size=1, stride=stride, padding=0)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, h, w, tile, **kwargs))
return nn.Sequential(*layers), h, w
def forward(self, x, label, den_target, lbda, gamma, p):
batch_num, _, _, _ = x.shape
# conv1
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x) # 32x32
# residual blocks
norm1 = torch.zeros(1, batch_num+1).to(x.device)
norm2 = torch.zeros(1, batch_num+1).to(x.device)
flops = torch.zeros(1, batch_num+2).to(x.device)
x = self.layer1((x, norm1, norm2, flops)) # 32x32
x = self.layer2(x) # 16x16
x, norm1, norm2, flops = self.layer3(x) # 8x8
# fc layer
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
# flops
flops_real = [flops[1:, 0:batch_num].permute(1, 0).contiguous(),
self.flops_conv1.to(x.device), self.flops_fc.to(x.device)]
flops_mask, flops_ori = flops[1:, -2].unsqueeze(0), flops[1:, -1].unsqueeze(0)
# norm
norm_s = norm1[1:, 0:batch_num].permute(1, 0).contiguous()
norm_c = norm2[1:, 0:batch_num].permute(1, 0).contiguous()
norm_s_t, norm_c_t = norm1[1:, -1].unsqueeze(0), norm2[1:, -1].unsqueeze(0)
# get outputs
outputs = {}
outputs["closs"], outputs["rloss"], outputs["bloss"] = self.get_loss(
x, label, batch_num, den_target, lbda, gamma, p,
norm_s, norm_c, norm_s_t, norm_c_t,
flops_real, flops_mask, flops_ori)
outputs["out"] = x
outputs["flops_real"] = flops_real
outputs["flops_mask"] = flops_mask
outputs["flops_ori"] = flops_ori
return outputs
def set_criterion(self, criterion):
self.criterion = criterion
return
def get_loss(self, output, label, batch_size, den_target, lbda, gamma, p,
mask_norm_s, mask_norm_c, norm_s_t, norm_c_t,
flops_real, flops_mask, flops_ori):
closs, rloss, bloss = self.criterion(output, label, flops_real, flops_mask,
flops_ori, batch_size, den_target, lbda, mask_norm_s, mask_norm_c,
norm_s_t, norm_c_t, gamma, p)
return closs, rloss, bloss
def record_flops(self, flops_conv, flops_mask, flops_ori, flops_conv1, flops_fc):
i = 0
table = PrettyTable(['Layer', 'Conv FLOPs', 'Conv %', 'Mask FLOPs', 'Total FLOPs', 'Total %', 'Original FLOPs'])
table.add_row(['layer0'] + ['{flops:.2f}K'.format(flops=flops_conv1/1024)] + [' ' for _ in range(5)])
for name, m in self.named_modules():
if isinstance(m, BasicBlock):
table.add_row([name] + ['{flops:.2f}K'.format(flops=flops_conv[i]/1024)] + ['{per_f:.2f}%'.format(
per_f=flops_conv[i]/flops_ori[i]*100)] + ['{mask:.2f}K'.format(mask=flops_mask[i]/1024)] +
['{total:.2f}K'.format(total=(flops_conv[i]+flops_mask[i])/1024)] + ['{per_t:.2f}%'.format(
per_t=(flops_conv[i]+flops_mask[i])/flops_ori[i]*100)] +
['{ori:.2f}K'.format(ori=flops_ori[i]/1024)])
i+=1
table.add_row(['fc'] + ['{flops:.2f}K'.format(flops=flops_fc/1024)] + [' ' for _ in range(5)])
table.add_row(['Total'] + ['{flops:.2f}K'.format(flops=(flops_conv[i]+flops_conv1+flops_fc)/1024)] +
['{per_f:.2f}%'.format(per_f=(flops_conv[i]+flops_conv1+flops_fc)/(flops_ori[i]+flops_conv1+flops_fc)*100)] +
['{mask:.2f}K'.format(mask=flops_mask[i]/1024)] + ['{total:.2f}K'.format(
total=(flops_conv[i]+flops_mask[i]+flops_conv1+flops_fc)/1024)] + ['{per_t:.2f}%'.format(
per_t=(flops_conv[i]+flops_mask[i]+flops_conv1+flops_fc)/(flops_ori[i]+flops_conv1+flops_fc)*100)] +
['{ori:.2f}K'.format(ori=(flops_ori[i]+flops_conv1+flops_fc)/1024)])
logging.info('\n{}'.format(table))
def resdg20_cifar10(**kwargs):
"""
return a ResNet 20 object for cifar-10.
"""
return ResNetCifar10(20, **kwargs)
def resdg32_cifar10(**kwargs):
"""
return a ResNet 32 object for cifar-10.
"""
return ResNetCifar10(32, **kwargs)
def resdg56_cifar10(**kwargs):
"""
return a ResNet 56 object for cifar-10.
"""
return ResNetCifar10(56, **kwargs)
def resdg110_cifar10(**kwargs):
"""
return a ResNet 110 object for cifar-10.
"""
return ResNetCifar10(110, **kwargs)
| StarcoderdataPython |
8016381 | import time
from tqdm import tqdm
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.utils import Progbar
from nlpgnn.datas.checkpoint import LoadCheckpoint
from nlpgnn.datas.dataloader import TFWriter, TFLoader
from nlpgnn.metrics import Metric
from nlpgnn.models import bert
from nlpgnn.optimizers import optim
from nlpgnn.tools import bert_init_weights_from_checkpoint
from sklearn.metrics import classification_report
# 载入参数
# LoadCheckpoint(language='zh', model="bert", parameters="base", cased=True, url=None)
# language: the language you used in your input data
# model: the model you choose,could be bert albert and gpt2
# parameters: can be base large xlarge xxlarge for albert, base medium large for gpt2, base large for BERT.
# cased: True or false, only for bert model.
# url: you can give a link of other checkpoint.
load_check = LoadCheckpoint(language='en', cased=True)
param, vocab_file, model_path = load_check.load_bert_param()
# 定制参数
param.batch_size = 32
param.maxlen = 128
param.label_size = 9
total_epochs = 100
patience = 10
def ner_evaluation(true_label: list, predicts: list, masks: list, label_names: dict):
all_predict = []
all_true = []
true_label = [tf.reshape(item, [-1]).numpy() for item in true_label]
predicts = [tf.reshape(item, [-1]).numpy() for item in predicts]
masks = [tf.reshape(item, [-1]).numpy() for item in masks]
for i, j, m in zip(true_label, predicts, masks):
index = np.argwhere(m == 1)
all_true.extend(i[index].reshape(-1))
all_predict.extend(j[index].reshape(-1))
report_dict = classification_report(all_true, all_predict,
target_names=list(label_names.keys()), digits=4, output_dict=True)
report = classification_report(all_true, all_predict,
target_names=list(label_names.keys()), digits=4)
print(report)
return report_dict['macro avg']['f1-score']
# 构建模型
class BERT_NER(tf.keras.Model):
def __init__(self, param, **kwargs):
super(BERT_NER, self).__init__(**kwargs)
self.batch_size = param.batch_size
self.maxlen = param.maxlen
self.label_size = param.label_size
self.bert = bert.BERT(param)
self.dense = tf.keras.layers.Dense(self.label_size, activation="relu")
def call(self, inputs, is_training=True):
bert = self.bert(inputs, is_training)
sequence_output = bert.get_sequence_output() # batch,sequence,768
pre = self.dense(sequence_output)
pre = tf.reshape(pre, [self.batch_size, self.maxlen, -1])
output = tf.math.softmax(pre, axis=-1)
return output
def predict(self, inputs, is_training=False):
output = self(inputs, is_training=is_training)
return output
model = BERT_NER(param)
model.build(input_shape=(3, param.batch_size, param.maxlen))
model.summary()
# 构建优化器
optimizer_bert = optim.AdamWarmup(learning_rate=2e-5, # 重要参数
decay_steps=10000, # 重要参数
warmup_steps=1000, )
# 构建损失函数
sparse_categotical_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
# 初始化参数
bert_init_weights_from_checkpoint(model,
model_path, # bert_model.ckpt
param.num_hidden_layers,
pooler=False)
# 写入数据 通过check_exist=True参数控制仅在第一次调用时写入
writer = TFWriter(param.maxlen, vocab_file,
modes=["train", "valid"], check_exist=False)
ner_load = TFLoader(param.maxlen, param.batch_size, epoch=1)
# 训练模型
# 使用tensorboard
# summary_writer = tf.summary.create_file_writer("./tensorboard")
# Metrics
f1score = Metric.SparseF1Score(average="macro")
precsionscore = Metric.SparsePrecisionScore(average="macro")
recallscore = Metric.SparseRecallScore(average="macro")
accuarcyscore = Metric.SparseAccuracy()
# 保存模型
checkpoint = tf.train.Checkpoint(model=model)
manager = tf.train.CheckpointManager(checkpoint, directory="./save",
checkpoint_name="model.ckpt",
max_to_keep=3)
# For train model
print('Training Begin\n')
Batch = 0
Best_F1 = 0
epoch_no_improve = 0
total_step = 0
for _, _, _, _ in tqdm(ner_load.load_train()):
total_step += 1
print('Total Step: {}'.format(total_step))
metrics_names = ['F1', 'precision', 'recall', 'acc']
for epoch in range(total_epochs):
train_predicts = []
train_true_label = []
train_masks = []
print('Epoch {:3d}'.format(epoch + 1))
time.sleep(0.5)
pb_i = Progbar(total_step, stateful_metrics=metrics_names)
for X, token_type_id, input_mask, Y in ner_load.load_train():
with tf.GradientTape() as tape:
predict = model([X, token_type_id, input_mask])
loss = sparse_categotical_loss(Y, predict)
train_predict = tf.argmax(predict, -1)
train_predicts.append(train_predict)
train_true_label.append(Y)
train_masks.append(input_mask)
f1 = f1score(Y, predict)
precision = precsionscore(Y, predict)
recall = recallscore(Y, predict)
accuracy = accuarcyscore(Y, predict)
values = [('F1', f1), ('precision', precision), ('recall', recall), ('acc', accuracy)]
pb_i.add(1, values=values)
grads_bert = tape.gradient(loss, model.variables)
optimizer_bert.apply_gradients(grads_and_vars=zip(grads_bert, model.variables))
Batch += 1
time.sleep(0.5)
ner_evaluation(train_true_label, train_predicts, train_masks, writer.label2id())
print()
time.sleep(0.5)
manager.save(checkpoint_number=(epoch + 1))
valid_Batch = 0
valid_predicts = []
valid_true_label = []
valid_masks = []
print('Valid for Epoch {:3d}'.format(epoch + 1))
time.sleep(0.5)
for valid_X, valid_token_type_id, valid_input_mask, valid_Y in ner_load.load_valid():
predict = model.predict([valid_X, valid_token_type_id, valid_input_mask])
predict = tf.argmax(predict, -1)
valid_predicts.append(predict)
valid_true_label.append(valid_Y)
valid_masks.append(valid_input_mask)
time.sleep(0.5)
print(writer.label2id())
valid_F1 = ner_evaluation(valid_true_label, valid_predicts, valid_masks, writer.label2id())
    if valid_F1 > Best_F1:
        Best_F1 = valid_F1
        epoch_no_improve = 0  # reset the patience counter once validation F1 improves
        model.save_weights('best_model_weights.h5')
        print('Model saved successfully')
else:
epoch_no_improve += 1
print('Epoch no improve: {}'.format(epoch_no_improve))
if epoch_no_improve >= patience:
print('Early Stop')
break
time.sleep(0.5)
model.load_weights('best_model_weights.h5')
test_Batch = 0
test_predicts = []
test_true_label = []
test_masks = []
print('\nTest model')
time.sleep(0.5)
for test_X, test_token_type_id, test_input_mask, test_Y in ner_load.load_test():
predict = model.predict([test_X, test_token_type_id, test_input_mask])
predict = tf.argmax(predict, -1)
test_predicts.append(predict)
test_true_label.append(test_Y)
test_masks.append(test_input_mask)
time.sleep(0.5)
print(writer.label2id())
test_F1 = ner_evaluation(test_true_label, test_predicts, test_masks, writer.label2id())
pd.DataFrame(test_predicts).to_csv('test_predict.csv', index=False)
| StarcoderdataPython |
8069182 | from collections import Counter
from aoc.util import load_input, load_example
def column_counts(lines, pos):
result = ""
for i in range(len(lines[0].strip())):
result += Counter(line[i] for line in lines).most_common()[pos][0]
return result
def part1(lines):
"""
>>> part1(load_example(__file__, "6"))
'easter'
"""
return column_counts(lines, 0)
def part2(lines):
"""
>>> part2(load_example(__file__, "6"))
'advent'
"""
return column_counts(lines, -1)
if __name__ == "__main__":
data = load_input(__file__, 2016, "6")
print(part1(data))
print(part2(data))
| StarcoderdataPython |
9752756 | ## Correct box files using a dictionary file
# The original file is renamed to old_[name].box; a new file with the original name stores the corrected box
# See check_box() for the implementation details
## Usage:
#   set boxName
#   set mode and mImgNum
#   adjust numNum if needed
import cv2
import os
import numpy as np
import random
import pickle
def check_box( boxName, mImgNum, mode, numNum=10):
    '''
    ----------------------------------------------------------------
    *Approach
        The LSTM writes one '\t' marker line to the box file per image line,
        so in the box file n digit lines are followed by one marker line
        (open a box file to see this).
        Replace the first character of each digit line with the digit taken
        from the dictionary file.
        If fewer than numNum digits were recognised, pad with extra lines.
        If more than numNum digits were recognised, drop the extra lines.
        Since reading and writing the same file in place is inconvenient,
        a new box file is created to hold the corrected output.
    ----------------------------------------------------------------
    *Parameters:
        boxName        box file name
        mImgNum        number of images in the tif
        mode           file mode, 'train' / 'test'
        numNum         number of digits per image, default 10
    ----------------------------------------------------------------
    '''
    # Read the dictionary used to correct the box
with open('./Dict/merge_numDic_'+mode+'.pkl', 'rb') as f:
dic = pickle.load(f)
    # Rename the old file
old_box = 'mergeTif/' + 'old_'+boxName
if os.path.exists(old_box): os.remove(old_box)
os.rename( 'mergeTif/'+boxName, old_box )
    # Open the original box file
with open( 'mergeTif/'+'old_'+boxName, 'rb') as box:
        # Open a new box file to hold the corrected entries
with open( 'mergeTif/'+boxName,'w') as check_box:
            # Each image should map to numNum digit lines plus one \t line; extras are dropped, shortfalls are padded
            print( 'Correcting ' + boxName )
for imgIndex in range(mImgNum):
                for numOrd in range(numNum):    # digit lines within the numNum rows; store each after correction
line=box.readline()
                    if(line[0] == 9): # fewer digit lines than numNum: pad the missing lines and break ('\t' is 9 in utf-8)
                        #print(imgIndex, 'padding lines')
                        # pad the missing lines
for l in range(numNum-numOrd):
check_box.write(dic[imgIndex][numOrd+l]+line[1:].decode())
break
                    # replace the first character's encoding to correct the recognition result
for label in range(1,4):
                        if(line[label]==32): break # a space is 32 in utf-8; different characters have different utf-8 byte lengths
line = dic[imgIndex][numOrd].encode() + line[label:]
check_box.write(line.decode())
else: line=box.readline()
                while line[0] != 9: line=box.readline() # numNum or more digits were recognised: skip the extra lines ('\t' is 9 in utf-8)
else: check_box.write(line.decode())
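# Illustrative note on check_box() (hypothetical line content): a digit line such as
# "3 12 30 25 60 0" keeps its coordinates but has its leading character replaced by
# the digit stored for that image/position in the dictionary file.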
if __name__ == '__main__':
    # Correct num.mnist.exp0.box
check_box( boxName = 'num.mnist.exp0.box'
, mImgNum = 350, mode = 'train')
    # Correct num_test.mnist.exp0.box
check_box( boxName = 'num_test.mnist.exp0.box'
, mImgNum = 150, mode = 'test')
    input('Correction complete! Press Enter to exit...')
| StarcoderdataPython |
6462214 | """config.py: a place to hold config/globals"""
from os import path
from enum import Enum
import logging
import prosper.common.prosper_logging as p_logging
import prosper.common.prosper_config as p_config
HERE = path.abspath(path.dirname(__file__))
LOGGER =logging.getLogger('publicAPI')
CONFIG = None #TODO
USER_AGENT = 'lockefox https://github.com/EVEprosper/ProsperAPI'
USER_AGENT_SHORT = 'lockefox @EVEProsper test'
DEFAULT_RANGE = 60
MAX_RANGE = 180
DEFAULT_HISTORY_RANGE = 700
EXPECTED_CREST_RANGE = 400
SPLIT_CACHE_FILE = path.join(HERE, 'cache', 'splitcache.json')
class SwitchCCPSource(Enum):
"""enum for switching between crest/esi"""
ESI = 'ESI'
CREST = 'CREST'
EMD = 'EMD'
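# Illustrative lookup: SwitchCCPSource('CREST') resolves to SwitchCCPSource.CREST,
# so the enum can be built directly from a config string.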
def load_globals(config=CONFIG):
"""loads global vars from config object"""
global USER_AGENT, USER_AGENT_SHORT, DEFAULT_RANGE, MAX_RANGE
USER_AGENT = config.get('GLOBAL', 'useragent')
USER_AGENT_SHORT = config.get('GLOBAL', 'useragent_short')
DEFAULT_RANGE = int(config.get('CREST', 'prophet_range'))
MAX_RANGE = int(config.get('CREST', 'prophet_max'))
SPLIT_INFO = {}
| StarcoderdataPython |
6645244 | <reponame>albseb511/execode<gh_stars>0
from flask import request
from flask_restful import Resource, reqparse
from ..services.contest_detail import get_contests_challenges, get_contests, add_contest, update_contest, caesar_encrypt_raw, caesar_decrypt_raw
from app.main.services.decode_auth_token import decode_auth_token
from app.main import db
from ..services.signup_contest import validate_signup
class Contest(Resource):
""""
Get contest details
Create contest
"""
parser = reqparse.RequestParser()
parser.add_argument('start_date', type=str,
required=True, help="Start Date is needed")
parser.add_argument('end_date', type=str, required=True,
help="End Date is needed")
parser.add_argument('start_time', type=str,
required=True, help="End Time is needed")
parser.add_argument('end_time', type=str, required=True,
help="End Time is needed")
parser.add_argument('details', type=str, required=True,
help="Details is needed")
parser.add_argument('action', type=str, required=False,
help="Action is needed")
parser.add_argument('show_leaderboard', type=bool,
required=True, help="Show leaderboard is needed")
parser.add_argument('challenge_ids', type=list,
required=True, location='json', help="Challenges cannot be empty")
@classmethod
def post(self, contest_name):
# auth token
auth_token = request.headers.get("Authorization")
user_id = decode_auth_token(auth_token)
if user_id:
data = Contest.parser.parse_args()
# Add contest to database
created = add_contest(data, contest_name, user_id)
if created:
return {"comment": "contest created successfully"}, 200
else:
return {"comment": "error in contest creation"}, 501
else:
return {"comment": "JWT Expired or Invalid"}, 401
class ContestGet(Resource):
@classmethod
def get(self, contest_id_encoded):
"""
Contest details
"""
# check authentication header
# check user role
# print(contest_name)
# contests_details = ContestsModel.get_contests_challenges(contest_name)
# print(contests_details)
# return {"message": "data"}
auth_token = request.headers.get("Authorization")
user_id = decode_auth_token(auth_token)
if user_id:
print(contest_id_encoded)
print(user_id)
print('-----------------')
contest_id = caesar_decrypt_raw(contest_id_encoded)
is_signed_up = validate_signup(contest_id, user_id)
if is_signed_up:
return get_contests_challenges(contest_id, user_id)
else:
return {"comment": "please redirect user to the signup page",
"error": True,
"redirect": True,
"url": "/contest/%s"%(contest_id_encoded)}, 403
else:
return {"comment": "JWT Expired or Invalid", "error": True, "redirect": False}, 200
class Contests(Resource):
@classmethod
def get(self):
return get_contests()
class ContestEdit(Resource):
""""
Get contest details
Create contest
"""
parser = reqparse.RequestParser()
parser.add_argument('start_date', type=str,
required=True, help="Start Date is needed")
parser.add_argument('end_date', type=str, required=True,
help="End Date is needed")
parser.add_argument('start_time', type=str,
required=True, help="End Time is needed")
parser.add_argument('end_time', type=str, required=True,
help="End Time is needed")
parser.add_argument('details', type=str, required=True,
help="Details is needed")
parser.add_argument('show_leaderboard', type=bool,
required=True, help="Show leaderboard is needed")
parser.add_argument('contest_name', type=str, required=False,
help="contest Name is needed")
@classmethod
def post(self, contest_id):
auth_token = request.headers.get("Authorization")
user_id = decode_auth_token(auth_token)
if user_id:
data = ContestEdit.parser.parse_args()
            updated = update_contest(data, contest_id, user_id)
if updated:
return {'status': 'ok',"comment": "contest updated successfully"}, 200
else:
return {'status': 'fail',"comment": "error in contest updation"}, 200 | StarcoderdataPython |
3297956 | <reponame>shigeyukioba/matchernet
from matchernet.ekf import *
from matchernet.fn import *
from matchernet.matchernet import *
from matchernet.matchernet_null import *
from matchernet.observer import *
from matchernet.state import *
from matchernet.state_space_model_2d import *
from matchernet.utils import *
from matchernet.control.mpcenv import *
from matchernet.control.control import *
from matchernet.control.mpc import *
from matchernet.control.multi import *
from matchernet.control.ilqg import *
from matchernet import *
from matchernet.misc.recording import *
__copyright__ = 'Copyright (C) 2019 Shigeyuki Oba'
__version__ = '0.1.0'
__license__ = 'Apache License 2.0'
__author__ = '<NAME>'
__author_email__ = '<EMAIL>'
__url__ = 'https://github.com/shigeyukioba/matchernet'
__all__ = [
"ekf",
"fn",
"matchernet",
"matchernet_null",
"observer",
"state",
"state_space_model_2d",
"utils"
"control"
]
| StarcoderdataPython |
9609618 | <filename>test/test_player.py
import pytest
from PyBall import PyBall
from PyBall.models import Person
from PyBall.exceptions import InvalidIdError, BadRequestError
@pytest.fixture(scope='module')
def test_player():
pyball = PyBall()
return pyball.get_player(446372)
def test_get_player_endpoint_returns_player(test_player):
assert isinstance(test_player, Person)
def test_bad_player_id():
pyball = PyBall()
with pytest.raises(InvalidIdError):
pyball.get_player(-1)
def test_player_id_not_a_num():
pyball = PyBall()
with pytest.raises(BadRequestError):
pyball.get_player("not_an_id")
| StarcoderdataPython |
8099804 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
CODE_TYPE_CHOICES = (
('code', _('Inline code')),
('pre', _('Code block')),
('var', _('Variables')),
('kbd', _('User input')),
('samp', _('Sample output')),
)
| StarcoderdataPython |
321541 | <reponame>greysondn/gamesolutions<gh_stars>0
from tqdm import tqdm
def main():
# some very basic config
minSeatCount = 0
maxSeatCount = 9
for i in range(minSeatCount, maxSeatCount + 1):
# build table
table = []
for j in range(i):
table.append(j)
#actual test process
for j in range(i - 1):
swp = buildAndTest(j, table, table, [], ([], [], len(table) + 1))
tqdm.write(f"{i}:{j} --> {swp[2]} : {swp[1]}")
def buildAndTest(startCorrect, table, remaining, current, worst):
retWorst = (worst[0].copy(), worst[1].copy(), worst[2])
# limit check
if (0 == len(remaining)):
# limit function
if (countMatches(table, current) == startCorrect):
curMax = getMaxArrangement(table, current)
if (curMax[2] < retWorst[2]):
retWorst = (curMax[0].copy(), curMax[1].copy(), curMax[2])
else:
# recursive function
# remaining into current
for i in tqdm(range(len(remaining)), leave=False):
curChop = current.copy()
remChop = remaining.copy()
curChop.append(remChop.pop(i))
# pump downwards
retWorst = buildAndTest(startCorrect, table, remChop, curChop, retWorst)
return retWorst
def rotBackwards(someList):
ret = []
for i in range(1, len(someList)):
ret.append(someList[i])
ret.append(someList[0])
return ret
def rotForwards(someList):
ret = []
ret.append(someList[-1])
for i in range(len(someList)-1):
ret.append(someList[i])
return ret
def getMaxArrangement(table, people):
ret = None
retArrangement = table.copy()
retPeople = people.copy()
retCount = countMatches(table, people)
curPeople = people.copy()
for i in range(len(table)):
curPeople = rotForwards(curPeople)
curCount = countMatches(table, curPeople)
if (curCount > retCount):
retCount = curCount
retPeople = curPeople.copy()
ret = (retArrangement, retPeople, retCount)
return ret
def countMatches(table, people):
# a running count of matches
ret = 0
for i in range(len(table)):
if (table[i] == people[i]):
ret += 1
return ret
if __name__ == "__main__":
main()
| StarcoderdataPython |
6639141 | <filename>python/pandaset/meta.py
#!/usr/bin/env python3
import json
import os.path
from abc import ABCMeta, abstractmethod
from typing import TypeVar, List, overload, Dict
T = TypeVar('T')
class Meta:
"""Meta class inherited by subclasses for more specific meta data types.
``Meta`` provides generic preparation and loading methods for PandaSet folder structures. Subclasses
for specific meta data types must implement certain methods, as well as can override existing ones for extension.
Args:
directory: Absolute or relative path where annotation files are stored
Attributes:
data: List of meta data objects. The type of list elements depends on the subclass specific meta data type.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def _filename(self) -> str:
...
@property
def data(self) -> List[T]:
"""Returns meta data array.
Subclasses can use any type inside array.
"""
return self._data
def __init__(self, directory: str) -> None:
self._directory: str = directory
self._data_structure: str = None
self._data: List[T] = None
self._load_data_structure()
@overload
def __getitem__(self, item: int) -> T:
...
@overload
def __getitem__(self, item: slice) -> List[T]:
...
def __getitem__(self, item):
return self._data[item]
def load(self) -> None:
"""Loads all meta data files from disk into memory.
All meta data files are loaded into memory in filename order.
"""
self._load_data()
def _load_data_structure(self) -> None:
meta_file = f'{self._directory}/{self._filename}'
if os.path.isfile(meta_file):
self._data_structure = meta_file
def _load_data(self) -> None:
self._data = []
with open(self._data_structure, 'r') as f:
file_data = json.load(f)
for entry in file_data:
self._data.append(entry)
class GPS(Meta):
"""GPS data for each timestamp in this sequence.
``GPS`` provides GPS data for each timestamp. GPS data can be retrieved by slicing an instanced ``GPS`` class. (see example)
Args:
directory: Absolute or relative path where annotation files are stored
Attributes:
data: List of meta data objects. The type of list elements depends on the subclass specific meta data type.
Examples:
Assuming an instance `s` of class ``Sequence``, you can get GPS data for the first 5 frames in the sequence as follows:
>>> s.load_gps()
>>> gps_data_0_5 = s.gps[:5]
>>> print(gps_data_0_5)
[{'lat': 37.776089291519924, 'long': -122.39931707791749, 'height': 2.950900131607181, 'xvel': 0.0014639192106827986, 'yvel': 0.15895995994754034}, ...]
"""
@property
def _filename(self) -> str:
return 'gps.json'
@property
def data(self) -> List[Dict[str, float]]:
"""Returns GPS data array.
For every timestamp in the sequence, the GPS data contains vehicle latitude, longitude, height and velocity.
Returns:
List of dictionaries. Each dictionary has `str` keys and return types as follows:
- `lat`: `float`
- Latitude in decimal degree format. Positive value corresponds to North, negative value to South.
- `long`: `float`
- Longitude in decimal degree format. Positive value indicates East, negative value to West.
- `height`: `float`
- Measured height in meters.
- `xvel`: `float`
- Velocity in m/s
- `yvel`: `float`
- Velocity in m/s
"""
return self._data
def __init__(self, directory: str) -> None:
Meta.__init__(self, directory)
@overload
def __getitem__(self, item: int) -> Dict[str, T]:
...
@overload
def __getitem__(self, item: slice) -> List[Dict[str, T]]:
...
def __getitem__(self, item):
return self._data[item]
class Timestamps(Meta):
@property
def _filename(self) -> str:
return 'timestamps.json'
@property
def data(self) -> List[float]:
"""Returns timestamp array.
For every frame in this sequence, this property stores the recorded timestamp.
Returns:
List of timestamps as `float`
"""
return self._data
def __init__(self, directory: str) -> None:
Meta.__init__(self, directory)
@overload
def __getitem__(self, item: int) -> float:
...
@overload
def __getitem__(self, item: slice) -> List[float]:
...
def __getitem__(self, item):
return self._data[item]
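
# Illustrative usage sketch (added for clarity, not part of the original
# module): loading per-frame timestamps for a hypothetical sequence folder.
# The path below is an assumption; Timestamps expects the directory that
# contains 'timestamps.json'.
def _example_load_timestamps():
    timestamps = Timestamps('/data/pandaset/001/meta')
    timestamps.load()
    return timestamps[:5]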
if __name__ == '__main__':
pass
| StarcoderdataPython |
3206486 | <gh_stars>1-10
#!/usr/bin/env python
import unittest
from six.moves import xrange
from netutils_linux_monitoring.softirqs import Softirqs
class SoftirqsTest(unittest.TestCase):
def test_file2data(self):
for cpu in ('dualcore', 'i7'):
for i in xrange(1, 6):
top = Softirqs()
top.parse_options(options={'random': True})
top.options.softirqs_file = 'tests/softirqs/{0}/softirqs{1}'.format(cpu, i)
self.assertTrue('NET_RX' in top.parse())
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
8176783 | <gh_stars>1-10
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import authenticated, HTTPError
from tornado.gen import coroutine
from future.utils import viewitems
from os.path import basename, getsize, join, isdir
from os import walk
from datetime import datetime
from .base_handlers import BaseHandler
from qiita_pet.handlers.api_proxy.util import check_access
from qiita_db.study import Study
from qiita_db.util import (filepath_id_to_rel_path, get_db_files_base_dir,
get_filepath_information, get_mountpoint,
filepath_id_to_object_id, get_data_types)
from qiita_db.meta_util import validate_filepath_access_by_user
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.prep_template import PrepTemplate
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_core.util import execute_as_transaction, get_release_info
class BaseHandlerDownload(BaseHandler):
def _check_permissions(self, sid):
# Check general access to study
study_info = check_access(sid, self.current_user.id)
if study_info:
raise HTTPError(405, reason="%s: %s, %s" % (
study_info['message'], self.current_user.email, sid))
return Study(sid)
def _generate_files(self, header_name, accessions, filename):
text = "sample_name\t%s\n%s" % (header_name, '\n'.join(
["%s\t%s" % (k, v) for k, v in viewitems(accessions)]))
self.set_header('Content-Description', 'text/csv')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache')
self.set_header('Content-Disposition', 'attachment; '
'filename=%s' % filename)
self.write(text)
self.finish()
def _list_dir_files_nginx(self, dirpath):
"""Generates a nginx list of files in the given dirpath for nginx
Parameters
----------
dirpath : str
Path to the directory
Returns
-------
        list of (str, str, str, str)
The path information needed by nginx for each file in the
directory
"""
basedir = get_db_files_base_dir()
basedir_len = len(basedir) + 1
to_download = []
for dp, _, fps in walk(dirpath):
for fn in fps:
fullpath = join(dp, fn)
spath = fullpath
if fullpath.startswith(basedir):
spath = fullpath[basedir_len:]
to_download.append((spath, spath, '-', str(getsize(fullpath))))
return to_download
def _list_artifact_files_nginx(self, artifact):
"""Generates a nginx list of files for the given artifact
Parameters
----------
artifact : qiita_db.artifact.Artifact
The artifact to retrieve the files
Returns
-------
        list of (str, str, str, str)
The path information needed by nginx for each file in the artifact
"""
basedir = get_db_files_base_dir()
basedir_len = len(basedir) + 1
to_download = []
for i, x in enumerate(artifact.filepaths):
# ignore if tgz as they could create problems and the
# raw data is in the folder
if x['fp_type'] == 'tgz':
continue
if isdir(x['fp']):
# If we have a directory, we actually need to list all the
# files from the directory so NGINX can actually download all
# of them
to_download.extend(self._list_dir_files_nginx(x['fp']))
elif x['fp'].startswith(basedir):
spath = x['fp'][basedir_len:]
to_download.append(
(spath, spath, str(x['checksum']), str(x['fp_size'])))
else:
to_download.append(
(x['fp'], x['fp'], str(x['checksum']), str(x['fp_size'])))
for pt in artifact.prep_templates:
qmf = pt.qiime_map_fp
if qmf is not None:
sqmf = qmf
if qmf.startswith(basedir):
sqmf = qmf[basedir_len:]
fname = 'mapping_files/%s_mapping_file.txt' % artifact.id
to_download.append((sqmf, fname, '-', str(getsize(qmf))))
return to_download
def _write_nginx_file_list(self, to_download):
"""Writes out the nginx file list
Parameters
----------
to_download : list of (str, str, str, str)
The file list information
"""
all_files = '\n'.join(
["%s %s /protected/%s %s" % (fp_checksum, fp_size, fp, fp_name)
for fp, fp_name, fp_checksum, fp_size in to_download])
self.set_header('X-Archive-Files', 'zip')
self.write("%s\n" % all_files)
    def _set_nginx_headers(self, fname):
        """Sets common nginx headers
Parameters
----------
fname : str
Nginx's output filename
"""
self.set_header('Content-Description', 'File Transfer')
self.set_header('Expires', '0')
self.set_header('Cache-Control', 'no-cache')
self.set_header('Content-Disposition',
'attachment; filename=%s' % fname)
def _write_nginx_placeholder_file(self, fp):
"""Writes nginx placeholder file in case that nginx is not set up
Parameters
----------
fp : str
The path to be downloaded through nginx
"""
# If we don't have nginx, write a file that indicates this
self.write("This installation of Qiita was not equipped with "
"nginx, so it is incapable of serving files. The file "
"you attempted to download is located at %s" % fp)
class DownloadHandler(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, filepath_id):
fid = int(filepath_id)
if not validate_filepath_access_by_user(self.current_user, fid):
raise HTTPError(
403, "%s doesn't have access to "
"filepath_id: %s" % (self.current_user.email, str(fid)))
relpath = filepath_id_to_rel_path(fid)
fp_info = get_filepath_information(fid)
fname = basename(relpath)
if fp_info['filepath_type'] in ('directory', 'html_summary_dir'):
# This is a directory, we need to list all the files so NGINX
# can download all of them
to_download = self._list_dir_files_nginx(fp_info['fullpath'])
self._write_nginx_file_list(to_download)
fname = '%s.zip' % fname
else:
self._write_nginx_placeholder_file(relpath)
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Transfer-Encoding', 'binary')
self.set_header('X-Accel-Redirect', '/protected/' + relpath)
aid = filepath_id_to_object_id(fid)
if aid is not None:
fname = '%d_%s' % (aid, fname)
self._set_nginx_headers(fname)
self.finish()
class DownloadStudyBIOMSHandler(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, study_id):
study_id = int(study_id)
study = self._check_permissions(study_id)
# loop over artifacts and retrieve those that we have access to
to_download = []
# The user has access to the study, but we don't know if the user
# can do whatever he wants to the study or just access the public
# data. (1) an admin has access to all the data; (2) if the study
# is not public, and the user has access, then it has full access
# to the data; (3) if the study is public and the user is not the owner
# or the study is shared with him, then the user doesn't have full
# access to the study data
full_access = (
(self.current_user.level == 'admin') |
(study.status != 'public') |
((self.current_user == study.owner) |
(self.current_user in study.shared_with)))
for a in study.artifacts(artifact_type='BIOM'):
if full_access or a.visibility == 'public':
to_download.extend(self._list_artifact_files_nginx(a))
self._write_nginx_file_list(to_download)
zip_fn = 'study_%d_%s.zip' % (
study_id, datetime.now().strftime('%m%d%y-%H%M%S'))
self._set_nginx_headers(zip_fn)
self.finish()
class DownloadRelease(BaseHandlerDownload):
@coroutine
def get(self, extras):
biom_metadata_release, archive_release = get_release_info()
if extras == 'archive':
relpath = archive_release[1]
else:
relpath = biom_metadata_release[1]
# If we don't have nginx, write a file that indicates this
# Note that this configuration will automatically create and download
# ("on the fly") the zip file via the contents in all_files
self._write_nginx_placeholder_file(relpath)
self._set_nginx_headers(basename(relpath))
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Transfer-Encoding', 'binary')
self.set_header('X-Accel-Redirect',
'/protected-working_dir/' + relpath)
self.finish()
class DownloadRawData(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, study_id):
study_id = int(study_id)
study = self._check_permissions(study_id)
user = self.current_user
# Checking access options
is_owner = study.has_access(user, True)
public_raw_download = study.public_raw_download
if not is_owner and not public_raw_download:
raise HTTPError(405, reason="%s: %s, %s" % (
'No raw data access', self.current_user.email, str(study_id)))
# loop over artifacts and retrieve raw data (no parents)
to_download = []
for a in study.artifacts():
if not a.parents:
if not is_owner and a.visibility != 'public':
continue
to_download.extend(self._list_artifact_files_nginx(a))
self._write_nginx_file_list(to_download)
zip_fn = 'study_raw_data_%d_%s.zip' % (
study_id, datetime.now().strftime('%m%d%y-%H%M%S'))
self._set_nginx_headers(zip_fn)
self.finish()
class DownloadEBISampleAccessions(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, study_id):
sid = int(study_id)
self._check_permissions(sid)
self._generate_files(
'sample_accession', SampleTemplate(sid).ebi_sample_accessions,
'ebi_sample_accessions_study_%s.tsv' % sid)
class DownloadEBIPrepAccessions(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, prep_template_id):
pid = int(prep_template_id)
pt = PrepTemplate(pid)
sid = pt.study_id
self._check_permissions(sid)
self._generate_files(
'experiment_accession', pt.ebi_experiment_accessions,
'ebi_experiment_accessions_study_%s_prep_%s.tsv' % (sid, pid))
class DownloadUpload(BaseHandlerDownload):
@authenticated
@coroutine
@execute_as_transaction
def get(self, path):
user = self.current_user
if user.level != 'admin':
raise HTTPError(403, reason="%s doesn't have access to download "
"uploaded files" % user.email)
# [0] because it returns a list
# [1] we only need the filepath
filepath = get_mountpoint("uploads")[0][1][
len(get_db_files_base_dir()):]
relpath = join(filepath, path)
self._write_nginx_placeholder_file(relpath)
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Transfer-Encoding', 'binary')
self.set_header('X-Accel-Redirect', '/protected/' + relpath)
self._set_nginx_headers(basename(relpath))
self.finish()
class DownloadPublicHandler(BaseHandlerDownload):
@coroutine
@execute_as_transaction
def get(self):
data = self.get_argument("data", None)
study_id = self.get_argument("study_id", None)
data_type = self.get_argument("data_type", None)
dtypes = get_data_types().keys()
if data is None or study_id is None or data not in ('raw', 'biom'):
raise HTTPError(422, reason='You need to specify both data (the '
'data type you want to download - raw/biom) and '
'study_id')
elif data_type is not None and data_type not in dtypes:
raise HTTPError(422, reason='Not a valid data_type. Valid types '
'are: %s' % ', '.join(dtypes))
else:
study_id = int(study_id)
try:
study = Study(study_id)
except QiitaDBUnknownIDError:
raise HTTPError(422, reason='Study does not exist')
else:
public_raw_download = study.public_raw_download
if study.status != 'public':
raise HTTPError(422, reason='Study is not public. If this '
'is a mistake contact: '
'<EMAIL>')
elif data == 'raw' and not public_raw_download:
raise HTTPError(422, reason='No raw data access. If this '
'is a mistake contact: '
'<EMAIL>')
else:
to_download = []
for a in study.artifacts(dtype=data_type,
artifact_type='BIOM'
if data == 'biom' else None):
if a.visibility != 'public':
continue
to_download.extend(self._list_artifact_files_nginx(a))
if not to_download:
raise HTTPError(422, reason='Nothing to download. If '
'this is a mistake contact: '
'<EMAIL>')
else:
self._write_nginx_file_list(to_download)
zip_fn = 'study_%d_%s_%s.zip' % (
study_id, data, datetime.now().strftime(
'%m%d%y-%H%M%S'))
self._set_nginx_headers(zip_fn)
self.finish()
| StarcoderdataPython |
6539893 | # ---------------------------------------------------------------------------------------------------------------------
# API Wars
# controller: server logic control
# v 1.0
# ---------------------------------------------------------------------------------------------------------------------
from modules import data_format as df, data_handler as dh, swapi, utilities as util
# ----------------------------------------------- main data controllers -----------------------------------------------
def data_get(subject: str, page_number: int) -> tuple:
""" Collects the data of the subject. """
def data_get_from_swapi(subject: str, page_number: int) -> dict:
""" Gets the subject data from the external api. """
row_data = swapi.get_data(f'{subject}/?page={page_number}')
# error handling
if 'detail' in row_data and row_data['detail'] == 'Not found':
raise ValueError(f'Page {page_number} with list of the {subject} not found.')
return row_data
# ------------- data_get() -------------
row_data = data_get_from_swapi(subject, page_number)
data = dh.data_prepare(row_data['results'], column_names_get_necessary(subject)) # 'results' - the subject data
data = df.data_format(subject, data)
util.pagination_number_set(subject, row_data['count']) # 'count' - the number of items in subject
return tuple(data)
def data_change_url_to_name(subject_data: tuple, column_name: str) -> tuple:
""" Changes names instead of urls in cell data (later displayed on the buttons). """
return tuple(dh.change_url_to_name(list(subject_data), column_name))
# ---------------------------------------------- button data controllers ----------------------------------------------
def button_data_get(subject: str, subject_data: tuple) -> list:
""" Returns the modal window data needed to handle client-side events. """
return dh.button_data_get(subject_data, dh.columns_with_button_get(subject))
# ---------------------------------------------- column names controllers ---------------------------------------------
def column_names_get(subject: str) -> tuple:
""" Returns column names. """
column_names = dh.column_names_get(subject)
column_names = df.column_names_format(column_names)
column_names = dh.column_names_prepare(column_names)
return column_names
def column_names_get_necessary(subject: str, modal_window: bool = False) -> tuple:
""" Returns the necessary header names used in column names. """
columns = dh.column_names_get(subject)
if modal_window:
columns = dh.column_names_for_modal_window(subject, columns)
return columns
| StarcoderdataPython |
191641 | <filename>knxpy/dpts/dpt1.py
#!/usr/bin/env python3
"""
Boolean data
1 bit
0,1
"""
def encode(value):
return [int(value) & 0x01]
def decode(data):
return (data & 0x01)
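
# Minimal usage sketch (added for clarity): DPT 1.x values round-trip through
# encode()/decode() as a single-bit payload.
if __name__ == "__main__":
    assert encode(True) == [1]
    assert encode(0) == [0]
    assert decode(0x01) == 1
    assert decode(0xFE) == 0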
| StarcoderdataPython |
8006032 | """
Test Results for the VAR model. Obtained from Stata using
datasets/macrodata/var.do
"""
import numpy as np
class MacrodataResults(object):
params = [-0.2794863875, 0.0082427826, 0.6750534746,
0.2904420695, 0.0332267098, -0.0073250059,
0.0015269951, -0.1004938623, -0.1231841792,
0.2686635768, 0.2325045441, 0.0257430635,
0.0235035714, 0.0054596064, -1.97116e+00,
0.3809752365, 4.4143364022, 0.8001168377,
0.2255078864, -0.1241109271, -0.0239026118]
params = np.asarray(params).reshape(3, -1)
params = np.hstack((params[:, -1][:, None],
params[:, :-1:2],
params[:, 1::2]))
params = params
neqs = 3
nobs = 200
df_eq = 7
nobs_1 = 200
df_model_1 = 6
rmse_1 = .0075573716985351
rsquared_1 = .2739094844780006
llf_1 = 696.8213727557811
nobs_2 = 200
rmse_2 = .0065444260782597
rsquared_2 = .1423626064753714
llf_2 = 725.6033255319256
nobs_3 = 200
rmse_3 = .0395942039671031
rsquared_3 = .2955406949737428
llf_3 = 365.5895183036045
# These are from Stata. They use the LL based definition
# We return Lutkepohl statistics. See Stata TS manual page 436
# bic = -19.06939794312953 # Stata version; we use R version below
# aic = -19.41572126661708 # Stata version; we use R version below
# hqic = -19.27556951526737 # Stata version; we use R version below
# These are from R. See var.R in macrodata folder
bic = -2.758301611618373e+01
aic = -2.792933943967127e+01
hqic = -2.778918768832157e+01
fpe = 7.421287668357018e-13
detsig = 6.01498432283e-13
llf = 1962.572126661708
chi2_1 = 75.44775165699033
# TODO: don't know how they calculate chi2_1
# it's not -2 * (ll1 - ll0)
chi2_2 = 33.19878716815366
chi2_3 = 83.90568280242312
bse = [.1666662376, .1704584393, .1289691456,
.1433308696, .0257313781, .0253307796,
.0010992645, .1443272761, .1476111934,
.1116828804, .1241196435, .0222824956,
.021935591, .0009519255, .8731894193,
.8930573331, .6756886998, .7509319263,
.1348105496, .1327117543, .0057592114]
bse = np.asarray(bse).reshape(3, -1)
bse = np.hstack((bse[:, -1][:, None],
bse[:, :-1:2],
bse[:, 1::2]))
| StarcoderdataPython |
7343 | import csv
import os
from collections import deque
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_PATH = os.path.join(BASE_DIR, 'goods_source.csv')
OUTPUT_PATH = os.path.join(BASE_DIR, 'result.csv')
FILE_ENCODE = 'shift_jis'
INPUT_COLS = ('id', 'goods_name', 'price')
def import_csv():
    """Read the input data from the CSV file.
"""
try:
data_l = list()
with open(INPUT_PATH, mode='r', encoding=FILE_ENCODE, newline='') as csvf:
reader = csv.DictReader(csvf)
for dic in reader:
dic['id'] = int(dic['id'])
dic['price'] = int(dic['price'])
data_l.append(dic)
for col in INPUT_COLS:
if col not in data_l[0]:
raise IndexError(col)
return data_l
except FileNotFoundError:
        print('goods_source.csv was not found')
return list()
except IndexError as e:
        print('Missing column: ' + str(e))
return list()
def func(init, old_que, threshold=50):
keep = dict()
new_que = deque(list())
while old_que:
last = old_que.pop()
if init['mod'] + last['mod'] >= threshold:
if keep:
new_que.appendleft(keep)
keep = last
else:
new_que.appendleft(last)
break
return init, keep, old_que, new_que
def calculate(data_l):
    """Algorithm
    1. Among remainders below 50, look for items that can be paired
       1-1. If the ends of the queue can form a pair, fix the left end and pick the right element so the sum is at least 50 and as small as possible
       1-2. If the ends of the queue cannot form a pair, take out the two end items and consume them in a combination of three or more items
            1-2-1. If the sum with the right end is at least 50, search from right to left for the smallest value that brings the sum to 50 or more -> group them and remove them
            1-2-2. If even the right end does not bring the sum to 50, absorb the right end and go back to 1-2
            -> if consuming everything still does not reach 50, simply add everything together
    2. Do the same as step 1 over the whole set with a threshold of 150
"""
    # only items whose remainder is below 50 go into the pairing step
under_que = list()
over_que = list()
for i in range(len(data_l)):
_mod = data_l[i]['price'] % 100
data_l[i]['set'] = 0
dic = {
'id': [i],
'mod': _mod,
}
if _mod < 50:
under_que.append(dic)
else:
over_que.append(dic)
under_que.sort(key=lambda x: x['mod'])
under_que = deque(under_que)
while under_que:
init = under_que.popleft()
while under_que:
init, keep, under_que, last_que = func(init, under_que)
# この時点でlast_queは要素1以上
if not keep:
keep = last_que.pop()
init = {
'id': init['id'] + keep['id'],
'mod': init['mod'] + keep['mod'],
}
if last_que:
over_que.append(init)
under_que.extend(last_que)
break
else:
over_que.append(init)
break
    # among the items with a remainder of 50 or more, add together those
    # whose total reaches 150 or more (this minimizes the number of purchases)
    # final_que: the final combinations
over_que = deque(sorted(over_que, key=lambda x: x['mod']))
final_que = list()
while over_que:
init = over_que.popleft()
init, keep, over_que, last_que = func(init, over_que, 150)
if keep:
init = {
'id': init['id'] + keep['id'],
'mod': (init['mod'] + keep['mod']) % 100,
}
over_que.appendleft(init)
else:
final_que.append(init)
over_que.extend(last_que)
sum_p = 0
    # output the calculation results
for cnt, que in enumerate(final_que):
point = 0
for id in que['id']:
data_l[id]['set'] = cnt + 1
point += data_l[id]['price']
print(f'set{cnt + 1} {round(point / 100)} P')
sum_p += round(point / 100)
print(f'total: {sum_p} P')
return data_l
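
# Illustrative sketch (added for clarity): a tiny input showing the pairing
# idea. Prices 130 and 130 leave remainders of 30 each (both below 50);
# combined they reach 60, so both rows end up in the same set and earn
# round(260 / 100) = 3 points instead of 1 + 1 when bought separately.
def _example_calculate():
    rows = [
        {'id': 0, 'goods_name': 'item_a', 'price': 130},
        {'id': 1, 'goods_name': 'item_b', 'price': 130},
    ]
    return calculate(rows)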
def main():
    # read the input file
data_l = import_csv()
if not data_l:
        print('Aborting processing')
return False
    # run the calculation
data_l = calculate(data_l)
    # write the results to a file
data_l.sort(key=lambda x: (x['set'], x['id']))
with open(OUTPUT_PATH, mode='w', encoding=FILE_ENCODE, newline='') as csvf:
writer = csv.DictWriter(csvf, data_l[0].keys())
writer.writeheader()
writer.writerows(data_l)
print('Done')
if __name__ == '__main__':
main()
| StarcoderdataPython |
1964486 | <reponame>agonzale34/sdc-advanced-lane-detection
import matplotlib.pyplot as plt
import numpy as np
import cv2
# helper to show 2 images next to each other
def visualize_result(original, result, gray=False, show=True):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(original)
ax1.set_title('Original Image', fontsize=30)
if gray:
ax2.imshow(result, cmap='gray')
else:
ax2.imshow(result)
ax2.set_title('Result Image', fontsize=30)
if show:
plt.show()
def visualize_result4(img1, img2, img3, img4):
f, (ax1, ax2) = plt.subplots(2, 2, figsize=(20, 10))
ax1[0].imshow(img1[0])
ax1[0].set_title(img1[1], fontsize=30)
ax2[0].imshow(img2[0])
ax2[0].set_title(img2[1], fontsize=30)
ax1[1].imshow(img3[0])
ax1[1].set_title(img3[1], fontsize=30)
ax2[1].imshow(img4[0])
ax2[1].set_title(img4[1], fontsize=30)
plt.show()
def show_lines_on_image_test(img_binary, left_lane, right_lane, sa_margin):
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((img_binary, img_binary, img_binary)) * 255
window_img = np.zeros_like(out_img)
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_lane.allx - sa_margin, left_lane.ally]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_lane.allx + sa_margin,
left_lane.ally])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_lane.allx - sa_margin, right_lane.ally]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_lane.allx + sa_margin,
right_lane.ally])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0, 255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0, 255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# Plot the polynomial lines onto the image
plt.plot(left_lane.allx, left_lane.ally, color='yellow')
plt.plot(right_lane.allx, right_lane.ally, color='yellow')
plt.imshow(result)
| StarcoderdataPython |
3207702 | <filename>ex020.py
# The same teacher from challenge 19 wants to draw lots for the order in which students present their assignments. Write a program that reads the names of four students and shows the drawn order.
from random import shuffle
n1 = input('Enter the name of the first student: ')
n2 = input('Enter the name of the second student: ')
n3 = input('Enter the name of the third student: ')
n4 = input('Enter the name of the fourth student: ')
lista = [n1, n2, n3, n4]
shuffle(lista)
print('The presentation order of the students is: ', lista)
| StarcoderdataPython |
1785146 | <reponame>damm89/django-camunda<gh_stars>1-10
"""
Public Python API to interact with Activiti.
"""
from typing import Any, Dict, Iterable, List, Optional
import requests
from .camunda_models import ProcessDefinition, Task, factory
from .client import get_client
from .types import CamundaId, ProcessVariables
from .utils import deserialize_variable, serialize_variable
def get_process_definitions() -> List[ProcessDefinition]:
client = get_client()
response = client.get("process-definition", {"sortBy": "name", "sortOrder": "asc"})
return factory(ProcessDefinition, response)
def get_start_form_variables(
process_key: Optional[str] = None, process_id: Optional[str] = None
) -> ProcessVariables:
"""
Get the start form variables from the Camunda process.
If defaults are configured in the Camunda process, these will be returned as value.
A process ID is more specific than a process key, so if both are provided, the
process ID will be used.
"""
if not (process_key or process_id):
raise ValueError("Provide a process key or process ID")
if process_id:
endpoint = f"process-definition/{process_id}/form-variables"
else:
endpoint = f"process-definition/key/{process_key}/form-variables"
# TODO: do any necessary type casting
client = get_client()
variables = client.get(endpoint, underscoreize=False)
return variables
def _get_variable(kind: str, id_ref: CamundaId, name: str) -> Any:
client = get_client()
path = f"{kind}/{id_ref}/variables/{name}"
response_data = client.get(
path, params={"deserializeValues": "false"}, underscoreize=False
)
return deserialize_variable(response_data)
def _get_variables(kind: str, id_ref: CamundaId) -> Dict[str, Any]:
client = get_client()
path = f"{kind}/{id_ref}/variables"
response_data = client.get(
path, params={"deserializeValues": "false"}, underscoreize=False
)
variables = {
name: deserialize_variable(variable) for name, variable in response_data.items()
}
return variables
def get_process_instance_variable(instance_id: CamundaId, name: str) -> Any:
return _get_variable("process-instance", instance_id, name)
def get_all_process_instance_variables(instance_id: CamundaId) -> Dict[str, Any]:
return _get_variables("process-instance", instance_id)
def get_task_variable(task_id: CamundaId, name: str, default=None) -> Any:
try:
return _get_variable("task", task_id, name)
except requests.HTTPError as exc:
if exc.response.status_code == 404: # variable not set
return default
raise
def get_task_variables(task_id: CamundaId) -> Dict[str, Any]:
return _get_variables("task", task_id)
def send_message(
name: str,
process_instance_ids: Iterable[CamundaId],
variables: Optional[Dict[str, Any]] = None,
) -> None:
"""
Send a BPMN message into running process instances, with optional process variables.
:param name: Name/ID of the message definition, extract this from the process.
:param process_instance_ids: an iterable of process instance IDs, can be uuid
instances or strings.
:param variables: Optional mapping of ``{name: value}`` process variables. Will be
serialized as part of the message sending.
"""
client = get_client()
variables = (
{name: serialize_variable(value) for name, value in variables.items()}
if variables
else None
)
for instance_id in process_instance_ids:
body = {
"messageName": name,
"processInstanceId": instance_id,
"processVariables": variables or {},
}
client.post("message", json=body)
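
# Illustrative sketch (added for clarity, not part of the original project):
# how send_message might be used to correlate a BPMN message with a running
# process instance. The message name, instance ID and variables are
# hypothetical values.
def _example_send_payment_received():
    send_message(
        "Message_payment_received",
        ["5cdbbd28-2f4a-11eb-8f0e-0242ac120003"],
        variables={"amount": 100, "paid": True},
    )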
def get_task(
task_id: CamundaId, check_history=False, factory_cls=Task
) -> Optional[Task]:
client = get_client()
try:
data = client.get(f"task/{task_id}")
except requests.HTTPError as exc:
if exc.response.status_code == 404:
if not check_history:
return None
# see if we can get it from the history
historical = client.get("history/task", {"taskId": task_id})
if not historical:
return None
assert (
len(historical) < 2
), f"Found multiple tasks in the history for ID {task_id}"
data = historical[0]
# these properties do not exist in the history API:
# https://docs.camunda.org/manual/7.11/reference/rest/history/task/get-task-query/
data.update(
{
"created": data["start_time"],
"delegation_state": None,
"suspended": False,
"form_key": None, # cannot determine this...
"historical": True,
}
)
else:
raise
return factory(factory_cls, data)
def complete_task(task_id: CamundaId, variables: dict) -> None:
client = get_client()
variables = {name: serialize_variable(value) for name, value in variables.items()}
client.post(f"task/{task_id}/complete", json={"variables": variables})
| StarcoderdataPython |
8128579 | <reponame>papperlapapp/PyScep
from enum import Enum, IntEnum
class MessageType(Enum):
"""The SCEP Message Type.
This is encoded as PrintableString so this enum also uses strings.
See Also:
- `SCEP RFC <https://datatracker.ietf.org/doc/draft-gutmann-scep/?include_text=1>`_ Section 3.2.1.2.
"""
CertRep = '3'
"""Response to certificate or CRL request"""
RenewalReq = '17'
"""PKCS #10 certificate request for renewal of
an existing certificate."""
UpdateReq = '18'
"""PKCS #10 certificate request for update of a
certificate issued by a different CA."""
PKCSReq = '19'
"""PKCS #10 certificate request."""
CertPoll = '20'
"""Certificate polling in manual enrolment."""
GetCert = '21'
"""Retrieve a certificate."""
GetCRL = '22'
"""Retrieve a CRL."""
class PKIStatus(Enum):
"""The SCEP PKI Status
Decimal value as printableString
See Also:
- SCEP RFC Section 3.2.1.3.
"""
SUCCESS = '0'
FAILURE = '2'
PENDING = '3'
class FailInfo(Enum):
"""SCEP Failure Information"""
BadAlg = '0'
"""Unrecognized or unsupported algorithm."""
BadMessageCheck = '1'
"""Integrity check (meaning signature
verification of the CMS message) failed."""
BadRequest = '2'
"""Transaction not permitted or supported."""
BadTime = '3'
"""The signingTime attribute from the CMS
authenticatedAttributes was not sufficiently close to the system
time (this failure code is present for legacy reasons and is
unlikely to be encountered in practice)."""
BadCertId = '4'
"""No certificate could be identified matching the
provided criteria."""
class CACaps(Enum):
"""CA Capabilities"""
AES = 'AES'
DES3 = 'DES3'
GetNextCACert = 'GetNextCACert'
POSTPKIOperation = 'POSTPKIOperation'
Renewal = 'Renewal'
SHA1 = 'SHA-1'
SHA256 = 'SHA-256'
SHA512 = 'SHA-512'
SCEPStandard = 'SCEPStandard'
Update = 'Update'
class RevocationReason(IntEnum):
"""RFC 5280 - Reasons for Certificate Revocation
See Also:
- `https://tools.ietf.org/html/rfc5280`_.
"""
unspecified = 0
key_compromise = 1
ca_compromise = 2
affiliation_changed = 3
superseded = 4
cessation_of_operation = 5
certificate_hold = 6
remove_from_crl = 8
privilege_withdrawn = 9
aa_compromise = 10
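
# Minimal usage sketch (added for clarity): SCEP message types and statuses
# are encoded as printableString values, so lookups go through the enum value.
if __name__ == "__main__":
    assert MessageType('19') is MessageType.PKCSReq
    assert PKIStatus('0') is PKIStatus.SUCCESS
    assert RevocationReason(4) is RevocationReason.superseded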
| StarcoderdataPython |
1805475 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the build_spotdata_cube function"""
import unittest
from datetime import datetime
import iris
import numpy as np
from iris.tests import IrisTest
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.synthetic_data.set_up_test_cubes import construct_scalar_time_coords
class Test_build_spotdata_cube(IrisTest):
"""Tests for the build_spotdata_cube function"""
def setUp(self):
"""Set up some auxiliary coordinate points for re-use"""
self.altitude = np.array([256.5, 359.1, 301.8, 406.2])
self.latitude = np.linspace(58.0, 59.5, 4)
self.longitude = np.linspace(-0.25, 0.5, 4)
self.wmo_id = ["03854", "03962", "03142", "03331"]
self.neighbour_methods = ["nearest", "nearest_land"]
self.grid_attributes = ["x_index", "y_index", "dz"]
def test_scalar(self):
"""Test output for a single site"""
result = build_spotdata_cube(
1.6, "air_temperature", "degC", 10.0, 59.5, 1.3, "03854"
)
# check result type
self.assertIsInstance(result, iris.cube.Cube)
# check data
self.assertArrayAlmostEqual(result.data, np.array([1.6]))
self.assertEqual(result.name(), "air_temperature")
self.assertEqual(result.units, "degC")
# check coordinate values and units
self.assertEqual(result.coord("spot_index").points[0], 0)
self.assertAlmostEqual(result.coord("altitude").points[0], 10.0)
self.assertEqual(result.coord("altitude").units, "m")
self.assertAlmostEqual(result.coord("latitude").points[0], 59.5)
self.assertEqual(result.coord("latitude").units, "degrees")
self.assertAlmostEqual(result.coord("longitude").points[0], 1.3)
self.assertEqual(result.coord("longitude").units, "degrees")
self.assertEqual(result.coord("wmo_id").points[0], "03854")
def test_site_list(self):
"""Test output for a list of sites"""
data = np.array([1.6, 1.3, 1.4, 1.1])
result = build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
)
self.assertArrayAlmostEqual(result.data, data)
self.assertArrayAlmostEqual(result.coord("altitude").points, self.altitude)
self.assertArrayAlmostEqual(result.coord("latitude").points, self.latitude)
self.assertArrayAlmostEqual(result.coord("longitude").points, self.longitude)
self.assertArrayEqual(result.coord("wmo_id").points, self.wmo_id)
def test_neighbour_method(self):
"""Test output where neighbour_methods is populated"""
data = np.array([[1.6, 1.3, 1.4, 1.1], [1.7, 1.5, 1.4, 1.3]])
result = build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
neighbour_methods=self.neighbour_methods,
)
self.assertArrayAlmostEqual(result.data, data)
self.assertEqual(result.coord_dims("neighbour_selection_method")[0], 0)
self.assertArrayEqual(
result.coord("neighbour_selection_method").points, np.arange(2)
)
self.assertArrayEqual(
result.coord("neighbour_selection_method_name").points,
self.neighbour_methods,
)
def test_grid_attributes(self):
"""Test output where grid_attributes is populated"""
data = np.array(
[[1.6, 1.3, 1.4, 1.1], [1.7, 1.5, 1.4, 1.3], [1.8, 1.5, 1.5, 1.4]]
)
result = build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
grid_attributes=self.grid_attributes,
)
self.assertArrayAlmostEqual(result.data, data)
self.assertEqual(result.coord_dims("grid_attributes")[0], 0)
self.assertArrayEqual(result.coord("grid_attributes").points, np.arange(3))
self.assertArrayEqual(
result.coord("grid_attributes_key").points, self.grid_attributes
)
def test_3d_spot_cube(self):
"""Test output with two extra dimensions"""
data = np.ones((2, 3, 4), dtype=np.float32)
result = build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
neighbour_methods=self.neighbour_methods,
grid_attributes=self.grid_attributes,
)
self.assertArrayAlmostEqual(result.data, data)
self.assertEqual(result.coord_dims("neighbour_selection_method")[0], 0)
self.assertEqual(result.coord_dims("grid_attributes")[0], 1)
def test_3d_spot_cube_with_unequal_length_coordinates(self):
"""Test error is raised if coordinates lengths do not match data
dimensions."""
data = np.ones((4, 2, 2), dtype=np.float32)
msg = "Unequal lengths"
with self.assertRaisesRegex(ValueError, msg):
build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
neighbour_methods=self.neighbour_methods,
grid_attributes=self.grid_attributes,
)
def test_scalar_coords(self):
"""Test additional scalar coordinates"""
[(time_coord, _), (frt_coord, _), (fp_coord, _)] = construct_scalar_time_coords(
datetime(2015, 11, 23, 4, 30), None, datetime(2015, 11, 22, 22, 30)
)
data = np.ones((2, 4), dtype=np.float32)
result = build_spotdata_cube(
data,
"air_temperature",
"degC",
self.altitude,
self.latitude,
self.longitude,
self.wmo_id,
scalar_coords=[time_coord, frt_coord, fp_coord],
neighbour_methods=self.neighbour_methods,
)
# pylint: disable=unsubscriptable-object
self.assertEqual(result.coord("time").points[0], time_coord.points[0])
self.assertEqual(
result.coord("forecast_reference_time").points[0], frt_coord.points[0]
)
self.assertEqual(result.coord("forecast_period").points[0], fp_coord.points[0])
def test_renaming_to_set_standard_name(self):
"""Test that CF standard names are set as such in the returned cube,
whilst non-standard names remain as the long_name."""
standard_name_cube = build_spotdata_cube(
1.6, "air_temperature", "degC", 10.0, 59.5, 1.3, "03854"
)
non_standard_name_cube = build_spotdata_cube(
1.6, "toast_temperature", "degC", 10.0, 59.5, 1.3, "03854"
)
self.assertEqual(standard_name_cube.standard_name, "air_temperature")
self.assertEqual(standard_name_cube.long_name, None)
self.assertEqual(non_standard_name_cube.standard_name, None)
self.assertEqual(non_standard_name_cube.long_name, "toast_temperature")
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
6645690 | <filename>src/day6.py<gh_stars>0
import numpy as np
def find_closest(x1, y1, coords): # -1 for ties
    # make sure they're in the same coordinate system
    closest_dist = np.inf
    closest_pair_index = 0
    hits = 0
for i, pair in enumerate(coords):
x2, y2 = pair[0], pair[1]
current_dist = abs(x2-x1)+abs(y2-y1)
if current_dist<closest_dist:
closest_dist = current_dist
closest_pair_index = i
hits = 1
elif current_dist==closest_dist:
hits += 1
if(hits>1): return -1
return closest_pair_index
def solvepart1():
coords = np.loadtxt(open('inputs/day6.txt'), delimiter=', ', dtype=np.int)
xmin, xmax, ymin, ymax = 1000,0,1000,0
for pair in coords:
if(pair[0]>xmax): xmax = pair[0]
if(pair[0]<xmin): xmin = pair[0]
if(pair[1]>ymax): ymax = pair[1]
if(pair[1]<ymin): ymin = pair[1]
shape = (xmax-xmin, ymax-ymin)
    grid = np.zeros(shape, dtype=np.int)
for i, _ in np.ndenumerate(grid):
grid[i] = find_closest(i[0]+xmin,i[1]+ymin,coords)
bad = reduce(np.union1d,(grid[0], grid[xmax-xmin-1], grid[:,0], grid[:,ymax-ymin-1]))
print bad
valid = False
ansmax = 0
for i in range(len(coords)):
if i in bad:
continue
ans = (i==grid).sum()
if(ans>ansmax):
ansmax = ans
return ansmax
def check_friendly_region(x1, y1, coords):
limit = 10000
distance_sum = 0
for pair in coords:
x2, y2 = pair[0], pair[1]
current_dist = abs(x2-x1)+abs(y2-y1)
distance_sum += current_dist
if distance_sum>=limit:
return False
return True
def solvepart2():
coords = []
with open('inputs/day6.txt') as f:
for line in f:
pair = map(int, line.split(", "))
coords.append(pair)
xmin, xmax, ymin, ymax = 1000,0,1000,0
for pair in coords:
if(pair[0]>xmax): xmax = pair[0]
if(pair[0]<xmin): xmin = pair[0]
if(pair[1]>ymax): ymax = pair[1]
if(pair[1]<ymin): ymin = pair[1]
shape = (xmax-xmin, ymax-ymin)
grid = np.zeros(shape, dtype=np.int)
# vectorized_f = np.vectorize(find_closest) #TODO RESEARCH
# vectorized_f(grid)
for i, _ in np.ndenumerate(grid):
grid[i] = check_friendly_region(i[0]+xmin,i[1]+ymin,coords)
return grid.sum()
if __name__=='__main__':
print solvepart1()
#print solvepart2()
| StarcoderdataPython |
11305181 | <reponame>Truelite/modian<filename>modian-live-wrapper/lwr/utils.py
# live-wrapper - Wrapper for vmdebootstrap for creating live images
# (C) 2016 <NAME> <<EMAIL>>
# See COPYING for terms of usage, modification and redistribution.
#
# lwr/utils.py - data utilities for suites and url information
"""
General utility functions for URLs or handling differences between
suites, architectures or other variants.
"""
from six.moves.urllib.parse import urljoin
import requests
import os
import shutil
KERNEL = 'vmlinuz'
RAMDISK = 'initrd.gz'
CD_INFO = 'debian-cd_info.tar.gz'
class Fail(BaseException):
pass
def check_url(url):
"""
Check that constructed URLs actually give a HTTP 200.
"""
res = requests.head(url, allow_redirects=True, timeout=30)
if res.status_code != requests.codes.OK: # pylint: disable=no-member
# try using (the slower) get for services with broken redirect support
res = requests.get(
url, allow_redirects=True, stream=True, timeout=30)
if res.status_code != requests.codes.OK: # pylint: disable=no-member
raise Fail("Resources not available at '{}'".format(url))
def cdrom_image_url(mirror, suite, architecture, gtk=False, daily=False):
"""
Create checked URLs for the di helpers.
Returns a tuple of base_url, kernel, ramdisk, cd_info in that order.
"""
if not daily:
# urljoin refuses to use existing subdirs which start with /
if not mirror.endswith('/'):
mirror += '/'
dist_url = urljoin(mirror, 'dists/')
if not suite.endswith('/'):
suite += '/'
suite_url = urljoin(dist_url, suite)
if gtk:
path = 'main/installer-%s/current/images/cdrom/gtk/' % architecture
else:
path = 'main/installer-%s/current/images/cdrom/' % architecture
base_url = urljoin(suite_url, path)
else:
base_url = "https://d-i.debian.org/daily-images/%s/daily/cdrom/" % architecture
if gtk:
base_url += "gtk/"
kernel = urljoin(base_url, KERNEL)
ramdisk = urljoin(base_url, RAMDISK)
cd_info = urljoin(base_url, CD_INFO)
check_url(base_url)
check_url(kernel)
check_url(ramdisk)
return (base_url, kernel, ramdisk, cd_info)
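
# Illustrative sketch (added for clarity, not part of the original module):
# how the checked d-i cdrom image URLs might be fetched for an amd64 build.
# The mirror URL and suite are assumptions, and calling this performs network
# requests against the mirror.
def _example_cdrom_urls():
    return cdrom_image_url(
        "http://deb.debian.org/debian", "stretch", "amd64", gtk=False)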
def copytree(source, target):
if not os.path.exists(target):
os.makedirs(target)
shutil.copystat(source, target)
entries = os.listdir(source)
for entry in entries:
src = os.path.join(source, entry)
tgt = os.path.join(target, entry)
if os.path.isdir(src):
copytree(src, tgt)
else:
shutil.copy2(src, tgt)
def copy_files(src, dest):
for filename in os.listdir(src):
src_path = os.path.join(src, filename)
if os.path.isdir(src_path) or os.path.islink(src_path):
continue
shutil.copyfile(
src_path,
os.path.join(dest, filename))
| StarcoderdataPython |
1920372 | import os
import sys
import traceback
def executeNode(node):
"""
    TODO: Implement https://github.com/pyqtgraph/pyqtgraph/blob/229f650adfd04053213fe6567d6308a4751a349b/pyqtgraph/flowchart/Flowchart.py#L248
Args:
        node (Node):
"""
print ("Executing...")
try :
_root = []
_executeSequence = [node]
_upstream = getPreviousNodes(node)
while _upstream:
_n = _upstream.pop(0)
previousNodes = getPreviousNodes(_n)
if not previousNodes:
if not _n in _root:
_root.append(_n)
else:
_upstream += previousNodes
if not _n in _executeSequence:
_executeSequence.insert(0, _n)
_executeSequence = _root + _executeSequence
print ('Execute order :',[n.NODE_NAME for n in _executeSequence])
for node in _executeSequence:
if not hasattr(node, 'execute'):
continue
node.execute()
return True
except Exception as e:
print (traceback.format_exc(e))
def getInputValue(node):
try:
return node.input(0).connected_ports()[0].node().get_property(node.input(0).connected_ports()[0].name())
# return getattr(node.input(0).connected_ports()[0].node(), node.input(0).connected_ports()[0].name())
except IndexError:
print ("Index Error")
return None
def getPreviousNodes(node):
__allPorts = node.inputs() #return port's name
_previousNode = []
for portName in __allPorts.keys():
destPort = __allPorts[portName].connected_ports()
if not destPort:
continue
_previousNode.append(destPort[0].node())
return _previousNode
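
# Illustrative sketch (added for clarity): a minimal fake node graph showing
# the upstream-first execution order computed by executeNode(). _StubPort and
# _StubNode are hypothetical stand-ins for the real node/port API this module
# assumes (inputs(), connected_ports(), node(), NODE_NAME, execute()).
class _StubPort(object):
    def __init__(self, upstream_node=None):
        self._upstream_node = upstream_node
    def connected_ports(self):
        # a root node has no upstream connection
        return [] if self._upstream_node is None else [self]
    def node(self):
        return self._upstream_node

class _StubNode(object):
    def __init__(self, name, upstream=None):
        self.NODE_NAME = name
        self._ports = {'in': _StubPort(upstream)}
    def inputs(self):
        return self._ports
    def execute(self):
        print ('run', self.NODE_NAME)

def _example_execute():
    a = _StubNode('A')
    b = _StubNode('B', upstream=a)
    c = _StubNode('C', upstream=b)
    executeNode(c) # expected execution order: A, B, C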
| StarcoderdataPython |
3468675 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 20 09:29:20 2018
@author: Tilly
"""
import time
import requests
request_id = 677078
def check_status(request_id):
"""
uses the request ID to return the state of the request
Example
input:
request_id = 677078
output:
'COMPLETE', 'PENDING', 'CANCELED' or 'EXPIRED'
"""
request_info = requests.get('https://observe.lco.global/api/userrequests/{}/'.format(request_id)).json()
return request_info.get('state')
while True:
status = check_status(request_id)
if status == 'COMPLETE':
#RUN CODE TO DOWNLOAD THOSE FILES
break
    elif status == 'PENDING':
        time.sleep(6 * 60 * 60)  # wait six hours before checking again
        continue
    else:
        print('REQUEST NO LONGER ACTIVE')
        break
| StarcoderdataPython |
4851930 | from panini import app as panini_app
from panini.middleware.prometheus_monitoring import PrometheusMonitoringMiddleware
from app.utils import Environment
Environment.load("test")
BROKER_HOST, BROKER_PORT = Environment.get_broker()
CONFIG_PATH = Environment.get_config_path()
some_config = Environment.get_config("some-config.yml", path=CONFIG_PATH)
app = panini_app.App(
service_name="template-app",
host=BROKER_HOST,
port=BROKER_PORT,
)
log = app.logger
message = {
"key1": "value1",
"key2": [1, 2, 3, 4],
}
@app.task()
async def publish():
for _ in range(10):
await app.publish(subject="some.request.subject", message=message)
log.info(f"send message {message}")
@app.task(interval=2)
async def request_periodically():
for _ in range(10):
response = await app.request(subject="some.request.subject", message=message)
log.info(f"get response from periodic request {response}")
@app.listen("some.request.subject")
async def receive_messages(msg):
log.info(f"got message {msg.subject}:{msg.data}")
return {"success": True, "data": msg.data}
if __name__ == "__main__":
if Environment.get_prometheus_pushgateway_url():
app.add_middleware(
PrometheusMonitoringMiddleware,
pushgateway_url=Environment.get_prometheus_pushgateway_url(),
)
app.start()
| StarcoderdataPython |
71787 | <reponame>zaltoprofen/chainer<gh_stars>1000+
import unittest
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16,
'forward_options': {'rtol': 3e-3, 'atol': 3e-3},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float32,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
{'dtype': numpy.float64,
'forward_options': {},
'backward_options': {'rtol': 1e-1, 'atol': 1e-1}},
],
[{'reduce': 'no'},
{'reduce': 'mean'},
],
[{'norm': 'L1'},
{'norm': 'L2'},
],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64},
],
))
class TestHinge(unittest.TestCase):
def setUp(self):
self._config_user = chainer.using_config('dtype', self.dtype)
self._config_user.__enter__()
shape = (10, 5)
self.x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
# Avoid values around -1.0 for stability
self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
self.t = numpy.random.randint(
0, shape[1], shape[:1]).astype(self.label_dtype)
if self.reduce == 'no':
self.gy = numpy.random.uniform(
-1, 1, self.x.shape).astype(self.dtype)
def tearDown(self):
self._config_user.__exit__(None, None, None)
def check_forward(self, x_data, t_data):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data, requires_grad=False)
loss = functions.hinge(x_val, t_val, self.norm, self.reduce)
if self.reduce == 'mean':
self.assertEqual(loss.data.shape, ())
else:
self.assertEqual(loss.data.shape, self.x.shape)
self.assertEqual(loss.data.dtype, self.dtype)
loss_value = cuda.to_cpu(loss.data)
# Compute expected value
for i in six.moves.range(self.x.shape[0]):
self.x[i, self.t[i]] *= -1
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
self.x[i, j] = max(0, 1.0 + self.x[i, j])
if self.norm == 'L1':
loss_expect = self.x
elif self.norm == 'L2':
loss_expect = self.x ** 2
if self.reduce == 'mean':
loss_expect = numpy.sum(loss_expect) / self.x.shape[0]
testing.assert_allclose(
loss_expect, loss_value, **self.forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_forward_chainerx_native(self):
self.check_forward(
backend.to_chx(self.x), backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_forward_chainerx_cuda(self):
self.check_forward(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
def check_backward(self, x_data, t_data):
def f(x, t):
return functions.hinge(x, t, self.norm)
gradient_check.check_backward(
f, (x_data, t_data), None, dtype='d', **self.backward_options)
def check_backward_chainerx(self, x_data, t_data):
# TODO(niboshi): gradient_check does not support integer input
# (no_grads) for ChainerX. Support it and merge this method with
# `self.check_backward`.
def f(x):
return functions.hinge(x, t_data, self.norm)
gradient_check.check_backward(
f, (x_data,), None, dtype='d', **self.backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.t)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
@attr.chainerx
def test_backward_chainerx_native(self):
self.check_backward_chainerx(
backend.to_chx(self.x),
backend.to_chx(self.t))
@attr.gpu
@attr.chainerx
def test_backward_chainerx_cuda(self):
self.check_backward_chainerx(
backend.to_chx(cuda.to_gpu(self.x)),
backend.to_chx(cuda.to_gpu(self.t)))
class TestHingeInvalidOption(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)
def check_invalid_norm_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(NotImplementedError):
functions.hinge(x, t, 'invalid_norm', 'mean')
def test_invalid_norm_option_cpu(self):
self.check_invalid_norm_option(numpy)
@attr.gpu
def test_invalid_norm_option_gpu(self):
self.check_invalid_norm_option(cuda.cupy)
def check_invalid_reduce_option(self, xp):
x = xp.asarray(self.x)
t = xp.asarray(self.t)
with self.assertRaises(ValueError):
functions.hinge(x, t, 'L1', 'invalid_option')
def test_invalid_reduce_option_cpu(self):
self.check_invalid_reduce_option(numpy)
@attr.gpu
def test_invalid_reduce_option_gpu(self):
self.check_invalid_reduce_option(cuda.cupy)
testing.run_module(__name__, __file__)
| StarcoderdataPython |
3391048 | <reponame>jackyin68/jobsearch<gh_stars>1-10
from django.db import models
# Create your models here.
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
analyzer, Completion, Keyword, Text
from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=["localhost"])
class NlpCustomAnalyzer(_CustomAnalyzer):
def get_analysis_definition(self):
return {}
ik_analyzer = NlpCustomAnalyzer("ik_max_word", filter=["lowercase"])
class NlpJobType(DocType):
suggest = Completion(analyzer=ik_analyzer)
title = Text(analyzer="ik_max_word")
company = Text(analyzer="ik_max_word")
location = Text(analyzer="ik_max_word")
job_description = Text(analyzer="ik_max_word")
url = Text()
class Meta:
index = "nlpjob"
doc_type = "jobinfo"
if __name__ == "__main__":
NlpJobType.init()
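# Hedged usage sketch (not part of the original file): once init() has created the
# "nlpjob" index, a crawled job posting could be stored roughly like this.  All field
# values below are made-up placeholders.
#
#   job = NlpJobType()
#   job.title = "NLP engineer"
#   job.company = "ExampleCorp"
#   job.location = "Shanghai"
#   job.job_description = "Build search and ranking pipelines"
#   job.url = "https://example.com/jobs/1"
#   job.save()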
| StarcoderdataPython |
1945856 | # import the connection credentials
import auth as auth
# import psycopg2
import psycopg2, psycopg2.extensions, psycopg2.extras
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # avoid encoding problems with diacritics
import csv
def ustvari_tabelo():
cur.execute("""
CREATE TABLE obisk (
st_izleta INTEGER PRIMARY KEY,
st_dni NUMERIC NOT NULL,
id_mesta NUMERIC REFERENCES mesto(id),
id_namestitve INTEGER REFERENCES namestitev(id_namestitve),
id_transporta INTEGER REFERENCES transport(id_transporta)
);
""")
conn.commit()
def pobrisi_tabelo():
cur.execute("""
DROP TABLE obisk;
""")
conn.commit()
def uvozi_podatke():
with open("podatki/obisk.csv", encoding="utf-8", errors='ignore') as f:
rd = csv.reader(f)
        next(rd) # skip the header row
for r in rd:
cur.execute("""
INSERT INTO obisk
(st_izleta,st_dni,id_mesta,id_namestitve,id_transporta)
VALUES (%s, %s, %s,%s, %s)
""", r)
# rid, = cur.fetchone()
print("Uvožen izlet z ID-jem %s" % (r[0]))
conn.commit()
conn = psycopg2.connect(database=auth.db, host=auth.host, user=auth.user, password=auth.password)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
#pobrisi_tabelo()
#ustvari_tabelo()
uvozi_podatke() | StarcoderdataPython |
11353156 | <filename>pi/emo_reco/helpers/nn/mxconv/mxgooglenet.py
# import the necessary packages
import mxnet as mx
class MxGoogLeNet:
@staticmethod
def conv_module(data, K, kX, kY, pad=(0, 0), stride=(1, 1)):
# define the CONV => BN => RELU pattern
conv = mx.sym.Convolution(data=data, kernel=(kX, kY),
num_filter=K, pad=pad, stride=stride)
bn = mx.sym.BatchNorm(data=conv)
act = mx.sym.Activation(data=bn, act_type="relu")
# return the block
return act
@staticmethod
def inception_module(data, num1x1, num3x3Reduce, num3x3,
num5x5Reduce, num5x5, num1x1Proj):
# the first branch of the Inception module consists of 1x1
# convolutions
conv_1x1 = MxGoogLeNet.conv_module(data, num1x1, 1, 1)
# the second branch of the Inception module is a set of 1x1
# convolutions followed by 3x3 convolutions
conv_r3x3 = MxGoogLeNet.conv_module(data, num3x3Reduce, 1, 1)
conv_3x3 = MxGoogLeNet.conv_module(conv_r3x3, num3x3, 3, 3,
pad=(1, 1))
# the third branch of the Inception module is a set of 1x1
# convolutions followed by 5x5 convolutions
conv_r5x5 = MxGoogLeNet.conv_module(data, num5x5Reduce, 1, 1)
conv_5x5 = MxGoogLeNet.conv_module(conv_r5x5, num5x5, 5, 5,
pad=(2, 2))
# the final branch of the Inception module is the POOL +
# projection layer set
pool = mx.sym.Pooling(data=data, pool_type="max", pad=(1, 1),
kernel=(3, 3), stride=(1, 1))
conv_proj = MxGoogLeNet.conv_module(pool, num1x1Proj, 1, 1)
# concatenate the filters across the channel dimension
concat = mx.sym.Concat(*[conv_1x1, conv_3x3, conv_5x5,
conv_proj])
# return the block
return concat
@staticmethod
def build(classes):
# data input
data = mx.sym.Variable("data")
# Block #1: CONV => POOL => CONV => CONV => POOL
conv1_1 = MxGoogLeNet.conv_module(data, 64, 7, 7,
pad=(3, 3), stride=(2, 2))
pool1 = mx.sym.Pooling(data=conv1_1, pool_type="max",
pad=(1, 1), kernel=(3, 3), stride=(2, 2))
conv1_2 = MxGoogLeNet.conv_module(pool1, 64, 1, 1)
conv1_3 = MxGoogLeNet.conv_module(conv1_2, 192, 3, 3,
pad=(1, 1))
pool2 = mx.sym.Pooling(data=conv1_3, pool_type="max",
pad=(1, 1), kernel=(3, 3), stride=(2, 2))
# Block #3: (INCEP * 2) => POOL
in3a = MxGoogLeNet.inception_module(pool2, 64, 96, 128, 16,
32, 32)
in3b = MxGoogLeNet.inception_module(in3a, 128, 128, 192, 32,
96, 64)
pool3 = mx.sym.Pooling(data=in3b, pool_type="max",
pad=(1, 1), kernel=(3, 3), stride=(2, 2))
# Block #4: (INCEP * 5) => POOL
in4a = MxGoogLeNet.inception_module(pool3, 192, 96, 208, 16,
48, 64)
in4b = MxGoogLeNet.inception_module(in4a, 160, 112, 224, 24,
64, 64)
in4c = MxGoogLeNet.inception_module(in4b, 128, 128, 256, 24,
64, 64)
in4d = MxGoogLeNet.inception_module(in4c, 112, 144, 288, 32,
64, 64)
in4e = MxGoogLeNet.inception_module(in4d, 256, 160, 320, 32,
128, 128,)
pool4 = mx.sym.Pooling(data=in4e, pool_type="max",
pad=(1, 1), kernel=(3, 3), stride=(2, 2))
# Block #5: (INCEP * 2) => POOL => DROPOUT
in5a = MxGoogLeNet.inception_module(pool4, 256, 160, 320, 32,
128, 128)
in5b = MxGoogLeNet.inception_module(in5a, 384, 192, 384, 48,
128, 128)
pool5 = mx.sym.Pooling(data=in5b, pool_type="avg",
kernel=(7, 7), stride=(1, 1))
do = mx.sym.Dropout(data=pool5, p=0.4)
# softmax classifier
flatten = mx.sym.Flatten(data=do)
fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=classes)
model = mx.sym.SoftmaxOutput(data=fc1, name="softmax")
# return the network architecture
return model
if __name__ == "__main__":
# render a visualization of the network
model = MxGoogLeNet.build(1000)
v = mx.viz.plot_network(model, shape={"data": (1, 3, 224, 224)},
node_attrs={"shape": "rect", "fixedsize": "false"})
v.render() | StarcoderdataPython |
4935977 | # -*- coding: utf-8 -*-
"""Module scanning for compression support
"""
# import basic stuff
# import own stuff
import tlsmate.msg as msg
import tlsmate.plugin as plg
import tlsmate.tls as tls
# import other stuff
class ScanCompression(plg.Worker):
name = "compression"
descr = "scan for compression support"
prio = 30
def _compression(self, version):
self.server_profile.allocate_features()
features = self.server_profile.features
if not hasattr(features, "compression"):
features.compression = []
values = self.server_profile.get_profile_values([version])
self.client.init_profile(profile_values=values)
comp_methods = tls.CompressionMethod.all()
while comp_methods:
self.client.profile.compression_methods = comp_methods
server_hello = None
with self.client.create_connection() as conn:
conn.send(msg.ClientHello)
server_hello = conn.wait(msg.ServerHello)
if server_hello is None:
break
if server_hello.version is not version:
break
if server_hello.compression_method not in comp_methods:
break
comp_methods.remove(server_hello.compression_method)
if server_hello.compression_method not in features.compression:
features.compression.append(server_hello.compression_method)
def run(self):
for version in self.server_profile.get_versions(exclude=[tls.Version.SSL20]):
self._compression(version)
| StarcoderdataPython |
1842395 | from database import SQL
import conll
def reorder(trees, outfile): # reorder the CoNLL trees and rename all sent_id values
"""
Reorders the trees based on the nr sentencefeature, adds updated text and sentence_id.
Once this is done, the trees are written to a new file.
input: List(Tree), Str
does: Writes <outfile>
output: None
"""
prefix = "_".join(trees[0].sentencefeatures.get("sent_id").split("_")[:-1])
sortable = sorted(list([(int(t.sentencefeatures.get("nr")), t) for t in trees]))
new_trees = list()
for nr, tree in sortable:
        # adding metadata: rename sent_id, numbering from 0
tree.sentencefeatures["text"] = tree.sentence()
tree.sentencefeatures["sent_id"] = prefix+"_"+str(nr-1)
# removing useless metadata
del tree.sentencefeatures["nr"]
new_trees.append(tree)
conll.trees2conllFile(new_trees, outfile)
if __name__ == "__main__":
## Open project database
    sql = SQL("NaijaSUD") # enter the project name
db,cursor=sql.open()
## Use 2 functions :
# - exportLastBestAnnotations in lib/database.py -> writes a file with trees and their rank
# - reorder in lib/yuchen.py -> reorder trees based on their rank, write a file with the output
    users, c = sql.exportLastBestAnnotations(115,"P_ABJ_GWA_06_Ugo-lifestory_PRO") # enter the textid and text name; the textid is visible at https://arborator.ilpga.fr/editor.cgi?project=NaijaSUD&textid=74&opensentence=1
    print(users, c)
    fpath = "E:/TAL/Stage/arborator/projects/NaijaSUD/export/P_ABJ_GWA_06_Ugo.lifestory_PRO.most.recent.trees.with.feats.conllu" # path to the exported file
    trees = conll.conllFile2trees(fpath) # load the trees, then reorder and rename sent_id
reorder(trees, fpath+"_reordered") | StarcoderdataPython |
1767992 | <gh_stars>0
import numpy as np
from scipy.integrate import solve_bvp
class boundary_layer():
def __init__(self, stream, plate, Pr):
self.T_w = 0
self.T_e = 0
self.deltaT = self.T_w - self.T_e
# Prandtl
self.Pr = Pr
# Nusselt
self.Nusselt = []
self.stream = stream
self.plate = plate
self.solve()
def solve(self):
def similitude_ode(x, y, parameters):
# y = [f, f', f'', theta, theta']
# 0 1 2 3 4
Pr = parameters[0]
return np.array([
# flow edo
y[1], # f' = df/dn
y[2], # f'' = d^2f/dn^2
- 3 * y[0] * y[2] + (2 * (y[1]**2)) - y[3], # f''' = - 3ff'' + 2(f')^2 - theta,
# heat edo
y[4], # theta' = dtheta/dn
- 3 * Pr * y[0] * y[4], # theta'' = - 3 Pr f theta'
])
def boundary_conditions(ya, yb, parameters):
# ya = y(eta = 0)
# yb = y(eta -> infty)
# f(0) = 0 ; f'(0) = 0 ; theta(0) = 1
# f'(infty) = 0 ; theta(infty) = 0
return np.array([ya[0], ya[1], ya[3] - 1, yb[1], yb[3]])
self.plate.generate_discrete_space()
self.result = solve_bvp(fun=similitude_ode, bc=boundary_conditions, x=self.plate.x_mesh, y=self.plate.u, p=[self.Pr])
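# Hedged usage sketch (assumption: the real `stream` and `plate` objects are defined
# elsewhere in this project).  A minimal stand-in plate that provides a mesh and an
# initial guess of shape (5, n) is enough to exercise solve():
#
#   import numpy as np
#   class _Plate:
#       def generate_discrete_space(self):
#           self.x_mesh = np.linspace(0, 10, 50)
#           self.u = np.zeros((5, self.x_mesh.size))  # [f, f', f'', theta, theta']
#
#   bl = boundary_layer(stream=None, plate=_Plate(), Pr=0.7)
#   print(bl.result.status, bl.result.message)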
| StarcoderdataPython |
35962 | a = input()
b = input()
print(ord(a) + ord(b))
| StarcoderdataPython |
235410 | <gh_stars>1-10
import datetime
import time
import numpy as np
import random
import ray
import os
from filelock import FileLock
####################################################################################################
####################################################################################################
####################################################################################################
@ray.remote
class SummaryWriter():
def __init__(self, configuration):
self.storage_path = configuration['storage_path']
self.directory_path = None
self.file_key = None
self.per_node = configuration['summary']['per_node']
self.precision = configuration['summary']['precision']
self.initialize_directory()
def initialize_directory(self):
"""
Create a new directory to store the data of the current experiment.
"""
self.directory_path = os.path.join(self.storage_path, self.timestamp())
os.makedirs(self.directory_path, exist_ok=True)
def remove_locks(self):
"""
Removes the lock files since they are no longer required.
"""
for file in os.listdir(self.directory_path):
if file.endswith('.lock'):
file_path = os.path.join(self.directory_path, file)
os.remove(file_path)
def timestamp(self, fmt='%d\\%m\\%y_%H:%M:%S'):
"""
Returns current timestamp.
"""
return datetime.datetime.fromtimestamp(time.time()).strftime(fmt)
def write_summary(self, stamp, observers):
# Iterate through all observers.
for observer in observers:
observer_summary = observer.file_summary(per_node=self.per_node, precision=self.precision)
# Iterate through all observations made by the observer.
for observation in observer_summary:
# Use observation name to get file.
file_path = os.path.join(self.directory_path, observation[0] + '_' + stamp)
lock_path = file_path + '.lock'
with FileLock(lock_path):
with open(file_path, 'a') as file:
# Write observation data to the last line.
file.write(observation[1])
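# Hedged usage sketch (assumption: ray.init() has been called and `configuration`
# contains 'storage_path' plus a 'summary' dict with 'per_node' and 'precision').
# Because SummaryWriter is a @ray.remote actor, it is created with .remote() and its
# methods return object refs:
#
#   writer = SummaryWriter.remote(configuration)
#   ray.get(writer.write_summary.remote("step_0", observers))
#   ray.get(writer.remove_locks.remote())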
####################################################################################################
####################################################################################################
#################################################################################################### | StarcoderdataPython |
4863869 | from Configurables import DaVinci
DaVinci().DataType = '2012'
DaVinci().Simulation = True
DaVinci().TupleFile = 'mc.root'
# DaVinci().HistogramFile = 'mc-histo.root'
from Configurables import LHCbApp
LHCbApp().CondDBtag = "sim-20130503-1-vc-md100"
LHCbApp().DDDBtag = "dddb-20130503-1"
from GaudiConf import IOHelper
IOHelper().inputFiles([
'./data/Bd2DstTauNu-2012-md-py6-sim08a/00028778_00000009_1.dsttaunu.safestriptrig.dst',
'./data/Bd2DstTauNu-2012-md-py6-sim08a/00028778_00000010_1.dsttaunu.safestriptrig.dst',
], clear=True)
| StarcoderdataPython |
1602074 | import flask
from flask import request, jsonify
app = flask.Flask(__name__)
app.config['DEBUG'] = True
fruits = [
{
'name' : 'watermelon',
'price' : 12
},
{
'name' : 'banana',
'price' : 20
},
{
'name' : 'orange',
'price' : 100
}
]
@app.route("/", methods=['GET'])
def home():
return "<h1>Watermelon Heart</h1><p>Do you like watermelons?</p>"
@app.route("/watermelon", methods=['GET'])
def hello_watermelon():
return "<h1>Hello Watermelon</h1>"
@app.route("/api/v1/resources/fruits/all", methods=['GET'])
def display_fruits():
return jsonify(fruits)
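# Hedged usage sketch: with the development server started below, the endpoints can be
# exercised from another shell (host/port assumed to be Flask's defaults):
#   curl http://127.0.0.1:5000/
#   curl http://127.0.0.1:5000/api/v1/resources/fruits/all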
app.run() | StarcoderdataPython |
1684546 | <filename>tests/test_5_adding_functions_to_the_mix.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
from nose.tools import assert_equals, assert_raises_regexp, \
assert_raises, assert_true, assert_is_instance
from diylang.ast import is_list
from diylang.evaluator import evaluate
from diylang.parser import parse
from diylang.types import Closure, DiyLangError, Environment
"""
This part is all about defining and using functions.
We'll start by implementing the `lambda` form which is used to create function
closures.
"""
def test_lambda_evaluates_to_closure():
"""TEST 5.1: The lambda form should evaluate to a Closure
Tip: You'll find the Closure class ready in types.py, just finish the
constructor.
"""
ast = ["lambda", [], 42]
closure = evaluate(ast, Environment())
assert_is_instance(closure, Closure)
def test_lambda_closure_keeps_defining_env():
"""TEST 5.2: The closure should keep a copy of the environment where it was
defined.
Once we start calling functions later, we'll need access to the environment
from when the function was created in order to resolve all free variables.
"""
env = Environment({"foo": 1, "bar": 2})
ast = ["lambda", [], 42]
closure = evaluate(ast, env)
assert_equals(closure.env, env)
def test_lambda_closure_holds_function():
"""TEST 5.3: The closure contains the parameter list and function body
too."""
closure = evaluate(parse("(lambda (x y) (+ x y))"), Environment())
assert_equals(["x", "y"], closure.params)
assert_equals(["+", "x", "y"], closure.body)
def test_lambda_arguments_are_lists():
"""TEST 5.4: The parameters of a `lambda` should be a list."""
closure = evaluate(parse("(lambda (x y) (+ x y))"), Environment())
assert_true(is_list(closure.params))
with assert_raises(DiyLangError):
evaluate(parse("(lambda not-a-list (body of fn))"), Environment())
def test_lambda_number_of_arguments():
"""TEST 5.5: The `lambda` form should expect exactly two arguments."""
with assert_raises_regexp(DiyLangError, "number of arguments"):
evaluate(parse("(lambda (foo) (bar) (baz))"), Environment())
def test_defining_lambda_with_error_in_body():
"""TEST 5.6: The function body should not be evaluated when the lambda is
defined.
The call to `lambda` should return a function closure holding, among other
things the function body. The body should not be evaluated before the
function is called.
"""
ast = parse("""
(lambda (x y)
(function body ((that) would never) work))
""")
assert_is_instance(evaluate(ast, Environment()), Closure)
"""
Now that we have the `lambda` form implemented, let's see if we can call some
functions.
When evaluating ASTs which are lists, if the first element isn't one of the
special forms we have been working with so far, it is a function call. The
first element of the list is the function, and the rest of the elements are
arguments.
"""
def test_evaluating_call_to_closure():
"""TEST 5.7: The first case we'll handle is when the AST is a list with an
actual closure as the first element.
In this first test, we'll start with a closure with no arguments and no
free variables. All we need to do is to evaluate and return the function
body.
"""
closure = evaluate(parse("(lambda () (+ 1 2))"), Environment())
ast = [closure]
result = evaluate(ast, Environment())
assert_equals(3, result)
def test_evaluating_call_to_closure_with_arguments():
"""TEST 5.8: The function body must be evaluated in an environment where
the parameters are bound.
Create an environment where the function parameters (which are stored in
the closure) are bound to the actual argument values in the function call.
Use this environment when evaluating the function body.
Tip: The `zip` and `dict` functions should prove useful when constructing
the new environment.
"""
env = Environment()
closure = evaluate(parse("(lambda (a b) (+ a b))"), env)
ast = [closure, 4, 5]
assert_equals(9, evaluate(ast, env))
def test_creating_closure_with_environment():
"""TEST 5.9: The function parameters must properly shadow the outer scope's
bindings.
When the same bindings exist in the environment and function parameters,
the function parameters must properly overwrite the environment bindings.
"""
env = Environment({"a": 42, "b": "foo"})
closure = evaluate(parse("(lambda (a b) (+ a b))"), env)
ast = [closure, 4, 5]
assert_equals(9, evaluate(ast, env))
def test_call_to_function_should_evaluate_arguments():
"""TEST 5.10: Call to function should evaluate all arguments.
When a function is applied, the arguments should be evaluated before being
bound to the parameter names.
"""
env = Environment()
closure = evaluate(parse("(lambda (a) (+ a 5))"), env)
ast = [closure, parse("(if #f 0 (+ 10 10))")]
assert_equals(25, evaluate(ast, env))
def test_evaluating_call_to_closure_with_free_variables():
"""TEST 5.11: The body should be evaluated in the environment from the closure.
The function's free variables, i.e. those not specified as part of the
parameter list, should be looked up in the environment from where the
function was defined. This is the environment included in the closure. Make
sure this environment is used when evaluating the body.
"""
closure = evaluate(parse("(lambda (x) (+ x y))"), Environment({"y": 1}))
ast = [closure, 0]
result = evaluate(ast, Environment({"y": 2}))
assert_equals(1, result)
"""
Okay, now we're able to evaluate ASTs with closures as the first element. But
normally the closures don't just happen to be there all by themselves.
Generally we'll find some expression, evaluate it to a closure, and then
evaluate a new AST with the closure just like we did above.
(some-exp arg1 arg2 ...) -> (closure arg1 arg2 ...) -> result-of-function-call
"""
def test_calling_very_simple_function_in_environment():
"""TEST 5.12: A call to a symbol corresponds to a call to its value in the
environment.
When a symbol is the first element of the AST list, it is resolved to its
value in the environment (which should be a function closure). An AST with
the variables replaced with its value should then be evaluated instead.
"""
env = Environment()
evaluate(parse("(define add (lambda (x y) (+ x y)))"), env)
assert_is_instance(env.lookup("add"), Closure)
result = evaluate(parse("(add 1 2)"), env)
assert_equals(3, result)
def test_calling_lambda_directly():
"""TEST 5.13: It should be possible to define and call functions directly.
A lambda definition in the call position of an AST should be evaluated, and
then evaluated as before.
"""
ast = parse("((lambda (x) x) 42)")
result = evaluate(ast, Environment())
assert_equals(42, result)
def test_calling_complex_expression_which_evaluates_to_function():
"""TEST 5.14: Actually, all ASTs that are lists beginning with anything except
atoms, or with a symbol, should be evaluated and then called.
In this test, a call is done to the if-expression. The `if` should be
evaluated, which will result in a `lambda` expression. The lambda is
evaluated, giving a closure. The result is an AST with a `closure` as the
first element, which we already know how to evaluate.
"""
ast = parse("""
((if #f
wont-evaluate-this-branch
(lambda (x) (+ x y)))
2)
""")
env = Environment({'y': 3})
assert_equals(5, evaluate(ast, env))
"""
Now that we have the happy cases working, let's see what should happen when
function calls are done incorrectly.
"""
def test_calling_atom_raises_exception():
"""TEST 5.15: A function call to a non-function should result in an
error."""
with assert_raises_regexp(DiyLangError, "not a function"):
evaluate(parse("(#t 'foo 'bar)"), Environment())
with assert_raises_regexp(DiyLangError, "not a function"):
evaluate(parse("(42)"), Environment())
def test_make_sure_arguments_to_functions_are_evaluated():
"""TEST 5.16: The arguments passed to functions should be evaluated
We should accept parameters that are produced through function
calls. If you are seeing stack overflows, e.g.
RuntimeError: maximum recursion depth exceeded while calling a Python
object
then you should double-check that you are properly evaluating the passed
function arguments.
"""
env = Environment()
res = evaluate(parse("((lambda (x) x) (+ 1 2))"), env)
assert_equals(res, 3)
def test_calling_with_wrong_number_of_arguments():
"""TEST 5.17: Functions should raise exceptions when called with wrong
number of arguments."""
env = Environment()
evaluate(parse("(define fn (lambda (p1 p2) 'whatever))"), env)
error_msg = "wrong number of arguments, expected 2 got 3"
with assert_raises_regexp(DiyLangError, error_msg):
evaluate(parse("(fn 1 2 3)"), env)
def test_calling_nothing():
"""TEST 5.18: Calling nothing should fail (remember to quote empty data
lists)"""
with assert_raises(DiyLangError):
evaluate(parse("()"), Environment())
def test_make_sure_arguments_are_evaluated_in_correct_environment():
"""Test 5.19: Function arguments should be evaluated in correct environment
Function arguments should be evaluated in the environment where the
function is called, and not in the environment captured by the function.
"""
env = Environment({'x': 3})
evaluate(parse("(define foo (lambda (x) x))"), env)
env = env.extend({'x': 4})
assert_equals(evaluate(parse("(foo (+ x 1))"), env), 5)
"""
One final test to see that recursive functions are working as expected.
The good news: this should already be working by now :)
"""
def test_calling_function_recursively():
"""TEST 5.20: Tests that a named function is included in the environment where
it is evaluated.
"""
env = Environment()
evaluate(parse("""
(define my-fn
;; A meaningless, but recursive, function
(lambda (x)
(if (eq x 0)
42
(my-fn (- x 1)))))
"""), env)
assert_equals(42, evaluate(parse("(my-fn 0)"), env))
assert_equals(42, evaluate(parse("(my-fn 10)"), env))
| StarcoderdataPython |
1751376 | <reponame>JFF-Bohdan/tamaku<filename>tests/test_solver_implementation.py
from solver.solver_impl import find_best_step, game_result_to_string, play_game_bool
def test_solver_implementation_by_fixture():
with open("./tests/valid_output_fixture.txt", "rt") as file:
for line in file:
if line is None:
break
            line = str(line).strip()
if len(line) == 0:
continue
task = [str(item).strip() for item in line.split() if len(str(item).strip()) > 0]
assert len(task) == 2
assert str(task[0]).isnumeric()
value = int(task[0])
assert game_result_to_string(play_game_bool(value)) == task[1]
def test_solver_low_level_func():
assert find_best_step(17) == 8
| StarcoderdataPython |
3491986 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import codecs
from collections import namedtuple
from functools import reduce
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives import unchanged_required
from docutils import nodes
from powerline.lib.unicode import u
AUTHOR_LINE_START = '* `'
GLYPHS_AUTHOR_LINE_START = '* The glyphs in the font patcher are created by '
def get_authors():
credits_file = os.path.join(os.path.dirname(__file__), 'license-and-credits.rst')
authors = []
glyphs_author = None
with codecs.open(credits_file, encoding='utf-8') as CF:
section = None
prev_line = None
for line in CF:
line = line[:-1]
if line and not line.replace('-', ''):
section = prev_line
elif section == 'Authors':
if line.startswith(AUTHOR_LINE_START):
authors.append(line[len(AUTHOR_LINE_START):line.index('<')].strip())
elif section == 'Contributors':
if line.startswith(GLYPHS_AUTHOR_LINE_START):
assert(not glyphs_author)
glyphs_author = line[len(GLYPHS_AUTHOR_LINE_START):line.index(',')].strip()
prev_line = line
return {
'authors': ', '.join(authors),
'glyphs_author': glyphs_author,
}
class AutoManSubparsers(object):
def __init__(self):
self.parsers = []
def add_parser(self, command, *args, **kwargs):
self.parsers.append((command, AutoManParser(*args, **kwargs)))
return self.parsers[-1][1]
Argument = namedtuple('Argument', ('names', 'help', 'choices', 'metavar', 'required', 'nargs', 'is_option', 'is_long_option', 'is_short_option', 'multi', 'can_be_joined'))
def parse_argument(*args, **kwargs):
is_option = args[0].startswith('-')
is_long_option = args[0].startswith('--')
is_short_option = is_option and not is_long_option
action = kwargs.get('action', 'store_true')
multi = kwargs.get('action') in ('append',)
nargs = kwargs.get('nargs') or (1 if kwargs.get('metavar') or action in ('append',) else 0)
return Argument(
names=args,
help=u(kwargs.get('help', '')),
choices=[str(choice) for choice in kwargs.get('choices', [])],
metavar=kwargs.get('metavar') or args[-1].lstrip('-').replace('-', '_').upper(),
required=kwargs.get('required', False) if is_option else (
kwargs.get('nargs') not in ('?',)),
nargs=nargs,
multi=multi,
is_option=is_option,
is_long_option=is_long_option,
is_short_option=is_short_option,
can_be_joined=(is_short_option and not multi and not nargs)
)
class AutoManGroup(object):
is_short_option = False
is_option = False
is_long_option = False
can_be_joined = False
def __init__(self):
self.arguments = []
self.required = False
def add_argument(self, *args, **kwargs):
self.arguments.append(parse_argument(*args, **kwargs))
class SurroundWith():
def __init__(self, ret, condition, start='[', end=']'):
self.ret = ret
self.condition = condition
self.start = start
self.end = end
def __enter__(self, *args):
if self.condition:
self.ret.append(nodes.Text(self.start))
def __exit__(self, *args):
if self.condition:
self.ret.append(nodes.Text(self.end))
def insert_separators(ret, sep):
for i in range(len(ret) - 1, 0, -1):
ret.insert(i, nodes.Text(sep))
return ret
def format_usage_arguments(arguments, base_length=None):
line = []
prev_argument = None
arg_indexes = [0]
arguments = arguments[:]
while arguments:
argument = arguments.pop(0)
if isinstance(argument, nodes.Text):
line += [argument]
continue
can_join_arguments = (
argument.is_short_option
and prev_argument
and prev_argument.can_be_joined
and prev_argument.required == argument.required
)
if (
prev_argument
and not prev_argument.required
and prev_argument.can_be_joined
and not can_join_arguments
):
line.append(nodes.Text(']'))
arg_indexes.append(len(line))
if isinstance(argument, AutoManGroup):
arguments = (
[nodes.Text(' (')]
+ insert_separators(argument.arguments[:], nodes.Text(' |'))
+ [nodes.Text(' )')]
+ arguments
)
else:
if not can_join_arguments:
line.append(nodes.Text(' '))
with SurroundWith(line, not argument.required and not argument.can_be_joined):
if argument.can_be_joined and not can_join_arguments and not argument.required:
line.append(nodes.Text('['))
if argument.is_option:
line.append(nodes.strong())
name = argument.names[0]
if can_join_arguments:
name = name[1:]
# `--` is automatically transformed into – (EN DASH)
# when parsing into HTML. We do not need this.
line[-1] += [nodes.Text(char) for char in name]
if argument.nargs:
assert(argument.nargs in (1, '?'))
with SurroundWith(line, argument.nargs == '?' and argument.is_option):
if argument.is_long_option:
line.append(nodes.Text('='))
line.append(nodes.emphasis(text=argument.metavar))
elif not argument.is_option:
line.append(nodes.strong(text=argument.metavar))
if argument.multi:
line.append(nodes.Text('…'))
prev_argument = argument
if (
prev_argument
and prev_argument.can_be_joined
and not prev_argument.required
):
line.append(nodes.Text(']'))
arg_indexes.append(len(line))
ret = []
if base_length is None:
ret = line
else:
length = base_length
prev_arg_idx = arg_indexes.pop(0)
while arg_indexes:
next_arg_idx = arg_indexes.pop(0)
arg_length = sum((len(element.astext()) for element in line[prev_arg_idx:next_arg_idx]))
if length + arg_length > 68:
ret.append(nodes.Text('\n' + (' ' * base_length)))
length = base_length
ret += line[prev_arg_idx:next_arg_idx]
length += arg_length
prev_arg_idx = next_arg_idx
return ret
LITERAL_RE = re.compile(r"`(.*?)'")
def parse_argparse_text(text):
rst_text = LITERAL_RE.subn(r'``\1``', text)[0]
ret = []
for i, text in enumerate(rst_text.split('``')):
if i % 2 == 0:
ret.append(nodes.Text(text))
else:
ret.append(nodes.literal(text=text))
return ret
def flatten_groups(arguments):
for argument in arguments:
if isinstance(argument, AutoManGroup):
for group_argument in flatten_groups(argument.arguments):
yield group_argument
else:
yield argument
def format_arguments(arguments):
return [nodes.definition_list(
'', *[
nodes.definition_list_item(
'',
nodes.term(
# node.Text('') is required because otherwise for some
# reason first name node is seen in HTML output as
# `<strong>abc</strong>`.
'', *([nodes.Text('')] + (
insert_separators([
nodes.strong('', '', *[nodes.Text(ch) for ch in name])
for name in argument.names
], ', ')
if argument.is_option else
# Unless node.Text('') is here metavar is written in
# bold in the man page.
[nodes.Text(''), nodes.emphasis(text=argument.metavar)]
) + (
[] if not argument.is_option or not argument.nargs else
[nodes.Text(' '), nodes.emphasis('', argument.metavar)]
))
),
nodes.definition('', nodes.paragraph('', *parse_argparse_text(argument.help or ''))),
)
for argument in flatten_groups(arguments)
] + [
nodes.definition_list_item(
'',
nodes.term(
'', nodes.Text(''),
nodes.strong(text='-h'),
nodes.Text(', '),
nodes.strong('', '', nodes.Text('-'), nodes.Text('-help')),
),
nodes.definition('', nodes.paragraph('', nodes.Text('Display help and exit.')))
)
]
)]
def format_subcommand_usage(arguments, subcommands, progname, base_length):
return reduce((lambda a, b: a + reduce((lambda c, d: c + d), b, [])), [
[
[progname]
+ format_usage_arguments(arguments)
+ [nodes.Text(' '), nodes.strong(text=subcmd)]
+ format_usage_arguments(subparser.arguments)
+ [nodes.Text('\n')]
for subcmd, subparser in subparsers.parsers
]
for subparsers in subcommands
], [])
def format_subcommands(subcommands):
return reduce((lambda a, b: a + reduce((lambda c, d: c + d), b, [])), [
[
[
nodes.section(
'',
nodes.title(text='Arguments specific to ' + subcmd + ' subcommand'),
*format_arguments(subparser.arguments),
ids=['subcmd-' + subcmd]
)
]
for subcmd, subparser in subparsers.parsers
]
for subparsers in subcommands
], [])
class AutoManParser(object):
def __init__(self, description=None, help=None):
self.description = description
self.help = help
self.arguments = []
self.subcommands = []
def add_argument(self, *args, **kwargs):
self.arguments.append(parse_argument(*args, **kwargs))
def add_subparsers(self):
self.subcommands.append(AutoManSubparsers())
return self.subcommands[-1]
def add_mutually_exclusive_group(self):
self.arguments.append(AutoManGroup())
return self.arguments[-1]
def automan_usage(self, prog):
block = nodes.literal_block()
progname = nodes.strong()
progname += [nodes.Text(prog)]
base_length = len(prog)
if self.subcommands:
block += format_subcommand_usage(self.arguments, self.subcommands, progname, base_length)
else:
block += [progname]
block += format_usage_arguments(self.arguments, base_length)
return [block]
def automan_description(self):
ret = []
if self.help:
ret += parse_argparse_text(self.help)
ret += format_arguments(self.arguments) + format_subcommands(self.subcommands)
return ret
class AutoMan(Directive):
required_arguments = 1
optional_arguments = 0
option_spec = dict(prog=unchanged_required)
has_content = False
def run(self):
module = self.arguments[0]
template_args = {}
template_args.update(get_authors())
get_argparser = __import__(str(module), fromlist=[str('get_argparser')]).get_argparser
parser = get_argparser(AutoManParser)
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
ids=['synopsis-section'],
)
synopsis_section += parser.automan_usage(self.options['prog'])
description_section = nodes.section(
'', nodes.title(text='Description'),
ids=['description-section'],
)
description_section += parser.automan_description()
author_section = nodes.section(
'', nodes.title(text='Author'),
nodes.paragraph(
'',
nodes.Text('Written by {authors} and contributors. The glyphs in the font patcher are created by {glyphs_author}.'.format(
**get_authors()
))
),
ids=['author-section']
)
issues_url = 'https://github.com/Lokaltog/powerline/issues'
reporting_bugs_section = nodes.section(
'', nodes.title(text='Reporting bugs'),
nodes.paragraph(
'',
nodes.Text('Report {prog} bugs to '.format(
prog=self.options['prog'])),
nodes.reference(
issues_url, issues_url,
refuri=issues_url,
internal=False,
),
nodes.Text('.'),
),
ids=['reporting-bugs-section']
)
return [synopsis_section, description_section, author_section, reporting_bugs_section]
def setup(app):
app.add_directive('automan', AutoMan)
| StarcoderdataPython |
6469467 | #! /usr/bin/python
import json
import altair
import pandas
import datetime
import bs4
import os
import csv
import statistics
import locale
#define constants
#TODO Clean up to removal duplicate calls for yesterday
workingDir = os.getcwd()
yesterdayDate = datetime.date.today() - datetime.timedelta(1)
yesterday = yesterdayDate.strftime('%Y-%m-%d')
yesterdayDay = yesterdayDate.day
yesterdayDayName = yesterdayDate.strftime("%A")
yesterdayWeekNum = int(yesterdayDate.strftime("%W"))
yesterdayDayOfWeek = yesterdayDate.weekday()
yesterdayMonth = yesterdayDate.month
yesterdayMonthName = yesterdayDate.strftime("%B")
yesterdayYear= yesterdayDate.year
yesterdayYearName = yesterdayDate.strftime("%Y")
locale.setlocale(locale.LC_ALL, 'en_CA')
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n//10%10!=1)*(n%10<4)*n%10::4])
#read in counters list
counterList = pandas.read_csv('counters.csv',parse_dates=['FirstDate','FirstFullYear'])
#load data
countFile = "counts-" + str(counterList['CounterID'][0]) + "-export.csv"
dailyCount = pandas.read_csv(countFile, parse_dates=['Date']).set_index('Date')
dailyCount.loc[(dailyCount['WeekNum']==yesterdayWeekNum) & (dailyCount['Weekday']==yesterdayDayOfWeek)]
#Mean of weekly cumulative sum to this day of prior weeks
statistics.mean(dailyCount['WeeklyCumSum'].loc[(dailyCount['WeekNum']==yesterdayWeekNum) & (dailyCount['Weekday']==yesterdayDayOfWeek)])
# monthlyCount = dailyCount[['Date','Count']].resample('M',on='Date').sum()
# monthlyCount['Month'] = monthlyCount.index
# monthlyMerge = pandas.merge(monthlyCount,monthlyCount,left_on=[monthlyCount['Month'].dt.year,monthlyCount['Month'].dt.month],right_on=[monthlyCount['Month'].dt.year-1,monthlyCount['Month'].dt.month])
# monthlyMerge['YoYChange']=monthlyMerge['Count_y']-monthlyMerge['Count_x']
# monthlyCount = pandas.merge(monthlyCount,monthlyMerge[['Month_y','YoYChange']],left_on=monthlyCount['Month'],right_on=monthlyMerge['Month_y'])
# monthlyCount = monthlyCount.drop(columns=['Month_y'])
# #drop the current month, as it is partial
# monthlyCount = monthlyCount[(monthlyCount['Month'].dt.month<yesterdayMonth) | (monthlyCount['Month'].dt.year<yesterdayYear)]
# #Year over year by Month
# testVisual = altair.Chart(monthlyCount).mark_bar().encode(
# altair.X('yearmonth(Month):T', axis=altair.Axis(title='Months')),
# altair.Y('YoYChange:Q', axis=altair.Axis(title='Year over Year Change')),
# color=altair.condition(
# altair.datum.YoYChange > 0,
# altair.value("green"), # The positive color
# altair.value("darkgrey") # The negative color
# )
# ).properties(width=200,height=200)
#Count of days, by year and binned to 500
# testVisual = altair.Chart(dailyCount).mark_circle().encode(
# altair.X('Temp:Q', bin=altair.Bin(step=2)), #
# altair.Y('Date:O', timeUnit='year'),
# altair.Size('count(Count):O')#,
# #altair.Color('mean(Count):Q')
# ).properties(width=200,height=200)
#Temp by count by year
# testVisual = altair.Chart(dailyCount).mark_circle().encode(
# altair.X('Temp:Q', bin=altair.Bin(step=5)),
# altair.Y('Date:O', timeUnit='year'),
# altair.Color('mean(Count):Q'),
# altair.Size('count(Count):Q'),
# altair.Tooltip('mean(Count)',format=',.0f')
# ).properties(width=200,height=200)
##Count of days by binned by 1000s
# testVisual = altair.Chart(dailyCount).mark_bar().encode(
# altair.X('Date:O', timeUnit='year', axis=altair.Axis(title=None,domainWidth=0,labelAngle=0,tickWidth=0)),
# altair.Y('count(Count):Q',sort='descending', axis=altair.Axis(title='Days by Total Bikes',domainWidth=0)),
# altair.Color('Count:Q', bin=True)
# #altair.Size('count(Count):Q'),
# #altair.Tooltip('mean(Count)',format=',.0f')
# ).transform_filter(
# altair.FieldLTPredicate(field='DayOfYear',lt=datetime.datetime.today().timetuple().tm_yday)
# ).properties(width=200,height=200
# ).configure_axis(
# grid=False
# ).configure_view(
# strokeWidth=0
# ).configure_legend(
# labelBaseline='top'
# )
testVisual.save('testVisual.json')  # NOTE: requires one of the commented-out chart definitions above to be uncommented first | StarcoderdataPython |
1835948 | <reponame>k-for-code/serverless-transformers-on-aws-lambda
import warnings
from functools import lru_cache
warnings.filterwarnings("ignore")
from tqdm import tqdm
from transformers import (AutoConfig, AutoModelForSequenceClassification,
AutoTokenizer, pipeline)
from src import config, utils
logger = utils.create_logger(project_name=config.PREDICTION_TYPE, level="INFO")
class Classifier:
def __init__(self):
_ = self.get_sentiment_pipeline(model_name=config.DEFAULT_MODEL_NAME, tokenizer_name=config.DEFAULT_TOKENIZER_NAME) #warm up
@staticmethod
@lru_cache(maxsize=config.CACHE_MAXSIZE)
def get_sentiment_pipeline(model_name: str, tokenizer_name: str) -> pipeline:
"""Sentiment pipeline for the given model and tokenizer
Args:
model_name (str): Indicating the name of the model
tokenizer_name (str): Indicating the name of the tokenizer
Returns:
pipeline: sentiment pipeline
"""
logger.info(f"Loading model: {model_name}")
id2label = config.ID_SENTIMENT_MAPPING[model_name]
label2id = {label: idx for idx, label in id2label.items()}
model_config = AutoConfig.from_pretrained(model_name)
model_config.label2id = label2id
model_config.id2label = id2label
model = AutoModelForSequenceClassification.from_pretrained(
model_name, config=model_config
)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
classification_pipeline = pipeline(
"sentiment-analysis", model=model, tokenizer=tokenizer
)
return classification_pipeline
def get_clean_text(self, text: str) -> str:
"""Clean the text
Args:
text (str): text
Returns:
str: clean text
"""
return text.strip().lower()
def __call__(self, request: dict)-> dict:
"""Predict the sentiment of the given texts
Args:
request (dict): request containing the list of text to predict the sentiment
Returns:
dict: classes of the given text
"""
texts = [self.get_clean_text(text) for text in request["texts"]]
model_name = request["model_name"]
tokenizer_name = request["tokenizer_name"]
logger.info(f"Predicting sentiment for {len(texts)} texts")
classification_pipeline = self.get_sentiment_pipeline(model_name, tokenizer_name)
predictions = classification_pipeline(texts)
for i, pred in enumerate(predictions):
predictions[i]["score"] = round(pred["score"], 2)
return {
"predictions": predictions
}
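# Hedged usage sketch (assumption: config.DEFAULT_MODEL_NAME and
# config.DEFAULT_TOKENIZER_NAME point at a valid Hugging Face sentiment model whose
# labels are listed in config.ID_SENTIMENT_MAPPING).  The request dict mirrors the keys
# read in __call__:
#
#   classifier = Classifier()
#   request = {
#       "texts": ["I love this product!", "This was a terrible experience."],
#       "model_name": config.DEFAULT_MODEL_NAME,
#       "tokenizer_name": config.DEFAULT_TOKENIZER_NAME,
#   }
#   print(classifier(request))  # {"predictions": [{"label": ..., "score": ...}, ...]}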
| StarcoderdataPython |
9709917 | <gh_stars>1-10
import supervisor
supervisor.set_next_stack_limit(4096 + 4096)
#### My boot.py : disable midi, repl and mass storage unless button pressed
from usb_midi import disable as mididisable
from usb_cdc import disable as consoledisable
from digitalio import DigitalInOut, Direction, Pull
from storage import disable_usb_drive as massstoragedisable
from board import GP28, LED
from time import sleep
mididisable()
# button gp28 at boot will allow access to repl and files
accessButton = DigitalInOut(GP28); accessButton.direction = Direction.INPUT; accessButton.pull = Pull.UP
# Disable devices by default (button not pressed)
if accessButton.value:
massstoragedisable()
consoledisable()
else:
onlight=DigitalInOut(LED)
onlight.direction=Direction.OUTPUT
blinkcount=3
while blinkcount:
onlight.value=1
sleep(.1)
onlight.value=0
sleep(.1)
blinkcount-=1 | StarcoderdataPython |
3464836 | <gh_stars>1-10
# @author <NAME>
# This code is licensed under the MIT license (see LICENSE.txt for details).
"""
Utility classes to provide a graphical interface to paperfetcher handsearch and snowballsearch classes.
"""
import ipywidgets
from IPython.display import display
import os
from paperfetcher import handsearch, parsers, snowballsearch
class CrossrefHandSearchDOIWidget:
def __init__(self, default_save_location="./"):
self.name = ipywidgets.Text(
value='untitled',
disabled=False
)
self.issn_list = ipywidgets.Textarea(
placeholder='Comma-separated ISSNs',
disabled=False
)
self.keyword_list = ipywidgets.Textarea(
placeholder='Comma-separated keywords',
disabled=False
)
self.from_date = ipywidgets.DatePicker(
disabled=False
)
self.until_date = ipywidgets.DatePicker(
disabled=False
)
self.save_location = ipywidgets.Text(
value=default_save_location,
disabled=False
)
self.output_format = ipywidgets.ToggleButtons(
options=[('Plain text (.txt)', 'txt'),
('CSV (.csv)', 'csv'),
('Excel (.xlsx)', 'excel')],
disabled=False,
button_style='',
)
self.search_button = ipywidgets.Button(
description='Search',
disabled=False,
button_style='',
icon='check' # (FontAwesome names without the `fa-` prefix)
)
def execute_search(self, b):
# Disable click while search is being performed
self.search_button._click_handlers.callbacks = []
ISSNs = list(self.issn_list.value.strip().split(","))
if self.keyword_list.value is not None and self.keyword_list.value != "":
keywords = list(self.keyword_list.value.strip().split(","))
else:
keywords = None
fromd = self.from_date.value
untild = self.until_date.value
for issn in ISSNs:
print("Searching ISSN: %s" % issn)
search = handsearch.CrossrefSearch(ISSN=issn,
keyword_list=keywords,
from_date=fromd,
until_date=untild)
search(select=True, select_fields=["DOI"])
doi_ds = search.get_DOIDataset()
# Check if save location exists, if not, create it
if not os.path.exists(self.save_location.value):
os.makedirs(self.save_location.value)
if self.output_format.value == 'txt':
doi_ds.save_txt(self.save_location.value + "/{}_{}.txt".format(self.name.value, issn))
elif self.output_format.value == 'csv':
doi_ds.save_csv(self.save_location.value + "/{}_{}.csv".format(self.name.value, issn))
elif self.output_format.value == 'excel':
doi_ds.save_excel(self.save_location.value + "/{}_{}.xlsx".format(self.name.value, issn))
else:
raise ValueError("Undefined output format.")
# Enable click again
self.search_button.on_click(self.execute_search)
def __call__(self):
items = [ipywidgets.Label('Name of search (A-Za-z0-9):'), self.name,
ipywidgets.Label('ISSNs:'), self.issn_list,
ipywidgets.Label('Search keywords:'), self.keyword_list,
ipywidgets.Label('Fetch from this date onwards:'), self.from_date,
ipywidgets.Label('Fetch until this date:'), self.until_date,
ipywidgets.Label('Location to save DOIs:'), self.save_location,
ipywidgets.Label('Output format for DOIs:'), self.output_format,
self.search_button, ipywidgets.Label('')]
self.search_button.on_click(self.execute_search)
display(ipywidgets.GridBox(items, layout=ipywidgets.Layout(grid_template_columns="400px 600px")))
class CrossrefHandSearchCitationsWidget:
def __init__(self, default_save_location="./"):
self.name = ipywidgets.Text(
value='untitled',
disabled=False
)
self.issn_list = ipywidgets.Textarea(
placeholder='Comma-separated ISSNs',
disabled=False
)
self.keyword_list = ipywidgets.Textarea(
placeholder='Comma-separated keywords',
disabled=False
)
self.from_date = ipywidgets.DatePicker(
disabled=False
)
self.until_date = ipywidgets.DatePicker(
disabled=False
)
self.fields = ipywidgets.SelectMultiple(
options=[('DOI', 'DOI'),
('URL', 'URL'),
('Title', 'title'),
('Authors', 'author'),
('Publication Date', 'issued')],
value=['DOI', 'URL', 'title', 'author', 'issued'],
disabled=False
)
self.save_location = ipywidgets.Text(
value=default_save_location,
disabled=False
)
self.output_format = ipywidgets.ToggleButtons(
options=[('Plain text (.txt)', 'txt'),
('CSV (.csv)', 'csv'),
('Excel (.xlsx)', 'excel')],
disabled=False,
button_style='',
)
self.search_button = ipywidgets.Button(
description='Search',
disabled=False,
button_style='',
icon='check' # (FontAwesome names without the `fa-` prefix)
)
def execute_search(self, b):
# Disable click while search is being performed
self.search_button._click_handlers.callbacks = []
ISSNs = list(self.issn_list.value.strip().split(","))
if self.keyword_list.value is not None and self.keyword_list.value != "":
keywords = list(self.keyword_list.value.strip().split(","))
else:
keywords = None
fromd = self.from_date.value
untild = self.until_date.value
field_list = []
field_parsers = []
for field in self.fields.value:
field_list.append(field)
if field == "DOI":
field_parsers.append(None)
elif field == "URL":
field_parsers.append(None)
elif field == "title":
field_parsers.append(parsers.crossref_title_parser)
elif field == "author":
field_parsers.append(parsers.crossref_authors_parser)
elif field == "issued":
field_parsers.append(parsers.crossref_date_parser)
else:
raise NotImplementedError("Cannot parse field {}.".format(field))
for issn in ISSNs:
print("Searching ISSN: %s" % issn)
search = handsearch.CrossrefSearch(ISSN=issn,
keyword_list=keywords,
from_date=fromd,
until_date=untild)
search(select=True, select_fields=field_list)
cit_ds = search.get_CitationsDataset(field_list=field_list,
field_parsers_list=field_parsers)
if self.output_format.value == 'txt':
cit_ds.save_txt(self.save_location.value + "/{}_{}.txt".format(self.name.value, issn))
elif self.output_format.value == 'csv':
cit_ds.save_csv(self.save_location.value + "/{}_{}.csv".format(self.name.value, issn))
elif self.output_format.value == 'excel':
cit_ds.save_excel(self.save_location.value + "/{}_{}.xlsx".format(self.name.value, issn))
else:
raise ValueError("Undefined output format.")
# Enable click again
self.search_button.on_click(self.execute_search)
def __call__(self):
items = [ipywidgets.Label('Name of search (A-Za-z0-9):'), self.name,
ipywidgets.Label('ISSNs:'), self.issn_list,
ipywidgets.Label('Search keywords:'), self.keyword_list,
ipywidgets.Label('Fetch from this date onwards:'), self.from_date,
ipywidgets.Label('Fetch until this date:'), self.until_date,
ipywidgets.Label('Fields to fetch (Shift + click to select multiple):'), self.fields,
ipywidgets.Label('Location to save DOIs:'), self.save_location,
ipywidgets.Label('Output format for DOIs:'), self.output_format,
self.search_button, ipywidgets.Label('')]
self.search_button.on_click(self.execute_search)
display(ipywidgets.GridBox(items, layout=ipywidgets.Layout(grid_template_columns="400px 600px")))
class CrossrefSnowballSearchDOIWidget:
def __init__(self, default_save_location="./"):
self.name = ipywidgets.Text(
value='untitled_snowball',
disabled=False
)
self.doi_list = ipywidgets.Textarea(
placeholder='Comma-separated DOIs',
disabled=False
)
self.save_location = ipywidgets.Text(
value=default_save_location,
disabled=False
)
self.search_button = ipywidgets.Button(
description='Search',
disabled=False,
button_style='',
icon='check' # (FontAwesome names without the `fa-` prefix)
)
def execute_search(self, b):
# Disable click while search is being performed
self.search_button._click_handlers.callbacks = []
DOIs = list(self.doi_list.value.strip().split(","))
search = snowballsearch.CrossrefSearch(DOIs)
search()
doi_ds = search.get_DOIDataset()
# Check if save location exists, if not, create it
if not os.path.exists(self.save_location.value):
os.makedirs(self.save_location.value)
doi_ds.save_txt(self.save_location.value + "/{}.txt".format(self.name.value))
# Enable click again
self.search_button.on_click(self.execute_search)
def __call__(self):
items = [ipywidgets.Label('Name of search (A-Za-z0-9):'), self.name,
ipywidgets.Label('Search DOIs:'), self.doi_list,
ipywidgets.Label('Location to save result DOIs:'), self.save_location,
self.search_button, ipywidgets.Label('')]
self.search_button.on_click(self.execute_search)
display(ipywidgets.GridBox(items, layout=ipywidgets.Layout(grid_template_columns="400px 600px")))
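# Hedged usage sketch (assumption: executed inside a Jupyter notebook so ipywidgets can
# render).  Instantiating a widget class and calling it draws the form; the search runs
# when the Search button is clicked:
#
#   widget = CrossrefSnowballSearchDOIWidget(default_save_location="./results")
#   widget()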
| StarcoderdataPython |
11341476 | # https://leetcode.com/problems/add-two-numbers/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
carry = 0
def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
carry = 0
l3 = ListNode()
l3_begin = l3
while True:
sum = 0
if carry:
sum += 1
carry = 0
sum += l1.val + l2.val
if sum >= 10:
sum -= 10
carry = 1
l3.val = sum
if l1.next and l2.next:
l3.next = ListNode()
l3 = l3.next
l2 = l2.next
l1 = l1.next
elif l1.next and not l2.next:
l3.next = ListNode()
l3 = l3.next
l2 = ListNode()
l1 = l1.next
elif l2.next and not l1.next:
l3.next = ListNode()
l3 = l3.next
l2 = l2.next
l1 = ListNode()
else:
if carry:
l3.next = ListNode()
l3 = l3.next
l3.val += carry
carry = 0
break
return l3_begin
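# Hedged usage sketch for running outside the LeetCode harness, where ListNode and the
# type hints are normally provided for you:
#
#   from typing import Optional
#   class ListNode:
#       def __init__(self, val=0, next=None):
#           self.val, self.next = val, next
#
#   def build(digits):               # digits in reverse order, e.g. 342 -> [2, 4, 3]
#       head = cur = ListNode(digits[0])
#       for d in digits[1:]:
#           cur.next = ListNode(d)
#           cur = cur.next
#       return head
#
#   node = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))  # 342 + 465
#   while node:                      # prints 7 0 8, i.e. 807 stored in reverse order
#       print(node.val); node = node.next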
| StarcoderdataPython |
4925643 | <filename>lisa/tools/lsblk.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from dataclasses import dataclass
from typing import List
from lisa.executable import Tool
from lisa.util import find_patterns_groups_in_lines
@dataclass
class PartitionInfo(object):
name: str = ""
mountpoint: str = ""
size: int = 0
type: str = ""
available_blocks: int = 0
used_blocks: int = 0
total_blocks: int = 0
percentage_blocks_used: int = 0
def __init__(
self,
name: str,
mountpoint: str,
size: int = 0,
type: str = "",
available_blocks: int = 0,
used_blocks: int = 0,
total_blocks: int = 0,
percentage_blocks_used: int = 0,
):
self.name = name
self.mountpoint = mountpoint
self.size = size
self.type = type
self.available_blocks = available_blocks
self.used_blocks = used_blocks
self.total_blocks = total_blocks
self.percentage_blocks_used = percentage_blocks_used
class Lsblk(Tool):
# NAME="loop2" SIZE="34017280" TYPE="loop" MOUNTPOINT="/snap/snapd/13640"
_LSBLK_ENTRY_REGEX = re.compile(
r'NAME="(?P<name>\S+)"\s+SIZE="(?P<size>\d+)"\s+'
r'TYPE="(?P<type>\S+)"\s+MOUNTPOINT="(?P<mountpoint>\S*)"'
)
@property
def command(self) -> str:
return "lsblk"
def get_partitions(self, force_run: bool = False) -> List[PartitionInfo]:
# parse output of lsblk
output = self.run(
"-b -P -o NAME,SIZE,TYPE,MOUNTPOINT", sudo=True, force_run=force_run
).stdout
partition_info = []
lsblk_entries = find_patterns_groups_in_lines(
output, [self._LSBLK_ENTRY_REGEX]
)[0]
for lsblk_entry in lsblk_entries:
partition_info.append(
PartitionInfo(
name=lsblk_entry["name"],
size=int(lsblk_entry["size"]),
type=lsblk_entry["type"],
mountpoint=lsblk_entry["mountpoint"],
)
)
return partition_info
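# Hedged usage sketch (assumption: inside LISA this tool is normally obtained from a
# node, e.g. node.tools[Lsblk], rather than constructed directly):
#
#   lsblk = node.tools[Lsblk]
#   for part in lsblk.get_partitions(force_run=True):
#       print(part.name, part.type, part.size, part.mountpoint)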
| StarcoderdataPython |
6553552 | from cx_Freeze import setup, Executable
setup(
name = "AdcpTerminal",
version = "1.0.0",
description = "Setup the Terminal and connection to crossbar.io",
executables = [Executable("../frontend/qt/mainwindow.py")],
)
| StarcoderdataPython |
146537 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constants and Enums
In independent file to minimize circular imports.
"""
from enum import Enum
CONF_CROSS_SECTION = "crossSectionControl"
#
# FAST_FLUX_THRESHOLD_EV is the energy threshold above which neutrons are considered "fast" [eV]
#
FAST_FLUX_THRESHOLD_EV = 100000.0 # eV
# CROSS SECTION LIBRARY GENERATION CONSTANTS
MAXIMUM_XS_LIBRARY_ENERGY = 1.4190675e7 # eV
ULTRA_FINE_GROUP_LETHARGY_WIDTH = 1.0 / 120.0
# LOWEST_ENERGY_EV cannot be zero due to integrating lethargy, and lethargy is undefined at 0.0
# The lowest lower boundary of many group structures such as any WIMS, SCALE or CASMO
# is 1e-5 eV, therefore it is chosen here. This number must be lower than all of the
# defined group structures, and as of this writing the lowest in this module is cinder63 with a
# lowest upper boundary of 5e-3 eV. The chosen 1e-5 eV is rather arbitrary but expected to be low
# enough to support other group structures. For fast reactors, there will be
# no sensitivity at all to this value since there is no flux in this region.
LOWEST_ENERGY_EV = 1.0e-5
# Highest energy will typically depend on what physics code is being run, but this is
# a decent round number to use.
HIGH_ENERGY_EV = 1.5e07
# Particle types constants
GAMMA = "Gamma"
NEUTRON = "Neutron"
NEUTRONGAMMA = "Neutron and Gamma"
# Constants for neutronics setting controlling saving of files after neutronics calculation
# See setting 'neutronicsOutputsToSave'
ALL = "All"
RESTARTFILES = "Restart files"
INPUTOUTPUT = "Input/Output"
FLUXFILES = "Flux files"
| StarcoderdataPython |
85506 | import os
from testglobals import config
import subprocess
import re
# Feature names generally follow the naming used by Linux's /proc/cpuinfo.
SUPPORTED_CPU_FEATURES = {
# These aren't comprehensive; they are only CPU features that we care about
# x86:
'sse', 'sse2', 'sse3', 'ssse3', 'sse4_1', 'sse4_2',
'avx1', 'avx2',
'popcnt', 'bmi1', 'bmi2'
}
cpu_feature_cache = None
def get_cpu_features():
if config.os in ['mingw32', 'linux'] and os.path.exists('/proc/cpuinfo'):
f = open('/proc/cpuinfo').read()
flags = re.search(r'flags\s*:\s*.*$', f, re.M)
if flags is None:
print('get_cpu_features: failed to find cpu features')
return {}
flags = set(flags.group(0).split())
if 'pni' in flags:
flags.add('sse3')
flags.remove('pni')
return flags
elif config.os == 'darwin':
out = subprocess.check_output(['sysctl', 'hw']).decode('UTF-8')
features = set()
def check_feature(darwin_name, our_name=None):
if re.search(r'hw\.optional.%s:\s*1' % darwin_name, out) is not None:
features.add(darwin_name if our_name is None else our_name)
for feature in SUPPORTED_CPU_FEATURES:
check_feature(feature)
# A few annoying cases
check_feature('avx1_0', 'avx1')
check_feature('avx2_0', 'avx2')
return features
else:
# TODO: Add {Open,Free}BSD support
print('get_cpu_features: Lacking support for your platform')
return {}
def have_cpu_feature(feature):
"""
A testsuite predicate for testing the availability of CPU features.
"""
    global cpu_feature_cache
    assert feature in SUPPORTED_CPU_FEATURES
if cpu_feature_cache is None:
cpu_feature_cache = get_cpu_features()
print('Found CPU features:', ' '.join(cpu_feature_cache))
# Sanity checking
assert all(feat in SUPPORTED_CPU_FEATURES
for feat in cpu_feature_cache)
return feature in cpu_feature_cache
if __name__ == '__main__':
import sys
config.os = sys.argv[1]
print(get_cpu_features())
| StarcoderdataPython |
89549 | <filename>tests/monitoring/fixtures/server.py
# pylint: disable=redefined-outer-name
import logging
from typing import List
from unittest.mock import patch
import pytest
from eth_utils import to_canonical_address
from web3 import Web3
from monitoring_service.database import Database
from monitoring_service.service import MonitoringService
from raiden.utils.typing import Address, BlockNumber, BlockTimeout, MonitoringServiceAddress
from raiden_contracts.constants import (
CONTRACT_MONITORING_SERVICE,
CONTRACT_SERVICE_REGISTRY,
CONTRACT_TOKEN_NETWORK_REGISTRY,
CONTRACT_USER_DEPOSIT,
)
from request_collector.server import RequestCollector
from tests.constants import TEST_CHAIN_ID, TEST_MSC_ADDRESS
log = logging.getLogger(__name__)
TEST_POLL_INTERVAL = 0.001
@pytest.fixture(scope="session")
def ms_address(create_service_account) -> Address:
return to_canonical_address(create_service_account())
@pytest.fixture
def default_cli_args_ms(default_cli_args) -> List[str]:
return default_cli_args + [
"--token-network-registry-contract-address",
"0x" + "1" * 40,
"--monitor-contract-address",
"0x" + "2" * 40,
"--user-deposit-contract-address",
"0x" + "3" * 40,
"--accept-disclaimer",
]
@pytest.fixture
def ms_database() -> Database:
return Database(
filename=":memory:",
chain_id=TEST_CHAIN_ID,
msc_address=TEST_MSC_ADDRESS,
registry_address=Address(bytes([3] * 20)),
receiver=Address(bytes([4] * 20)),
)
@pytest.fixture
def monitoring_service( # pylint: disable=too-many-arguments
ms_address,
web3: Web3,
monitoring_service_contract,
user_deposit_contract,
token_network_registry_contract,
ms_database: Database,
get_private_key,
service_registry,
):
ms = MonitoringService(
web3=web3,
private_key=get_private_key(ms_address),
contracts={
CONTRACT_TOKEN_NETWORK_REGISTRY: token_network_registry_contract,
CONTRACT_MONITORING_SERVICE: monitoring_service_contract,
CONTRACT_USER_DEPOSIT: user_deposit_contract,
CONTRACT_SERVICE_REGISTRY: service_registry,
},
sync_start_block=BlockNumber(0),
required_confirmations=BlockTimeout(0), # for faster tests
poll_interval=0.01, # for faster tests
db_filename=":memory:",
)
# We need a shared db between MS and RC so the MS can use MR saved by the RC
ms.context.database = ms_database
ms.database = ms_database
ms.chain_id = TEST_CHAIN_ID # workaround for https://github.com/ethereum/web3.py/issues/1677
return ms
@pytest.fixture
def request_collector(
ms_address: MonitoringServiceAddress, ms_database: Database, get_private_key
):
with patch("request_collector.server.MatrixListener"):
rc = RequestCollector(private_key=get_private_key(ms_address), state_db=ms_database)
rc.start()
yield rc
rc.stop()
rc.join()
| StarcoderdataPython |
6643645 | <reponame>mudkipmaster/MLB-Crawler
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Circle
class Visualizer:
def set_up_sz_plot(self, title):
f = plt.figure()
ax = f.gca()
ax.axis('equal')
self.plot_sz(ax)
ax.set(xlim=[-2, 3], ylim=[-2, 3])
ax.set_xlabel("Distance (ft)")
ax.set_ylabel("Distance (ft)")
ax.set_title(title)
return f, ax
def plot_sz(self, ax):
ext_x = [0, 1, 1, 0, 0, 0.01, 0.01, 0.99, 0.99, 0.01]
ext_y = [0, 0, 1, 1, 0, 0.01, 0.99, 0.99, 0.01, 0.01]
ax.fill(ext_x, ext_y, color='k')
# ax.add_patch(
# Rectangle((0, 0), 1, 1, color=(0, 1, 0, 0.25))) # 17 inches = 1.42 feet
def show_pitches(self, strikes, balls, file=None, title="Missed Pitch Calls"):
fig, ax = self.set_up_sz_plot(title)
# strikes = self.data[self.data["pitch_type"] == "S"]
# balls = self.data[self.data["pitch_type"] == "B"]
ball_circles = [Circle((xi, yi), radius=0.1729 / 2, edgecolor="k") for xi, yi in
zip(balls["normalized_pitch_px"], balls["normalized_pitch_pz"])]
bc = PatchCollection(ball_circles, facecolors="b", edgecolors="k")
ax.add_collection(bc)
strike_circles = [Circle((xi, yi), radius=0.1729 / 2) for xi, yi in
zip(strikes["normalized_pitch_px"], strikes["normalized_pitch_pz"])]
sc = PatchCollection(strike_circles, facecolors="r", edgecolors="k")
# sc = PatchCollection(strike_circles, facecolors="r")
ax.add_collection(sc)
ax.legend(handles=[Circle((0, 0), radius=0.1205, color='r', label="Called Strikes"),
Circle((0, 0), radius=0.1205, color='b', label="Called Balls")])
        if file is None:
plt.show()
else:
plt.savefig(file, format="png")
def get_2dhist(self, strikes, balls):
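        # Difference of the two normalized 2-D histograms (strike density minus ball
        # density) over the normalized window [-1, 2] x [-1, 2] around the strike zone.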
ball_hist, xedges, yedges = np.histogram2d(balls["normalized_pitch_pz"], balls["normalized_pitch_px"],
range=[[-1, 2], [-1, 2]], bins=40, density=True)
strike_hist, _, _ = np.histogram2d(strikes["normalized_pitch_pz"], strikes["normalized_pitch_px"],
range=[[-1, 2], [-1, 2]], bins=40, density=True)
return strike_hist - ball_hist, xedges, yedges
def show_probability(self, strikes, balls, file=None, title="Probability Distribution of Pitches"):
fig, ax = self.set_up_sz_plot(title)
probability_hist, xedges, yedges = self.get_2dhist(strikes, balls)
farthest_distance_from_zero = np.maximum(abs(np.min(probability_hist)), abs(np.max(probability_hist)))
ax.imshow(probability_hist, interpolation='nearest', origin='low',
extent=[yedges[0], yedges[-1], xedges[0], xedges[-1]], cmap="bwr", vmin=-farthest_distance_from_zero,
vmax=farthest_distance_from_zero)
        if file is None:
plt.show()
else:
plt.savefig(file, format="png")
def get_differential_2dhist(self, strikes1, balls1, strikes2, balls2):
strikes, xedges, yedges = self.get_2dhist(strikes1, strikes2)
balls, _, _ = self.get_2dhist(balls1, balls2)
combined = strikes + balls
magnitude = np.sum(np.abs(combined))/(40*40)*100
return combined, xedges, yedges, magnitude
def show_differential_probability(self, strikes1, balls1, strikes2, balls2, file=None,
title="Difference between the Probability Distribution of Pitches"):
fig, ax = self.set_up_sz_plot(title)
combined, xedges, yedges, magnitude = self.get_differential_2dhist(strikes1,balls1,strikes2,balls2)
ax.imshow(combined, interpolation='nearest', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],cmap="bwr", vmin=-1, vmax=1)
ax.set_title(title+" (%0.2f%%)"%(magnitude))
        if file is None:
plt.show()
else:
plt.savefig(file, format="png")
| StarcoderdataPython |
9753925 | <filename>noise_characterization/tomography/DDTMarginalsAnalyzer.py
"""
@authors: <NAME>, <NAME>, <NAME>
@contact: <EMAIL>
REFERENCES:
[0] <NAME>, <NAME>, <NAME>,
"Mitigation of readout noise in near-term quantum devices
by classical post-processing based on detector tomography",
Quantum 4, 257 (2020)
[0.5] <NAME>, <NAME>, <NAME>, <NAME>,
"Modeling and mitigation of cross-talk effects in readout noise
with applications to the Quantum Approximate Optimization Algorithm",
Quantum 5, 464 (2021).
"""
import copy
import numpy as np
from typing import Optional, Dict, List, Union
from collections import defaultdict
from base_classes.marginals_analyzer_base import MarginalsAnalyzerBase
from functions import ancillary_functions as anf
class DDTMarginalsAnalyzer(MarginalsAnalyzerBase):
"""
Class that handles results of Diagonal Detector Tomography.
Main functionalities allow to calculate noise matrices on subsets_list of qubits.
This includes averaged noise matrices, i_index.e., averaged over states off all other qubits,
as well as state-dependent, i_index.e., conditioned on the particular
input classical state of some other qubits.
In this class, and all its children, we use the following convention
for storing marginal noise matrices:
:param noise_matrices_dictionary: nested dictionary with following structure:
noise_matrices_dictionary[qubits_subset_string]['averaged']
= average noise matrix on qubits subset
and
noise_matrices_dictionary[qubits_subset_string][other_qubits_subset_string][input_state_bitstring]
= noise matrix on qubits subset depending on input state of other qubits.
where:
- qubits_subset_string - is string labeling qubits subset (e.g., 'q1q2q15...')
- other_qubits_subset_string - string labeling other subset
- input_state_bitstring - bitstring labeling
input state of qubits in other_qubits_subset_string
"""
def __init__(self,
results_dictionary_ddot: Dict[str, Dict[str, int]],
bitstrings_right_to_left: bool,
marginals_dictionary: Optional[Dict[str, Dict[str, np.ndarray]]] = None,
noise_matrices_dictionary: Optional[
Dict[str, Union[np.ndarray, Dict[str, Dict[str, np.ndarray]]]]] = None
) -> None:
"""
:param results_dictionary_ddot: see description of MarginalsAnalyzerBase.
Here we use classical input states (bitstrings) of qubits as LABELS for experiments.
:param bitstrings_right_to_left: specify whether bitstrings
should be read from right to left (when interpreting qubit labels)
:param marginals_dictionary: see description of MarginalsAnalyzerBase.
:param noise_matrices_dictionary: nested dictionary with following structure:
"""
super().__init__(results_dictionary_ddot,
bitstrings_right_to_left,
marginals_dictionary
)
if noise_matrices_dictionary is None:
noise_matrices_dictionary = {}
# TODO FBM: Make sure whether this helps in anything
# TODO FBM: (because we anyway perform checks in the functions later)
if marginals_dictionary is not None:
for experiment_key, dictionary_of_marginals in marginals_dictionary.items():
for marginal_key in dictionary_of_marginals.keys():
if marginal_key not in noise_matrices_dictionary.keys():
noise_matrices_dictionary[marginal_key] = {}
self._noise_matrices_dictionary = noise_matrices_dictionary
@property
def noise_matrices_dictionary(self) -> Dict[str, Union[
np.ndarray, Dict[str, Dict[str, np.ndarray]]]]:
return self._noise_matrices_dictionary
@noise_matrices_dictionary.setter
def noise_matrices_dictionary(self,
noise_matrices_dictionary: Dict[str, Union[
np.ndarray, Dict[str, Dict[str, np.ndarray]]]] = None) -> None:
self._noise_matrices_dictionary = noise_matrices_dictionary
@staticmethod
def get_noise_matrix_from_counts_dict(
results_dictionary: Union[Dict[str, np.ndarray], defaultdict]) -> np.ndarray:
"""Return noise matrix from counts dictionary.
Assuming that the results are given only for qubits of interest.
:param results_dictionary: dictionary with experiments of the form:
results_dictionary[input_state_bitstring] = probability_distribution
where:
- input_state_bitstring is bitstring denoting classical input state
- probability_distribution - estimated vector of probabilities for that input state
:return: noise_matrix: the array representing noise on qubits
on which the experiments were performed
"""
number_of_qubits = len(list(results_dictionary.keys())[0])
noise_matrix = np.zeros((2 ** number_of_qubits, 2 ** number_of_qubits))
for input_state, probability_vector in results_dictionary.items():
noise_matrix[:, int(input_state, 2)] = probability_vector[:, 0]
return noise_matrix
@staticmethod
def average_noise_matrices_over_some_qubits(matrices_cluster: Dict[str, np.ndarray],
all_neighbors: List[int],
qubits_to_be_left: List[int]) -> Dict[str, np.ndarray]:
"""
Given dictionary of noise matrices, average them over some qubits.
:param matrices_cluster: dictionary for which KEY is classical INPUT state of neighbors,
and VALUE is potentially_stochastic_matrix noise matrix
:param all_neighbors: list of neighbors of given cluster
:param qubits_to_be_left: qubits which we are interested in and we do not average over them
:return: dictionary of noise matrices
depending on the state of neighbors MINUS qubits_to_be_averaged_over
"""
if all_neighbors is None or len(all_neighbors)==0:
return {'averaged': matrices_cluster['averaged']}
reversed_enumerated = anf.get_reversed_enumerated_from_indices(all_neighbors)
averaging_normalization = int(2 ** (len(all_neighbors) - len(qubits_to_be_left)))
states_after_averaging = anf.register_names_qubits(range(len(qubits_to_be_left)),
len(qubits_to_be_left), False)
averaged_dimension = list(matrices_cluster.values())[0].shape[0]
averaged_matrices_cluster = {
state: np.zeros((averaged_dimension, averaged_dimension), dtype=float) for state in
states_after_averaging}
qubits_to_be_averaged_over = list(set(all_neighbors).difference(set(qubits_to_be_left)))
qubits_to_be_averaged_over_mapped = [reversed_enumerated[q_index] for q_index in
qubits_to_be_averaged_over]
for neighbors_state, conditional_noise_matrix in matrices_cluster.items():
list_string_neighbors = list(copy.deepcopy(neighbors_state))
list_string_neighbors_to_be_left = list(np.delete(list_string_neighbors,
qubits_to_be_averaged_over_mapped))
string_neighbors = ''.join(list_string_neighbors_to_be_left)
averaged_matrices_cluster[
string_neighbors] += conditional_noise_matrix / averaging_normalization
return averaged_matrices_cluster
def _compute_noise_matrix_averaged(self,
subset: List[int]) -> np.ndarray:
"""Noise matrix for subset of qubits, averaged over all other qubits
:param subset: subset of qubits we are interested in
By default takes data from self._marginals_dictionary. If data is not present, then it
calculates marginals_dictionary for given subset
and updates the class's property self.marginals_dictionary
"""
# TODO FBM: Perhaps add possibility of using existing marginals_dictionary for bigger subset that includes
# target subset
subset_key = 'q' + 'q'.join([str(s) for s in subset])
marginal_dict_now = self.get_averaged_marginal_for_subset(subset)
noise_matrix_averaged = self.get_noise_matrix_from_counts_dict(marginal_dict_now)
if not anf.is_stochastic(noise_matrix_averaged):
raise ValueError('Noise matrix not stochastic for subset:', subset)
if subset_key in self._noise_matrices_dictionary.keys():
self._noise_matrices_dictionary[subset_key]['averaged'] = noise_matrix_averaged
else:
self._noise_matrices_dictionary[subset_key] = {'averaged': noise_matrix_averaged}
return noise_matrix_averaged
def _get_noise_matrix_averaged(self,
subset: List[int]) -> np.ndarray:
"""
Like self._compute_noise_matrix_averaged but if matrix is already in class' property,
does not calculate it again.
:param subset: subset of qubits we are interested in
"""
subset_key = 'q' + 'q'.join([str(s) for s in subset])
try:
return self._noise_matrices_dictionary[subset_key]['averaged']
        except KeyError:
return self._compute_noise_matrix_averaged(subset)
def _compute_noise_matrix_dependent(self,
qubits_of_interest: List[int],
neighbors_of_interest: Union[List[int], None]) \
-> Dict[str, np.ndarray]:
"""Return lower-dimensional effective noise matrices acting on qubits_of_interest"
conditioned on input states of neighbors_of_interest
:param qubits_of_interest: labels of qubits in marginal we are interested in
:param neighbors_of_interest: labels of qubits that affect noise matrix on qubits_of_interest
:return conditional_noise_matrices_dictionary: dictionary with structure
conditional_noise_matrices_dictionary['averaged'] =
noise matrix on qubits_of_interest averaged over input states of other qubits
and
conditional_noise_matrices_dictionary[input_state_neighbors_bitstring] =
noise matrix on qubits_of_interest conditioned on input state of neighbors being
input_state_neighbors_bitstring
"""
# If there are no all_neighbors,
# then this corresponds to averaging over all qubits except qubits_of_interest
        if neighbors_of_interest is None or len(neighbors_of_interest) == 0:
cluster_string = self.get_qubits_key(qubits_of_interest)
if 'averaged' in self._noise_matrices_dictionary[cluster_string].keys():
return {'averaged': self._noise_matrices_dictionary[cluster_string]['averaged']}
else:
noise_matrix = self._get_noise_matrix_averaged(qubits_of_interest)
return {'averaged': noise_matrix}
# check if there is no collision between qubits_of_interest and neighbors_of_interest
# (if there is, then the method_name won't be consistent)
if len(anf.lists_intersection(qubits_of_interest, neighbors_of_interest)) != 0:
print(qubits_of_interest, neighbors_of_interest)
raise ValueError('Qubits of interest and neighbors overlap')
# first, get averaged noise matrix on qubits of interest and all_neighbors of interest
# TODO FBM: make sure that qubit indices are correct (I think they are)
all_qubits = sorted(qubits_of_interest + neighbors_of_interest)
all_qubits_enumerated = anf.get_reversed_enumerated_from_indices(all_qubits)
# we will get noise matrix on all of the qubits first, and then we will process it to get
# conditional marginal noise matrices on qubits_of_interest
big_lambda = self._get_noise_matrix_averaged(all_qubits)
total_number_of_qubits = int(np.log2(big_lambda.shape[0]))
total_dimension = int(2 ** total_number_of_qubits)
number_of_qubits_of_interest = len(qubits_of_interest)
number_of_neighbors = len(neighbors_of_interest)
# Normalization when averaging over states of non-neighbours (each with the same probability)
normalization = 2 ** (
total_number_of_qubits - number_of_neighbors - number_of_qubits_of_interest)
# classical register on all qubits
classical_register_all_qubits = ["{0:b}".format(i).zfill(total_number_of_qubits) for i in
range(total_dimension)]
# classical register on neighbours
classical_register_neighbours = ["{0:b}".format(i).zfill(number_of_neighbors) for i in
range(2 ** number_of_neighbors)]
# create dictionary of the marginal states of qubits_of_interest and neighbors_of_interest
# for the whole register (this function is storing data which could also be calculated in situ
# in the loops later, but this is faster)
indices_dictionary_small = {}
for neighbors_state_bitstring in classical_register_all_qubits:
small_string = ''.join([list(neighbors_state_bitstring)[all_qubits_enumerated[b]] for b in
qubits_of_interest])
neighbours_string = ''.join(
[list(neighbors_state_bitstring)[all_qubits_enumerated[b]] for b in
neighbors_of_interest])
# first place in list is label for state of qubits_of_interest
# and second for neighbors_of_interest
indices_dictionary_small[neighbors_state_bitstring] = [small_string, neighbours_string]
# initiate dictionary for which KEY is input state of all_neighbors
# and VALUE will the the corresponding noise matrix on qubits_of_interest
conditional_noise_matrices = {
s: np.zeros((2 ** number_of_qubits_of_interest, 2 ** number_of_qubits_of_interest)) for s
in
classical_register_neighbours}
# go through all classical states
for measured_state_integer in range(total_dimension):
for input_state_integer in range(total_dimension):
lambda_element = big_lambda[measured_state_integer, input_state_integer]
# input state of all qubits in binary format
input_state_bitstring = classical_register_all_qubits[input_state_integer]
# measured state of all qubits in binary format
measured_state_bitstring = classical_register_all_qubits[measured_state_integer]
# input state of qubits_of_interest in binary format
input_state_small = indices_dictionary_small[input_state_bitstring][0]
# measured state of qubits_of_interest in binary format
measured_state_small = indices_dictionary_small[measured_state_bitstring][0]
# input state of neighbors_of_interest in binary format
input_state_neighbours = indices_dictionary_small[input_state_bitstring][1]
# element of small lambda labeled by (measured state | input state),
# and the lambda itself is labeled by input state of all_neighbors
conditional_noise_matrices[input_state_neighbours][
int(measured_state_small, 2), int(input_state_small, 2)] += lambda_element
# normalize matrices
for neighbors_state_bitstring in classical_register_neighbours:
conditional_noise_matrices[neighbors_state_bitstring] /= normalization
# conditional_noise_matrices['all_neighbors'] = neighbors_of_interest
cluster_string = 'q' + 'q'.join(str(s) for s in qubits_of_interest)
neighbours_string = 'q' + 'q'.join(str(s) for s in neighbors_of_interest)
if cluster_string not in self._noise_matrices_dictionary.keys():
# If there is no entry for our cluster in the dictionary, we create it and add
# averaged noise matrix
averaged_noise_matrix = np.zeros(
(2 ** number_of_qubits_of_interest, 2 ** number_of_qubits_of_interest))
for neighbors_state_bitstring in conditional_noise_matrices.keys():
averaged_noise_matrix += conditional_noise_matrices[neighbors_state_bitstring]
averaged_noise_matrix /= 2 ** number_of_qubits_of_interest
self._noise_matrices_dictionary[cluster_string] = {'averaged': averaged_noise_matrix}
self._noise_matrices_dictionary[cluster_string][neighbours_string] = conditional_noise_matrices
return self._noise_matrices_dictionary[cluster_string][neighbours_string]
def get_noise_matrix_dependent(self,
qubits_of_interest: List[int],
neighbors_of_interest: List[int]) -> dict:
"""Description:
like self._compute_noise_matrix_dependent
but checks whether matrices were already calculated to prevent multiple computations of the
same matrices
:param qubits_of_interest: labels of qubits in marginal we are interested in
:param neighbors_of_interest: labels of qubits that affect noise matrix on qubits_of_interest
:return conditional_noise_matrices_dictionary:
"""
cluster_key = self.get_qubits_key(qubits_of_interest)
if cluster_key not in self._noise_matrices_dictionary.keys():
self.compute_subset_noise_matrices_averaged([qubits_of_interest])
        if neighbors_of_interest is None or len(neighbors_of_interest) == 0:
neighbors_key = 'averaged'
if neighbors_key in self._noise_matrices_dictionary[cluster_key]:
if not anf.is_stochastic(self._noise_matrices_dictionary[cluster_key]['averaged']):
anf.cool_print('Bug is here')
print(cluster_key, neighbors_key)
# TODO FBM: SOMETHING IS BROKEN
self._noise_matrices_dictionary[cluster_key][
'averaged'] = self._compute_noise_matrix_averaged(qubits_of_interest)
if not anf.is_stochastic(self._noise_matrices_dictionary[cluster_key]['averaged']):
anf.cool_print('And I cant fix it')
# anf.print_array_nicely(self._noise_matrices_dictionary[cluster_key]['averaged'])
return {'averaged': self._noise_matrices_dictionary[cluster_key]['averaged']}
else:
return self._compute_noise_matrix_dependent(qubits_of_interest,
neighbors_of_interest)
else:
neighbors_key = 'q' + 'q'.join([str(s) for s in neighbors_of_interest])
if neighbors_key in self._noise_matrices_dictionary[cluster_key]:
return self._noise_matrices_dictionary[cluster_key][neighbors_key]
else:
return self._compute_noise_matrix_dependent(qubits_of_interest,
neighbors_of_interest)
def compute_subset_noise_matrices_averaged(self,
subsets_list: List[List[int]],
show_progress_bar: Optional[bool] = False) -> None:
"""Description:
computes averaged (over all other qubits) noise matrices on subsets_list of qubits
:param subsets_list: subsets_list of qubit indices
:param show_progress_bar: whether to show animated progress bar. requires tqdm package
"""
# self.normalize_marginals()
subsets_range = range(len(subsets_list))
if show_progress_bar:
from tqdm import tqdm
subsets_range = tqdm(subsets_range)
for subset_index in subsets_range:
self._compute_noise_matrix_averaged(subsets_list[subset_index])
| StarcoderdataPython |
9669839 | #Declare a list x
x=[1,2,13,4,15]
print('printing the list declared:', x)
#Iterate the list forward and print the value
for i in x:
print(i)
#Length of the list
print('length of the list is:', len(x))
#Print the new line
print("")
#Decrement the value held in i until it becomes zero
while i:
i = i-1
print(i)
| StarcoderdataPython |
1957516 | <filename>rl/data/interactions_producer.py
import abc
import numpy as np
import tensorflow as tf
from rl.utils.env_batch import EnvBatch, SingleEnvBatch
class BaseInteractionsProducer(abc.ABC):
def __init__(self, env, policy, batch_size, env_step=None):
self._env = env
self._policy = policy
self._batch_size = batch_size
if env_step is None:
env_step = tf.train.get_or_create_global_step()
self._env_step = env_step
self._elapsed_steps_ph = tf.placeholder(self._env_step.dtype, [],
name="elapsed_steps")
self._updated_env_step = self._env_step.assign_add(self._elapsed_steps_ph)
self._summary_manager = None
@property
def batch_size(self):
return self._batch_size
@property
def observation_space(self):
return self._env.observation_space
@property
def action_space(self):
return self._env.action_space
@property
def env_step(self):
return self._env_step
@property
def summary_manager(self):
return self._summary_manager
@abc.abstractmethod
def start(self, session, summary_manager=None):
if not self._policy.is_built:
raise ValueError("Policy must be built before calling start")
self._session = session
self._summary_manager = summary_manager
def _update_env_step(self, elapsed_steps):
return self._session.run(self._updated_env_step,
{self._elapsed_steps_ph: elapsed_steps})
@abc.abstractmethod
def next(self):
...
class OnlineInteractionsProducer(BaseInteractionsProducer):
def __init__(self, env, policy, batch_size, cutoff=True, env_step=None):
if not isinstance(env, EnvBatch):
env = SingleEnvBatch(env)
if batch_size % env.num_envs != 0:
raise ValueError("env.num_envs = {} does not divide batch_size = {}"
.format(env.num_envs, batch_size))
super(OnlineInteractionsProducer, self).__init__(env, policy, batch_size,
env_step=env_step)
self._cutoff = cutoff
@property
def num_envs(self):
return self._env.num_envs
def start(self, session, summary_manager=None):
super(OnlineInteractionsProducer, self).start(session, summary_manager)
self._state = {"latest_observations": self._env.reset()}
obs_shape = ((self.batch_size,)
+ self._state["latest_observations"].shape[1:])
obs_type = self._state["latest_observations"].dtype
self._trajectory = {
"observations": np.empty(obs_shape, dtype=obs_type),
"rewards": np.empty(self.batch_size, dtype=np.float32),
"resets": np.empty(self.batch_size, dtype=np.bool),
}
act = self._policy.act(self._state["latest_observations"], sess=session)
for key, val in act.items():
val_batch_shape = (self.batch_size,) + val.shape[1:]
self._trajectory[key] = np.empty(val_batch_shape, val.dtype)
if self._policy.state_inputs is not None:
self._state[self._policy.state_inputs] = self._policy.state_values
def next(self):
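        # Step the batch of environments in lock-step until batch_size transitions are
        # collected (or, when cutoff is enabled, until the first reset), filling the
        # preallocated trajectory buffers with observations, policy outputs, rewards
        # and reset flags.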
observations = self._trajectory["observations"]
actions = self._trajectory["actions"]
self._state["env_steps"] = self.batch_size
if self._policy.state_inputs is not None:
self._state[self._policy.state_inputs] = self._policy.state_values
for i in range(0, self.batch_size, self.num_envs):
batch_slice = slice(i, i + self.num_envs)
observations[batch_slice] = self._state["latest_observations"]
act = self._policy.act(self._state["latest_observations"], self._session)
for key, val in act.items():
self._trajectory[key][batch_slice] = val
obs, rews, resets, infos = self._env.step(actions[batch_slice])
self._state["latest_observations"] = obs
self._trajectory["rewards"][batch_slice] = rews
self._trajectory["resets"][batch_slice] = resets
if np.any(resets):
self._policy.reset(resets)
if self.summary_manager is not None:
env_step = self._session.run(self.env_step) + i + self.num_envs
if self.summary_manager.summary_time(env_step):
for info in np.asarray(infos)[resets]:
self.summary_manager.add_summary_dict(
info.get("summaries", info), step=env_step)
if self._cutoff:
self._state["env_steps"] = i + self.num_envs
break
self._update_env_step(self._state["env_steps"])
if self._state["env_steps"] == self.batch_size:
self._trajectory["state"] = self._state
return self._trajectory
else:
traj = {key: val[:self._state["env_steps"]]
for key, val in self._trajectory.items() if key != "state"}
traj["state"] = self._state
return traj
| StarcoderdataPython |
1699314 | <gh_stars>0
from fetch_data import fetch_poem
from markov_python.cc_markov import MarkovChain
def intialize_model():
'''Feed the data to a markov chain and return the object of it'''
poems = fetch_poem()
mc = MarkovChain()
for poem in poems:
mc.add_string(poems[poem])
return mc
def generate_poem(mc, para = 3):
'''Prints the paragraphs for the poem generated via Markov Chain generated text
Input :
mc : object for the markov model
para : No. of paragraphs to print (Default is 3)'''
for p in range(para):
words = mc.generate_text(48)
line = ''
        for idx, word in enumerate(words):
            line += word + ' '
            # Flush after every 8th word so each paragraph prints six full lines.
            if (idx + 1) % 8 == 0:
                print(line)
                line = ''
        print()
if __name__ == '__main__':
print ("This program generates poem similiar to <NAME>'s poems")
mc = intialize_model()
n = int(input("Number of Paragraphs you want to generate : "))
generate_poem(mc, n)
input() | StarcoderdataPython |
5128647 | <filename>compoundInvestment.py
# Calculate the total savings you can get from a starting annualSalary
def f():
annualSalary = 80000
global timeWorking
timeWorking= 20
annualSalaryIncrease = 5000
super = 0.1 # Superannuation
annualSavingPercent = 0.1
tax = 0.04
totalSaving = annualSalary*(super+annualSavingPercent-tax)
annualROI = 1.1
# Calculating part
for i in range(1, timeWorking+1):
if (i == 1):
annualSaving = annualSalary*(super+annualSavingPercent-tax)
totalSaving = annualSaving
else:
# The savings includes superannuation + personal savings - tax
annualSalary += annualSalaryIncrease
annualSaving = annualSalary * (super+annualSavingPercent-tax)
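            # Compound last year's balance at the annual ROI, then add this year's saving.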
totalSaving = totalSaving * annualROI + annualSaving
print('year:', i, '/ annualSalary:', annualSalary, '/ annualSaving:',annualSaving, '/ totalSaving:', totalSaving)
return totalSaving
print('You have saved', f(), 'after', timeWorking, 'years working.') | StarcoderdataPython |
8000465 | from .utils import RequestHandler, HTTPException, role_admitted, scanAllLinks, export
from .controllers import Roles, add_event_to_user, verify_token, remove_event_from_user
from .controllers import get_event, get_all_events, update_event, get_user_events, get_user_tags
import json
class Events(RequestHandler):
@role_admitted(Roles.USER, Roles.ADMIN)
def get(self):
token = self.request.headers['HTTP_AUTHORIZATION']
user_urlsafe = verify_token(token)
user_tags = get_user_tags(user_urlsafe)
event_data = []
events = get_all_events(user_tags)
for event in events:
event_dict = {}
event_dict["id"] = event.urlsafe
event_dict["title"] = event.title
event_dict["image_link"] = event.image_link
event_dict["description"] = event.description
event_dict["location"] = event.location
event_dict["date"] = event.date.strftime("%Y-%m-%d %H:%M")
event_data.append(event_dict)
json_data = json.dumps(event_data)
return json_data
@role_admitted(Roles.USER, Roles.ADMIN)
def post(self):
event_urlsafe = self.request.payload.get("event_id")
field_name = self.request.payload.get("field_name")
modification = self.request.payload.get("modification")
        if not event_urlsafe or not field_name or not modification:
raise HTTPException("400", "Please specify all fields")
update_event(event_urlsafe, field_name, modification)
return {}
class EventInfo(RequestHandler):
@role_admitted(Roles.USER, Roles.ADMIN)
def get(self):
urlsafe = self.request.args["event_id"]
if not urlsafe:
raise HTTPException("400", "Please specify event id")
event = get_event(urlsafe)
event_dict = {}
event_dict["id"] = event.urlsafe
event_dict["title"] = event.title
event_dict["image_link"] = event.image_link
event_dict["description"] = event.description
event_dict["location"] = event.location
event_dict["date"] = event.date.strftime("%Y-%m-%d %H:%M")
if event.price:
event_dict["price"] = event.price
else:
event_dict["price"] = "-"
if event.capacity:
event_dict["capacity"] = event.capacity
else:
event_dict["capacity"] = "-"
tag_names = []
for tag in event.tags:
tag_entity = tag.get()
tag_names.append(tag_entity.name)
event_dict["tags"] = tag_names
return event_dict
class MyEvents(RequestHandler):
@role_admitted(Roles.USER, Roles.ADMIN)
def get(self):
event_data = []
token = self.request.headers['HTTP_AUTHORIZATION']
user_urlsafe = verify_token(token)
events = get_user_events(user_urlsafe)
print (events)
for event_key in events:
event = event_key.get()
if event is None:
continue
event_dict = {}
event_dict["id"] = event.urlsafe
event_dict["title"] = event.title
event_dict["image_link"] = event.image_link
event_dict["description"] = event.description
event_dict["location"] = event.location
event_dict["date"] = event.date.strftime("%Y-%m-%d %H:%M")
event_data.append(event_dict)
json_data = json.dumps(event_data)
return json_data
@role_admitted(Roles.USER, Roles.ADMIN)
def put(self):
event_urlsafe = self.request.payload.get('event_id')
if not event_urlsafe:
raise HTTPException("400", "Please specify event id")
event = get_event(event_urlsafe)
token = self.request.headers['HTTP_AUTHORIZATION']
user_urlsafe = verify_token(token)
add_event_to_user(user_urlsafe, event.key)
return {'status':'200', 'message':'Event heart succesfully added'}
@role_admitted(Roles.USER, Roles.ADMIN)
def delete(self):
event_urlsafe = self.request.payload.get('event_id')
if not event_urlsafe:
raise HTTPException("400", "Please specify event id")
event = get_event(event_urlsafe)
token = self.request.headers['HTTP_AUTHORIZATION']
user_urlsafe = verify_token(token)
remove_event_from_user(user_urlsafe, event.key)
return {'status':'200', 'message':'Event heart succesfully removed'}
class Calendar(RequestHandler):
@role_admitted(Roles.USER, Roles.ADMIN)
def get(self):
token = self.request.headers['HTTP_AUTHORIZATION']
user_urlsafe = verify_token(token)
data = export(user_urlsafe)
return data.encode('utf-8')
class EventsCrawl(RequestHandler):
def get(self):
scanAllLinks()
return {}
| StarcoderdataPython |
1602940 | <gh_stars>10-100
import mymodule
a = mymodule.A()
a.spam()
b = mymodule.B()
b.bar()
| StarcoderdataPython |
11366482 | import numpy as np
from astropy.coordinates import ICRS, SkyCoord
from astropy.wcs.utils import skycoord_to_pixel
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import cdshealpix
from ... import mocpy
def border(moc, ax, wcs, **kw_mpl_pathpatch):
from .utils import build_plotting_moc
moc_to_plot = build_plotting_moc(moc, wcs)
if moc_to_plot.empty():
return
max_order = moc_to_plot.max_order
ipixels_open = mocpy.flatten_pixels(moc_to_plot._interval_set._intervals, moc_to_plot.max_order)
# Take the complement if the MOC covers more than half of the sky
num_ipixels = 3 << (2*(max_order + 1))
sky_fraction = ipixels_open.shape[0] / float(num_ipixels)
if sky_fraction > 0.5:
ipixels_all = np.arange(num_ipixels)
ipixels_open = np.setdiff1d(ipixels_all, ipixels_open, assume_unique=True)
neighbors = cdshealpix.neighbours(ipixels_open, max_order).T
# Select the direct neighbors (i.e. those in WEST, NORTH, EAST and SOUTH directions)
neighbors = neighbors[[3, 7, 5, 1], :]
ipix_moc = np.isin(neighbors, ipixels_open)
west_edge = ipix_moc[0, :]
south_edge = ipix_moc[1, :]
east_edge = ipix_moc[2, :]
north_edge = ipix_moc[3, :]
num_ipix_moc = ipix_moc.sum(axis=0)
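    # A pixel lies on the MOC border when fewer than all four of its direct neighbours belong to the MOC.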
ipixels_border_id = (num_ipix_moc < 4)
# The border of each HEALPix cells is drawn one at a time
path_vertices_l = []
codes = []
west_border = west_edge[ipixels_border_id]
south_border = south_edge[ipixels_border_id]
east_border = east_edge[ipixels_border_id]
north_border = north_edge[ipixels_border_id]
ipixels_border = ipixels_open[ipixels_border_id]
ipix_lon_boundaries, ipix_lat_boundaries = cdshealpix.vertices(ipixels_border, max_order)
ipix_boundaries = SkyCoord(ipix_lon_boundaries, ipix_lat_boundaries, frame=ICRS())
# Projection on the given WCS
xp, yp = skycoord_to_pixel(coords=ipix_boundaries, wcs=wcs)
from . import culling_backfacing_cells
xp, yp, frontface_id = culling_backfacing_cells.backface_culling(xp, yp)
west_border = west_border[frontface_id]
south_border = south_border[frontface_id]
east_border = east_border[frontface_id]
north_border = north_border[frontface_id]
for i in range(xp.shape[0]):
vx = xp[i]
vy = yp[i]
if not north_border[i]:
path_vertices_l += [(vx[0], vy[0]), (vx[1], vy[1]), (0, 0)]
codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY]
if not east_border[i]:
path_vertices_l += [(vx[1], vy[1]), (vx[2], vy[2]), (0, 0)]
codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY]
if not south_border[i]:
path_vertices_l += [(vx[2], vy[2]), (vx[3], vy[3]), (0, 0)]
codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY]
if not west_border[i]:
path_vertices_l += [(vx[3], vy[3]), (vx[0], vy[0]), (0, 0)]
codes += [Path.MOVETO] + [Path.LINETO] + [Path.CLOSEPOLY]
path = Path(path_vertices_l, codes)
perimeter_patch = PathPatch(path, **kw_mpl_pathpatch)
ax.add_patch(perimeter_patch)
from . import axis_viewport
axis_viewport.set(ax, wcs)
| StarcoderdataPython |
95136 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import unicode_literals
import os
import sys
import gzip
import json
import re
from maya import mel, cmds
import maya.OpenMaya as om
import maya.OpenMayaMPx as omp
from default import AzureBatchRenderJob, AzureBatchRenderAssets
try:
str_type = unicode
except NameError:
str_type = str
class ArnoldRenderJob(AzureBatchRenderJob):
render_engine = 'arnold'
def __init__(self):
self._renderer = 'arnold'
self.label = 'Arnold'
self.log_levels = [
"0 - Errors",
"1 - Warnings + Info",
"2 - Debug"
]
def settings(self):
if self.scene_name == '':
job_name = "Untitled"
else:
job_name = str_type(os.path.splitext(os.path.basename(self.scene_name))[0])
file_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
if file_prefix:
file_prefix = os.path.split(file_prefix)[1]
else:
file_prefix = "<Scene>"
self.job_name = self.display_string("Job name: ", job_name)
self.output_name = self.display_string("Output prefix: ", file_prefix)
self.start = self.display_int("Start frame: ", self.start_frame, edit=True)
self.end = self.display_int("End frame: ", self.end_frame, edit=True)
self.step = self.display_int("Frame step: ", self.frame_step, edit=True)
self.additional_flags_field = self.display_string("Additional flags: ", self.additional_flags, edit=True)
try:
log_level = cmds.getAttr("defaultArnoldRenderOptions.log_verbosity")
except ValueError:
log_level = 1
self.logging = self.display_menu("Logging: ", self.log_levels, log_level+1)
def get_title(self):
return str_type(cmds.textField(self.job_name, query=True, text=True))
def render_enabled(self):
return True
@property
def additional_flags(self):
return str_type(" ")
def get_jobdata(self):
if self.scene_name == '':
raise ValueError("Current Maya scene has not been saved to disk.")
pending_changes = cmds.file(query=True, modified=True)
if not pending_changes:
return self.scene_name, [self.scene_name]
options = {
'save': "Save and continue",
'nosave': "Continue without saving",
'cancel': "Cancel"
}
answer = cmds.confirmDialog(title="Unsaved Changes",
message="There are unsaved changes. Continue?",
button=options.values(),
defaultButton=options['save'],
cancelButton=options['cancel'],
dismissString=options['cancel'])
if answer == options['cancel']:
raise Exception("Submission cancelled")
if answer == options['save']:
cmds.SaveScene()
return self.scene_name, [self.scene_name]
def get_params(self):
params = {}
params['frameStart'] = cmds.intField(self.start, query=True, value=True)
params['frameEnd'] = cmds.intField(self.end, query=True, value=True)
params['frameStep'] = cmds.intField(self.step, query=True, value=True)
params['renderer'] = self._renderer
params['logLevel'] = int(cmds.optionMenu(self.logging, query=True, select=True)) - 1
#additionalFlags has to default to " " rather than an empty string, in order to be accepted by the template
additionalFlagsValue = str_type(cmds.textField(self.additional_flags_field, query=True, text=True))
if not additionalFlagsValue:
additionalFlagsValue = " "
params['additionalFlags'] = additionalFlagsValue
return params
class ArnoldRenderAssets(AzureBatchRenderAssets):
assets = []
render_engine = 'arnold'
replace_pattern = re.compile(r'#+')
file_nodes = {
'aiStandIn': ['dso'],
'aiPhotometricLight': ['aiFilename'],
'aiVolume': ['filename'],
'aiImage': ['filename']
}
def check_path(self, path):
"""
TODO: The pattern replacements are currently not strict enough,
for example:
'test.#.png' will match test.1.png, test.1001.png, test.1test.png, test.9.9.test.png
when we only want to match test.1.png and test.1001.png.
We need to replace with a proper regex match as glob is insufficient.
Other assumptions:
- Asset patterns will ONLY occur in the filename, not the path.
- A UDIM reference will always be 4 digits.
- A single '#' character can represent multiple digits.
"""
if '#' in path:
return self.replace_pattern.sub('[0-9]*', path)
elif '<udim>' in path:
return path.replace('<udim>', '[0-9][0-9][0-9][0-9]')
elif '<tile>' in path:
return path.replace('<tile>', '_u*_v*')
else:
return path
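    # A sketch of the stricter matching the TODO above asks for (assumption: the collected
    # pattern would be evaluated with the re module rather than glob):
    #   frame_regex = re.compile(re.escape(prefix) + r'\d+' + re.escape(suffix) + '$')
    # where prefix/suffix are the pieces of the filename surrounding the '#' run.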
def renderer_assets(self):
self.assets = []
collected = []
for node_type, attributes in self.file_nodes.items():
nodes = cmds.ls(type=node_type)
for node in nodes:
for attr in attributes:
path = cmds.getAttr(node + '.' + attr)
if path:
collected.append(path)
for path in collected:
self.assets.append(self.check_path(path))
return self.assets
def setup_script(self, script_handle, pathmap, searchpaths):
search_path = ';'.join(searchpaths).encode('utf-8')
procedural_searchpath = str("setAttr -type \"string\" defaultArnoldRenderOptions.procedural_searchpath \"{}\";\n").format(search_path)
plugin_searchpath = str("setAttr -type \"string\" defaultArnoldRenderOptions.plugin_searchpath \"{}\";\n").format(search_path)
texture_searchpath = str("setAttr -type \"string\" defaultArnoldRenderOptions.texture_searchpath \"{}\";\n").format(search_path)
script_handle.write(procedural_searchpath)
script_handle.write(plugin_searchpath)
script_handle.write(texture_searchpath)
# This kind of explicit asset re-direct is kinda ugly - so far
# it only seems to be needed on aiImage nodes, which appear to
# be bypassed by the 'dirmap' command. We may need to extend this
# to other ai node types.
script_handle.write("$aiImageNodes = `ls -type aiImage`;\n")
script_handle.write("for ( $aiImageNode in $aiImageNodes ) {\n")
script_handle.write("string $fullname = `getAttr ($aiImageNode + \".filename\")`;\n")
script_handle.write("string $basename = basename($fullname, \"\");\n")
script_handle.write("setAttr -type \"string\" ($aiImageNode + \".filename\") $basename;\n")
script_handle.write("}\n")
| StarcoderdataPython |